from django.shortcuts import get_object_or_404
from django.db.models import Q
from rest_framework import viewsets, filters
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from .serializers import UserSerializer, PlaceSerializer
from .serializers import RatingSerializer, CuisineSerializer, LocationTypeSerializer
from main.utils import IsStaffOrReadOnly
from main.models import User, Place, Rating, Cuisine, LocationType, RecommandationHistory
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import math
import json
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
class PlaceViewSet(viewsets.ModelViewSet):
serializer_class = PlaceSerializer
authentication_classes = [TokenAuthentication]
permission_classes = [IsStaffOrReadOnly]
filter_backends = [filters.DjangoFilterBackend]
@method_decorator(csrf_exempt)
def get_queryset(self):
queryset = Place.objects.all()
if self.request.query_params.get('search'):
search = self.request.query_params['search']
queryset = queryset.filter(
Q(name__icontains=search) |
Q(address__icontains=search)
)
return queryset
class MeViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication]
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.filter(pk=self.request.user.pk)
class RatingViewSet(viewsets.ModelViewSet):
queryset = Rating.objects.all()
serializer_class = RatingSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
authentication_classes = [TokenAuthentication]
filter_backends = [filters.DjangoFilterBackend]
filter_fields = ['user', 'place']
def perform_create(self, serializer):
serializer.save(user=self.request.user)
def perform_update(self, serializer):
serializer.save(user=self.request.user)
class CuisineViewSet(viewsets.ModelViewSet):
queryset = Cuisine.objects.all()
serializer_class = CuisineSerializer
authentication_classes = [TokenAuthentication]
permission_classes = []
class LocationTypeViewSet(viewsets.ModelViewSet):
queryset = LocationType.objects.all()
serializer_class = LocationTypeSerializer
authentication_classes = [TokenAuthentication]
permission_classes = [IsStaffOrReadOnly]
class RecomandationViewSet(viewsets.ReadOnlyModelViewSet):
model = Place
serializer_class = PlaceSerializer
authentication_classes = [TokenAuthentication]
def retrieve(self, request, pk=None):
return Response(status=403)
def distance_meters(self, lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
# we need the distance in meters (http://www.johndcook.com/blog/python_longitude_latitude/)
return arc * 6373 * 1000
def update_history(self, search_info):
if self.request.user.is_anonymous():
return 0
history_object = RecommandationHistory(
user = self.request.user,
location_lat = search_info['lat'],
location_lon = search_info['lng'],
radius = search_info['radius'])
all_history = RecommandationHistory.objects.filter(user = self.request.user).order_by('-time')
if not all_history:
history_object.save()
return 1
else:
history_object.save()
last_history_object = all_history[0]
cur_cuisines = set()
cur_locations = set()
if search_info['location_types'] != None:
loc_instances = LocationType.objects.filter(
pk__in=search_info['location_types']);
for item in loc_instances:
history_object.location_types.add(item)
cur_locations.add(item.name)
if search_info['cuisines'] != None:
cuisine_instances = Cuisine.objects.filter(
pk__in = search_info['cuisines']);
for item in cuisine_instances:
history_object.cuisines.add(item)
cur_cuisines.add(item.name)
last_locations = set(item.name for item in last_history_object.location_types.all())
last_cuisines = set(item.name for item in last_history_object.cuisines.all())
if last_cuisines == cur_cuisines:
if last_locations == cur_locations:
if history_object.location_lat == last_history_object.location_lat:
if self.distance_meters(last_history_object.location_lat, last_history_object.location_lon, history_object.location_lat, history_object.location_lon) < 1:
if last_history_object.radius == history_object.radius:
#don't care, this query came from 'more results'
history_object.delete()
print "The same"
return 0
history_object.save()
return 1
def refine_set(self, recommended_queryset, k_threshold):
recent_hist = RecommandationHistory.objects.filter(user = self.request.user).order_by('time')
recent_hist = recent_hist[max(0, len(recent_hist) - k_threshold):]
recent_hist.reverse()
cache = dict()
for item in recent_hist:
all_cuisines = item.cuisines.all()
all_locations = item.location_types.all()
for cuis in all_cuisines:
if cuis.name not in cache.keys():
cache[cuis.name] = 1
else:
cache[cuis.name] += 1
for loc in all_locations:
if loc.name not in cache.keys():
cache[loc.name] = 1
else:
cache[loc.name] += 1
scores = dict()
for item in recommended_queryset:
cur_sum = 0
for cuisine in item.cuisines.all():
if cuisine.name in cache.keys():
cur_sum += cache[cuisine.name]
avg = item.average_stars
if avg is None:
avg = 3.0
scores[item] = avg * cur_sum
def compare_function(x, y):
if scores[x] == scores[y]:
return 0
if scores[x] < scores[y]:
return -1
return 1
recommended_queryset = sorted(recommended_queryset, cmp=compare_function, reverse=True)
#get new recommendations
#import pdb; pdb.set_trace()
rated_list = Rating.objects.filter(user=self.request.user)
rated_set = set()
for rated_item in rated_list:
rated_set.add(rated_item.place.id)
#take the first 3 unrated
rated_items = list()
unrated_items = list()
left_over = list()
for item_idx in range(len(recommended_queryset)):
item = recommended_queryset[item_idx]
if item.id in rated_set:
rated_items.append(item_idx)
else:
if len(unrated_items) < 3:
unrated_items.append(item_idx)
else:
left_over.append(item_idx)
recommended_queryset_clone = list()
final_order = list()
for x in unrated_items:
final_order.append(x)
for x in rated_items:
final_order.append(x)
for x in left_over:
final_order.append(x)
rec_clone = [recommended_queryset[x] for x in final_order]
#print([scores[item] for item in recommended_queryset])
return rec_clone
def get_queryset(self):
cuisines_arg = self.request.QUERY_PARAMS.get('cuisines', None)
types_arg = self.request.QUERY_PARAMS.get('locationtypes', None)
lat_arg = self.request.QUERY_PARAMS.get('lat', None)
lng_arg = self.request.QUERY_PARAMS.get('lng', None)
radius_arg = self.request.QUERY_PARAMS.get('radius', None)
recommended_queryset = Place.objects.all()
cuisines_json_list = None
types_json_list = None
if cuisines_arg is not None:
cuisines_json_list = json.loads(cuisines_arg)
if cuisines_json_list:
recommended_queryset = recommended_queryset.filter(cuisines__pk__in = cuisines_json_list)
if types_arg is not None:
types_json_list = json.loads(types_arg)
if types_json_list:
recommended_queryset = recommended_queryset.filter(location_types__pk__in = types_json_list)
radius = 100
if radius_arg is not None:
try:
radius = float(radius_arg)
except ValueError:
pass
history_info = {
'cuisines': cuisines_json_list,
'location_types': types_json_list,
'lat': None,
'lng': None,
'radius': radius
}
if lat_arg is not None and lng_arg is not None:
try:
lat = float(lat_arg)
lng = float(lng_arg)
recommended_queryset = filter(lambda x: (self.distance_meters(lat, lng, float(x.location_lat), float(x.location_lon)) <= radius), recommended_queryset)
history_info['lat'] = lat;
history_info['lng'] = lng;
except ValueError:
pass
self.update_history(history_info)
if self.request.user.is_anonymous() != True:
#take the last 30 recommendations
recommended_queryset = self.refine_set(recommended_queryset, 30)
else:
recommended_queryset = sorted(recommended_queryset, key=lambda x: x.average_stars, reverse = True)
return recommended_queryset
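# A minimal standalone sketch of the spherical-law-of-cosines distance used by
# RecomandationViewSet.distance_meters above (Earth radius taken as 6373 km to
# match that method). Provided for illustration only; the helper name below is
# not part of the original module.
def _great_circle_meters_sketch(lat1, lon1, lat2, lon2, radius_km=6373.0):
    phi1 = math.radians(90.0 - lat1)
    phi2 = math.radians(90.0 - lat2)
    theta1 = math.radians(lon1)
    theta2 = math.radians(lon2)
    cos_arc = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) +
               math.cos(phi1) * math.cos(phi2))
    # Clamp against floating-point drift before acos().
    cos_arc = max(-1.0, min(1.0, cos_arc))
    return math.acos(cos_arc) * radius_km * 1000.0
# e.g. _great_circle_meters_sketch(44.43, 26.10, 44.44, 26.11) is roughly 1.4 km.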
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import abc
import errno
import fcntl
import fnmatch
import logging
import argparse
import contextlib
from concurrent import futures
import lehash
class File(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def openflag(self):
pass
@property
@abc.abstractmethod
def lockmode(self):
pass
def __init__(self, path, encoding, hasher):
self.path = os.path.realpath(path)
self.encoding = encoding
self.hasher = hasher
self.fileno = -1
self.statc = self.stat0
def __eq__(self, other):
for attr in ['st_size', 'st_mtime']:
if getattr(self.stat, attr) != getattr(other.stat, attr):
return False
for attr in ['basename', 'digest']:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.path
@property
def encoded(self):
return str(self).encode(self.encoding, 'surrogateescape')
@property
def basename(self):
return os.path.basename(str(self))
@property
def digest(self):
if self.opened:
with self.hasher.open() as desc:
return desc.digest(self.fileno, self.stat.st_size)
@property
def opened(self):
return self.fileno >= 0
@property
def stat(self):
if self.opened and self.statc == self.stat0:
self.statc = os.stat_result(int(s) for s in os.fstat(self.fileno))
return self.statc
@property
def stat0(self):
return os.stat_result((-1,) * 10)
@property
def errnoignore(self):
return []
@property
def isdir(self):
return os.path.isdir(self.encoded)
def iterdir(self):
for f in os.listdir(self.encoded):
yield self.join(f.decode(self.encoding, 'surrogateescape'))
def join(self, f):
path = os.path.join(str(self), f)
return type(self)(path, self.encoding, self.hasher)
@contextlib.contextmanager
def open(self, mode=0o0644):
try:
try:
self.fileno = os.open(self.encoded, self.openflag, mode)
fcntl.flock(self.fileno, self.lockmode | fcntl.LOCK_NB)
except OSError as e:
if e.errno not in self.errnoignore:
raise
yield self
finally:
if self.opened:
os.close(self.fileno)
self.fileno = -1
@contextlib.contextmanager
def mkdir(self, src):
yield
def seek(self, *args, **kwargs):
if self.opened:
os.lseek(self.fileno, *args, **kwargs)
def truncate(self, *args, **kwargs):
pass
def copyfrom(self, src):
pass
class FileRD(File):
@property
def openflag(self):
return os.O_LARGEFILE | os.O_RDONLY
@property
def lockmode(self):
return fcntl.LOCK_SH
class FileRDWR(File):
@property
def openflag(self):
return os.O_LARGEFILE | os.O_RDWR | os.O_CREAT
@property
def lockmode(self):
return fcntl.LOCK_EX
@contextlib.contextmanager
def mkdir(self, src):
try:
os.mkdir(self.encoded, src.stat.st_mode & 0o0777)
except OSError as e:
if e.errno != errno.EEXIST:
raise
yield
os.utime(self.encoded, (src.stat.st_atime, src.stat.st_mtime))
def truncate(self, *args, **kwargs):
if self.opened:
os.ftruncate(self.fileno, *args, **kwargs)
def copyfrom(self, src):
if not (self.opened and src.opened):
return
size = src.stat.st_size
while size > 0:
size -= os.sendfile(self.fileno, src.fileno, None, size)
os.utime(self.fileno, (src.stat.st_atime, src.stat.st_mtime))
class FileStat(FileRD):
@property
def errnoignore(self):
return [errno.ENOENT]
@contextlib.contextmanager
def umask(mask):
try:
oldmask = -1
oldmask = os.umask(mask)
yield
finally:
if oldmask > 0:
os.umask(oldmask)
def copy(args, src, dst):
with src.open(), dst.open(src.stat.st_mode & 0o0777):
if args.sync and src == dst:
return logging.debug('skip: %s', src)
dst.seek(0, os.SEEK_SET)
dst.truncate(0)
dst.copyfrom(src)
logging.info('copy: %s -> %s', src, dst)
def xfnmatch(path, patterns):
return any(fnmatch.fnmatch(path, p) for p in patterns)
def walk(args, src, dst):
match = str(src) + os.sep if src.isdir else str(src)
if not xfnmatch(match, args.include) or xfnmatch(match, args.exclude):
return logging.debug('skip: %s', src)
if dst.isdir:
dst = dst.join(src.basename)
if not src.isdir:
return args.executor.submit(copy, args, src, dst)
with src.open(), dst.mkdir(src):
for s in src.iterdir():
walk(args, s, dst)
def run(args):
nfiles = len(args.files)
if nfiles < 2:
return logging.error('two files required at least')
if nfiles > 2:
dst = args.reader(args.files[-1], args.dst_enc, args.hasher)
if not dst.isdir:
return logging.error('last file must be a directory')
dst = args.writer(args.files.pop(), args.dst_enc, args.hasher)
while args.files:
src = args.reader(args.files.pop(0), args.src_enc, args.hasher)
walk(args, src, dst)
def prepare(args):
args.reader = FileRD
args.writer = FileStat if args.dry_run else FileRDWR
args.hasher = lehash.Hash.instance(args.digest_algo, args.digest_key)
args.executor = futures.ThreadPoolExecutor(max_workers=args.threads)
if args.verbose > 1:
loglevel = logging.DEBUG
elif args.verbose > 0 or args.dry_run:
loglevel = logging.INFO
else:
loglevel = logging.WARN
logging.basicConfig(level=loglevel, format='[%(levelname)s] %(message)s')
def main():
digs = sorted(lehash.Hash.algorithm().keys())
argp = argparse.ArgumentParser()
argp.add_argument('-v', '--verbose', action='count', default=0)
argp.add_argument('-n', '--dry-run', action='store_true', default=False)
argp.add_argument('-S', '--sync', action='store_true', default=False)
argp.add_argument('-I', '--include', nargs='+', default=['*/', '*'])
argp.add_argument('-X', '--exclude', nargs='+', default=[])
argp.add_argument('-s', '--src-enc', default=sys.getfilesystemencoding())
argp.add_argument('-d', '--dst-enc', default='utf-8')
argp.add_argument('-a', '--digest-algo', choices=digs, default='dummy')
argp.add_argument('-k', '--digest-key', type=os.fsencode)
argp.add_argument('-t', '--threads', type=int, default=os.cpu_count())
argp.add_argument('files', nargs=argparse.REMAINDER)
args = argp.parse_args()
prepare(args)
with umask(0), args.executor:
run(args)
if __name__ == '__main__':
main()
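# Example invocation (a sketch; the script name is illustrative and the
# external `lehash` module must provide Hash.instance()/Hash.algorithm()
# as used in prepare()/main() above):
#   ./lecopy.py -v -n -a dummy -X '*.tmp' src_dir other_src dst_dir
# With -n (dry run) destination files are only stat'ed via FileStat; without
# it, data is copied with os.sendfile() and atime/mtime are preserved.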
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for TruncatedNormal distribution."""
import itertools
import unittest
# Dependency imports
from absl.testing import parameterized
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
EPSILON = 1e-5
def scipy_trunc_norm_dist(loc, scale, low, high):
"""Construct a scipy.sp_stats.truncnorm for the (scalar) parameters given.
Note: scipy's definition of the parameters is slightly different.
https://github.com/scipy/scipy/issues/7591
Args:
loc: Location of the distribution (scalar; batched params are not supported).
scale: Scale of the distribution.
low: Lower truncation bound.
high: Upper truncation bound.
Returns:
scipy frozen distribution.
"""
a = (low - loc) / scale
b = (high - loc) / scale
return sp_stats.truncnorm(a, b, loc=loc, scale=scale)
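# For example (illustrative numbers only): loc=1., scale=2., low=0., high=4.
# map to scipy's standardized bounds a = (0 - 1) / 2 = -0.5 and
# b = (4 - 1) / 2 = 1.5, i.e. sp_stats.truncnorm(-0.5, 1.5, loc=1., scale=2.).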
class _TruncatedNormalTestCase(test_util.TestCase):
def setUp(self):
super(_TruncatedNormalTestCase, self).setUp()
self._rng = np.random.RandomState(42)
def assertAllGreaterEqual(self, a, b):
comparison = a >= b
all_true = np.ones_like(comparison, dtype=np.bool_)
self.assertAllEqual(comparison, all_true)
def assertAllLessEqual(self, a, b):
comparison = a <= b
all_true = np.ones_like(comparison, dtype=np.bool_)
self.assertAllEqual(comparison, all_true)
def assertEmpiricalDistributionsEqual(self, sample_a, sample_b, rtol=1e-6,
atol=1e-6):
"""Assert the empirical distribution of two set of samples is similar.
Args:
sample_a: Flat numpy array of samples from dist a.
sample_b: Flat numpy array of samples from dist b.
rtol: Relative tolerances in the histogram comparison.
atol: Absolute tolerances in the histogram comparison.
"""
self.assertAllFinite(sample_a)
self.assertAllFinite(sample_b)
lb = np.min([sample_a, sample_b])
ub = np.max([sample_a, sample_b])
hist_a = np.histogram(sample_a, range=(lb, ub), bins=30, density=True)[0]
hist_b = np.histogram(sample_b, range=(lb, ub), bins=30, density=True)[0]
self.assertAllClose(hist_a, hist_b, rtol=rtol, atol=atol)
@test_util.test_all_tf_execution_regimes
class TruncatedNormalStandaloneTestCase(_TruncatedNormalTestCase):
def _testParamShapes(self, desired_shape):
tn_param_shapes = tfd.TruncatedNormal.param_shapes(desired_shape)
# Check the shapes by comparison with the untruncated Normal.
n_param_shapes = tfd.Normal.param_shapes(desired_shape)
self.assertAllEqual(
self.evaluate(tn_param_shapes['loc']),
self.evaluate(n_param_shapes['loc']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['scale']),
self.evaluate(n_param_shapes['scale']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['low']),
self.evaluate(n_param_shapes['loc']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['high']),
self.evaluate(n_param_shapes['loc']))
loc = tf.zeros(tn_param_shapes['loc'])
scale = tf.ones(tn_param_shapes['scale'])
high = tf.ones(tn_param_shapes['high'])
low = tf.zeros(tn_param_shapes['low'])
sample_shape = self.evaluate(
tf.shape(
tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high,
validate_args=True).sample(seed=test_util.test_seed())))
self.assertAllEqual(desired_shape, sample_shape)
def testParamShapes(self):
desired_shape = [10, 3, 4]
self._testParamShapes(desired_shape)
self._testParamShapes(tf.constant(desired_shape))
def testParamStaticShapes(self):
sample_shape = [7]
self._testParamShapes(sample_shape)
self._testParamShapes(tf.TensorShape(sample_shape))
def testShapeWithPlaceholders(self):
if tf.executing_eagerly():
return
loc = tf1.placeholder_with_default(5., shape=None)
scale = tf1.placeholder_with_default([1., 2], shape=None)
ub = tf1.placeholder_with_default([10., 11.], shape=None)
lb = tf1.placeholder_with_default([-1.], shape=None)
dist = tfd.TruncatedNormal(loc, scale, lb, ub, validate_args=True)
self.assertEqual(dist.batch_shape, tf.TensorShape(None))
self.assertEqual(dist.event_shape, ())
self.assertAllEqual(self.evaluate(dist.event_shape_tensor()), [])
self.assertAllEqual(self.evaluate(dist.batch_shape_tensor()), [2])
self.assertAllEqual(self.evaluate(
dist.sample(5, seed=test_util.test_seed())).shape, [5, 2])
ub = tf1.placeholder_with_default([[5., 11.]], shape=None)
dist = tfd.TruncatedNormal(loc, scale, lb, ub, validate_args=True)
self.assertAllEqual(self.evaluate(
dist.sample(5, seed=test_util.test_seed())).shape, [5, 1, 2])
def testBatchSampling(self):
"""Check (empirically) the different parameters in a batch are respected.
"""
n = int(1e5)
lb = [[-1.0, 9.0], [0., 8.]]
ub = [[1.0, 11.0], [5., 20.]]
dist = tfd.TruncatedNormal(
loc=[[0., 10.], [0., 10.]],
scale=[[1., 1.], [5., 5.]],
low=lb,
high=ub,
validate_args=True)
x = self.evaluate(dist.sample(n, seed=test_util.test_seed()))
self.assertEqual(x.shape, (n, 2, 2))
means = np.mean(x, axis=0)
var = np.var(x, axis=0)
self.assertAllClose(
means, [[0., 10.], [2.299, 12.48]], rtol=1e-2, atol=1e-2)
self.assertAllClose(var, [[0.29, 0.29], [1.99, 8.74]], rtol=1e-2, atol=1e-2)
empirical_lb = np.min(x, axis=0)
self.assertAllClose(empirical_lb, lb, atol=0.1)
empirical_ub = np.max(x, axis=0)
self.assertAllClose(empirical_ub, ub, atol=0.1)
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9))
def testMomentsEmpirically(self, loc, scale, low, high):
n = int(2e5)
dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
x = self.evaluate(dist.sample(n, seed=test_util.test_seed()))
empirical_mean = np.mean(x)
empirical_var = np.var(x)
expected_mean = self.evaluate(dist.mean())
expected_var = self.evaluate(dist.variance())
self.assertAlmostEqual(expected_mean, empirical_mean, places=1)
self.assertAlmostEqual(expected_var, empirical_var, places=1)
def testNegativeSigmaFails(self):
with self.assertRaisesOpError('`scale` must be positive'):
dist = tfd.TruncatedNormal(
loc=0., scale=-0.1, low=-1.0, high=1.0, validate_args=True)
self.evaluate(dist.mean())
def testIncorrectBoundsFails(self):
with self.assertRaisesOpError('`low >= high`'):
dist = tfd.TruncatedNormal(
loc=0., scale=0.1, low=1.0, high=-1.0, validate_args=True)
self.evaluate(dist.mean())
with self.assertRaisesOpError('`low >= high`'):
dist = tfd.TruncatedNormal(
loc=0., scale=0.1, low=1.0, high=1.0, validate_args=True)
self.evaluate(dist.mean())
def testAssertValidSample(self):
dist = tfd.TruncatedNormal(
loc=0., scale=2., low=-4., high=3., validate_args=True)
with self.assertRaisesOpError('must be greater than or equal to `low`'):
self.evaluate(dist.cdf([-4.2, 1.7, 2.3]))
with self.assertRaisesOpError('must be less than or equal to `high`'):
self.evaluate(dist.survival_function([2.3, -3.2, 4.]))
def testLogPdfAtBoundary(self):
dist = tfd.TruncatedNormal(
loc=[-2., 3.], scale=1., low=-4., high=2., validate_args=True)
log_pdf_at_boundary = self.evaluate(dist.log_prob([[-4.], [2.]]))
self.assertTrue(np.isfinite(log_pdf_at_boundary).all())
def testTruncatedAtTail(self):
dist = tfd.TruncatedNormal(
loc=0., scale=1., low=13., high=15., validate_args=True)
sp_dist = scipy_trunc_norm_dist(0., 1., 13., 15.)
actual_log_prob = self.evaluate(dist.log_prob(14.))
self.assertTrue(np.isfinite(actual_log_prob))
expected_log_prob = sp_dist.logpdf(14.)
self.assertAlmostEqual(actual_log_prob, expected_log_prob, places=4)
actual_cdf = self.evaluate(dist.cdf(14.))
self.assertAlmostEqual(actual_cdf, 1., places=4)
actual_log_cdf = self.evaluate(dist.log_cdf(14.))
self.assertAlmostEqual(actual_log_cdf, 0., places=4)
def testNegativeSigmaFailsVarAssignment(self):
dist = tfd.TruncatedNormal(
loc=0., scale=tf.Variable(0.1), low=-1.0, high=1.0, validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.scale.assign(-.1)]):
with self.assertRaisesOpError('`scale` must be positive'):
self.evaluate(dist.mean())
def testIncorrectBoundsFailsVarAssignment(self):
# low is var
dist = tfd.TruncatedNormal(
loc=0., scale=0.1, low=tf.Variable(-1.), high=-.5, validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.low.assign(-.1)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
# high is var
dist = tfd.TruncatedNormal(
loc=0., scale=0.1, low=-1., high=tf.Variable(-.5), validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.high.assign(-1.1)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
# both are vars
dist = tfd.TruncatedNormal(
loc=0., scale=0.1, low=tf.Variable(-1.), high=tf.Variable(-.5),
validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.high.assign(-1.)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9),
(-2., 0.2, -1.5, -0.5))
def testMode(self, loc, scale, low, high):
dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
mode = np.asscalar(self.evaluate(dist.mode()))
if loc < low:
expected_mode = low
elif loc > high:
expected_mode = high
else:
expected_mode = loc
self.assertAlmostEqual(mode, expected_mode)
@parameterized.parameters((np.float32), (np.float64))
def testReparametrizable(self, dtype=np.float32):
loc = dtype(0.1)
scale = dtype(1.1)
low = dtype(-10.0)
high = dtype(5.0)
def f(loc, scale, low, high):
dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
n = int(2e5)
return tf.reduce_mean(
tf.abs(dist.sample(n, seed=test_util.test_seed())))
err = self.compute_max_gradient_error(f, [loc, scale, low, high], delta=0.1)
# These gradients are noisy due to sampling.
self.assertLess(err, 0.05)
def testReparametrizableBatch(self):
def samples_sum(loc):
dist = tfp.distributions.TruncatedNormal(
loc=loc, scale=1., low=-1., high=1., validate_args=True)
return tf.reduce_sum(dist.sample(100, seed=test_util.test_seed()))
loc = tf.constant([0., 1.])
_, dy_loc = self.evaluate(tfp.math.value_and_gradient(samples_sum, loc))
self.assertAllGreaterEqual(dy_loc, 0.)
@parameterized.parameters(
itertools.product((np.float32, np.float64),
('prob', 'log_prob', 'cdf', 'log_cdf',
'survival_function', 'log_survival_function'))
)
def testGradientsFx(self, dtype, fn_name):
if not tf.executing_eagerly(): return
loc = dtype(0.1)
scale = dtype(3.0)
low = dtype(-10.0)
high = dtype(5.0)
x = np.array([-1.0, 0.01, 0.1, 1., 4.9]).astype(dtype).reshape((5, 1))
def f(loc, scale):
dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
func = getattr(dist, fn_name)
return tf.reduce_mean(func(x))
err = self.compute_max_gradient_error(f, [loc, scale])
self.assertLess(err, 1e-2)
@parameterized.parameters(
itertools.product((np.float32, np.float64),
('entropy', 'mean', 'variance', 'mode'))
)
def testGradientsNx(self, dtype, fn_name):
loc = dtype(0.1)
scale = dtype(3.0)
low = dtype(-10.0)
high = dtype(5.0)
def f(loc, scale):
dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
func = getattr(dist, fn_name)
return func()
if fn_name not in ['mode']:
err = self.compute_max_gradient_error(f, [loc, scale])
self.assertLess(err, 0.005)
else:
err = self.compute_max_gradient_error(lambda x: f(x, scale), [loc])
self.assertLess(err, 0.005)
def testSupportBijectorOutsideRange(self):
low = np.array([1., 2., 3., -5.]).astype(np.float32)
loc = np.array([4., 4., 4., -2.]).astype(np.float32)
high = np.array([6., 7., 6., 1.]).astype(np.float32)
dist = tfd.TruncatedNormal(
loc, scale=2., low=low, high=high, validate_args=False)
eps = 1e-6
x = np.array([1. - eps, 1.5, 6. + eps, -5. - eps]).astype(np.float32)
bijector_inverse_x = dist.experimental_default_event_space_bijector(
).inverse(x)
self.assertAllNan(self.evaluate(bijector_inverse_x))
def testSampleXLA(self):
self.skip_if_no_xla()
@tf.function(jit_compile=True)
def f(loc):
return tfd.TruncatedNormal(
loc=loc, scale=1., low=-1., high=1.).sample(
[3], seed=test_util.test_seed())
self.evaluate(f(tf.constant(0.2)))
# TODO(b/150161911): reconcile graph- and eager-mode handling of denormal floats
# so that we can re-enable eager mode tests.
@test_util.test_graph_mode_only
class TruncatedNormalTestGraphMode(_TruncatedNormalTestCase):
@parameterized.named_parameters(
{'testcase_name': '_float32', 'dtype': tf.float32},
{'testcase_name': '_float64', 'dtype': tf.float64})
def testReproduceVmap1(self, dtype):
# Regression test for b/145554459
loc = tf.constant(-200., dtype=dtype)
scale = tf.constant(2.188274e+01, dtype=dtype)
high = tf.constant(113.33857, dtype=dtype)
low = tf.constant(102.94414, dtype=dtype)
# Not validating args b/c the assertions confuse pfor.
dist = tfd.TruncatedNormal(loc, scale, low, high, validate_args=False)
sample = tf.constant([102.950745, 103.87256, 107.78299], dtype=dtype)
batch_lp = dist.log_prob(sample)
pfor_lp = tf.vectorized_map(dist.log_prob, sample)
batch_lp_, pfor_lp_ = self.evaluate((batch_lp, pfor_lp))
self.assertAllClose(batch_lp_, pfor_lp_, atol=1e-6)
@parameterized.named_parameters(
{'testcase_name': '_float32', 'dtype': tf.float32},
{'testcase_name': '_float64', 'dtype': tf.float64})
def testReproduceVmap2(self, dtype):
# Regression test for b/150811273
if dtype == np.float32:
raise unittest.SkipTest('b/150811273')
seed = test_util.test_seed()
loc = tf.constant(-12.500191, dtype=dtype)
scale = tf.constant(1e-06, dtype=dtype)
high = tf.constant(-12.502851, dtype=dtype)
low = tf.constant(-187.50009, dtype=dtype)
# Not validating args b/c the assertions confuse pfor.
dist = tfd.TruncatedNormal(loc, scale, low, high, validate_args=False)
# At the default seed, the sample comes out as [-12.502851 -12.502851
# -12.502851], but that's also weird. At a scale of 1e-6, the samples
# should cluster more tightly around the location, which is -12.500191.
sample = self.evaluate(dist.sample(3, seed=seed))
batch_lp = dist.log_prob(sample)
pfor_lp = tf.vectorized_map(dist.log_prob, tf.convert_to_tensor(sample))
batch_lp_, pfor_lp_ = self.evaluate((batch_lp, pfor_lp))
self.assertAllClose(batch_lp_, pfor_lp_, atol=1e-6)
@test_util.test_all_tf_execution_regimes
@parameterized.parameters(
(0.0, 1.0),
(10.0, 1.0),
(-0.3, 2.0),
(100., 5.0),
)
class TruncatedNormalTestCompareWithNormal(_TruncatedNormalTestCase):
"""Test by comparing TruncatedNormals with wide bounds and unbounded Normal.
"""
def constructDists(self, loc, scale, validate_args=True):
truncated_dist = tfd.TruncatedNormal(
loc=loc,
scale=scale,
low=loc - (10. * scale),
high=loc + (10. * scale),
validate_args=validate_args)
normal_dist = tfd.Normal(loc=loc, scale=scale)
return truncated_dist, normal_dist
def testEntropy(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(loc, scale)
self.assertAllClose(
self.evaluate(truncated_dist.entropy()),
self.evaluate(normal_dist.entropy()),
rtol=1e-6, atol=1e-6)
def testSampling(self, loc, scale):
n = 1000000
truncated_dist, normal_dist = self.constructDists(loc, scale)
seed_stream = test_util.test_seed_stream(salt='TruncNormal')
truncated_samples = self.evaluate(
truncated_dist.sample(n, seed=seed_stream())).flatten()
lb = self.evaluate(truncated_dist.low)
ub = self.evaluate(truncated_dist.high)
self.assertAllGreaterEqual(truncated_samples, lb)
self.assertAllLessEqual(truncated_samples, ub)
normal_samples = self.evaluate(normal_dist.sample(
n, seed=seed_stream())).flatten()
# Rejection sample the normal distribution
rejection_samples = normal_samples[normal_samples >= lb]
rejection_samples = rejection_samples[rejection_samples <= ub]
self.assertEmpiricalDistributionsEqual(
truncated_samples, rejection_samples, rtol=1e-2, atol=1e-1)
def testLogProb(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(
loc, scale, validate_args=False)
low = self.evaluate(truncated_dist.low)
high = self.evaluate(truncated_dist.high)
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [low, high, low + EPSILON, high - EPSILON]
tr_log_prob = self.evaluate(truncated_dist.log_prob(test_x))
n_log_prob = self.evaluate(normal_dist.log_prob(test_x))
self.assertAllClose(tr_log_prob, n_log_prob, rtol=1e-4, atol=1e-4)
no_support_log_prob = self.evaluate(
truncated_dist.log_prob(
np.float32(
[low - EPSILON, high + EPSILON, low - 100., high + 100.]
)))
self.assertAllEqual(no_support_log_prob,
[np.log(0.)] * len(no_support_log_prob))
def testCDF(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(loc, scale)
low = self.evaluate(truncated_dist.low)
high = self.evaluate(truncated_dist.high)
test_x = list(
np.float32(np.random.uniform(low, high, 10)))
test_x += [low, high, low + EPSILON, high - EPSILON]
tr_cdf = self.evaluate(truncated_dist.cdf(test_x))
n_cdf = self.evaluate(normal_dist.cdf(test_x))
self.assertAllClose(tr_cdf, n_cdf, rtol=1e-4, atol=1e-4)
@test_util.test_all_tf_execution_regimes
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9),
(-2., 0.2, -1.5, -0.5))
class TruncatedNormalTestCompareWithScipy(_TruncatedNormalTestCase):
def constructDists(self, loc, scale, low, high, validate_args=True):
tf_dist = tfd.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=validate_args)
sp_dist = scipy_trunc_norm_dist(loc, scale, low, high)
return tf_dist, sp_dist
@test_util.jax_disable_test_missing_functionality(
'In JAX, truncated_normal samples can fall outside the support.')
def testSampling(self, loc, scale, low, high):
n = int(1000000)
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
tf_samples = self.evaluate(tf_dist.sample(
n, seed=test_util.test_seed())).flatten()
self.assertAllGreaterEqual(tf_samples, low)
self.assertAllLessEqual(tf_samples, high)
sp_samples = sp_dist.rvs(size=n)
self.assertEmpiricalDistributionsEqual(
tf_samples, sp_samples, atol=0.05, rtol=0.05)
def testEntropy(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
self.assertAlmostEqual(
self.evaluate(tf_dist.entropy()), sp_dist.entropy(), places=2)
def testLogProb(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_log_prob = self.evaluate(tf_dist.log_prob(test_x))
sp_log_prob = sp_dist.logpdf(test_x)
self.assertAllClose(tf_log_prob, sp_log_prob, rtol=1e-4, atol=1e-4)
def testCDF(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON, low - 100., high + 100.
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_cdf = self.evaluate(tf_dist.cdf(test_x))
sp_cdf = sp_dist.cdf(test_x)
self.assertAllClose(tf_cdf, sp_cdf, rtol=1e-4, atol=1e-4)
def testLogCDF(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + 100 * EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON, low - 100., high + 100.
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_log_cdf = self.evaluate(tf_dist.log_cdf(test_x))
sp_log_cdf = sp_dist.logcdf(test_x)
self.assertAllClose(tf_log_cdf, sp_log_cdf, rtol=1e-4, atol=1e-4)
def testMoments(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
self.assertAlmostEqual(
self.evaluate(tf_dist.mean()), sp_dist.mean(), places=3)
self.assertAlmostEqual(
self.evaluate(tf_dist.variance()), sp_dist.var(), places=3)
def testQuantile(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
for q in [0.01, 0.1, 0.5, 0.9, 0.99]:
self.assertAlmostEqual(
self.evaluate(tf_dist.quantile(value=q)), sp_dist.ppf(q=q), places=3)
if __name__ == '__main__':
test_util.main()
# -*- coding: utf-8 -*-
from collections import Mapping, Sequence
from operator import itemgetter
import math
import time
import numpy as np
import scipy.sparse as sp
import random
from sklearn.preprocessing import normalize
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import atleast2d_or_csr
from pyvotune.theano import dataset, RBM
from pyvotune.log import logger
log = logger()
global_theano = None
global_T = None
global_RandomStreams = None
class TheanoRBMFeatureExtractor(BaseEstimator, TransformerMixin):
def __init__(
self,
learning_rate=0.1, training_epochs=15,
batch_size=20, n_resamples=10,
n_hidden=500):
self.learning_rate = learning_rate
self.training_epochs = training_epochs
self.batch_size = batch_size
self.n_hidden = n_hidden
self.n_resamples = n_resamples
super(TheanoRBMFeatureExtractor, self).__init__()
def fit(self, X, y=None):
global global_theano
global global_T
global global_RandomStreams
log.debug(u"RBM Fitting with lr={0} epochs={1} n_hidden={2}".format(
self.learning_rate, self.training_epochs, self.n_hidden))
## This prevents us from multiple importing theano which is important
## since it performs some global initialization, especially for cuda
if not global_theano:
log.debug(u"Importing Theano")
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
theano.config.warn.subtensor_merge_bug = False
global_theano = theano
global_T = T
global_RandomStreams = RandomStreams
self.rng = np.random.RandomState(123456)
self.theano_rng = global_RandomStreams(self.rng.randint(2 ** 30))
self.n_visible = np.shape(X)[1]
#log.debug(u"RBM Featureset has {0} visible nodes".format(
#self.n_visible))
train_x, train_y = dataset.shared_dataset(global_theano, global_T, X, y, borrow=True)
self.init_objects(train_x)
self.train(train_x)
return self
def train(self, train_x):
n_train_batches = train_x.get_value(borrow=True).shape[0] / self.batch_size
log.debug(
u"Fitting RBM With {0} training batches".format(n_train_batches))
for epoch in xrange(self.training_epochs):
# go through the training set
mean_cost = []
t_start = time.time()
log.debug(u"RBM Training epoch {0}".format(epoch))
for batch_index in xrange(n_train_batches):
t_batch_start = time.time()
mean_cost += [self.train_rbm(batch_index)]
t_batch_end = time.time()
log.debug(u"Training batch {0} of {1} - took {2}s".format(
batch_index, n_train_batches, t_batch_end - t_batch_start))
t_end = time.time()
log.debug(u'Training epoch {0}, cost is {1} - took {2}s'.format(
epoch, np.mean(mean_cost), t_end - t_start))
def transform(self, X, y=None):
test_set_x, _ = dataset.shared_dataset(global_theano, global_T, X, borrow=True)
# pick random test examples, with which to initialize the persistent chain
persistent_vis_chain = global_theano.shared(np.asarray(test_set_x.get_value(borrow=True), dtype=global_theano.config.floatX))
[presig_hids, hid_mfs, hid_samples, presig_vis,
vis_mfs, vis_samples], updates = \
global_theano.scan(
self.rbm.gibbs_vhv,
outputs_info=[None, None, None, None, None, persistent_vis_chain],
n_steps=1)
# add to updates the shared variable that takes care of our persistent
# chain :.
#updates.update({persistent_vis_chain: vis_samples[-1]})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = global_theano.function(
[], [hid_mfs[-1], hid_samples[-1], vis_mfs[-1], vis_samples[-1]],
name='sample_fn')
ident = random.randint(0, 500)
all_hid_mfs = []
all_vis_sample = []
all_hid_sample = []
for i in range(self.n_resamples):
hid_mfs, hid_sample, vis_mfs, vis_sample = sample_fn()
all_hid_mfs.append(hid_mfs)
all_hid_sample.append(hid_sample)
all_vis_sample.append(vis_sample)
hidden_mean_field = np.mean(all_hid_mfs, axis=0)
visible_mean_field = np.mean(all_vis_sample, axis=0)
print "all_hid_mfs shape", np.shape(all_hid_mfs)
print "Hidden mean field", np.shape(hidden_mean_field)
print "Shapes", np.shape(hidden_mean_field), np.shape(all_hid_mfs)
#self.sample_all(X, all_hid_sample, all_vis_sample, ident)
#return hidden_mean_field
return visible_mean_field
#def sample_all(self, X, all_hid_sample, all_vis_sample, ident):
#width = np.shape(X)[1]
#sq = math.sqrt(width)
#if width != sq ** 2:
#return
#hid_sample_mean_field = np.mean(all_hid_sample, axis=0)
#vis_sample_mean_field = np.mean(all_vis_sample, axis=0)
#all_recons = []
#n_padding = (width - self.n_hidden) / 2
#padding = np.zeros((n_padding, ))
#for sample, recons, hidden in zip(X, vis_sample_mean_field, hid_sample_mean_field)[:10]:
#padded_hidden = np.hstack((padding, hidden, padding))
#comb = np.hstack((
#sample.reshape(50, 50), recons.reshape(50, 50),
#padded_hidden.reshape(50, 50)))
#comb = np.flipud(comb)
#all_recons.append(comb)
#np_to_pil(
#np.vstack(all_recons), colorize=True,
#filename='samples/%i_samp_reconstruction_%i_%ires.png' % (
#ident, len(X), self.n_resamples))
def init_objects(self, train_x):
# allocate symbolic variables for the data
self.index = global_T.lscalar() # index to a [mini]batch
self.x = global_T.matrix('x') # the data is presented as rasterized images
# initialize storage for the persistent chain (state = hidden
# layer of chain)
self.persistent_chain = global_theano.shared(
np.zeros(
(self.batch_size, self.n_hidden),
dtype=global_theano.config.floatX),
borrow=True)
# construct the RBM class
self.rbm = RBM(
global_theano, global_T,
input=self.x, n_visible=self.n_visible,
n_hidden=self.n_hidden, np_rng=self.rng, theano_rng=self.theano_rng)
# get the cost and the gradient corresponding to one step of CD-15
self.cost, self.updates = self.rbm.get_cost_updates(
lr=self.learning_rate, persistent=self.persistent_chain, k=15)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
self.train_rbm = global_theano.function(
[self.index], self.cost,
updates=self.updates,
givens={self.x: train_x[self.index * self.batch_size: (self.index + 1) * self.batch_size]},
name='train_rbm')
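# Usage sketch (hypothetical data arrays; the class follows the scikit-learn
# estimator API, so it can also be dropped into a sklearn Pipeline):
#   rbm = TheanoRBMFeatureExtractor(n_hidden=500, training_epochs=15)
#   rbm.fit(X_train)                 # trains the RBM with persistent CD (k=15)
#   X_out = rbm.transform(X_test)    # mean-field visible reconstructions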
"""
API routes
~~~~~~~~~~
Routes from this file are included first.
Routes that can't be grouped under an endpoint go here
"""
import os
from app import gpg
from app.core import ApiResponse, ApiException
from app.tasks import send_to_ks
from flask import current_app, request
from flask_jsonschema import validate
from werkzeug.utils import secure_filename
from . import api
HTTP_METHODS = [
# RFC 2616
'OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT',
# RFC 2518
'PROPFIND', 'PROPPATCH', 'MKCOL', 'COPY', 'MOVE', 'LOCK', 'UNLOCK',
# RFC 3253
'VERSION-CONTROL', 'REPORT', 'CHECKOUT', 'CHECKIN', 'UNCHECKOUT',
'MKWORKSPACE', 'UPDATE', 'LABEL', 'MERGE', 'BASELINE-CONTROL',
'MKACTIVITY',
# RFC 3648
'ORDERPATCH',
# RFC 3744
'ACL',
# RFC 5789
'PATCH'
]
@api.route('/')
def api_index():
"""List available endpoints
"""
url_rules = [r.rule for r in current_app.url_map.iter_rules()]
return ApiResponse({'endpoints': sorted(list(set(url_rules)))})
@api.route('/teapot')
def teapot():
return ApiResponse({'message': "I'm a teapot"}, 418)
@api.route('/users', methods=HTTP_METHODS)
def api_honeytoken():
# Honeytoken endpoint
return ApiResponse({'message': "No such user"}, 200)
@api.route('/submit-key', methods=['POST', 'PUT'])
@validate('gnupg', 'submit_key')
def submit_gpg_key():
"""Submit GPG key to CERT-EU keyserver
Keys are sent to the first server from the GPG_KEYSERVERS configuration
option.
**Example request**:
.. sourcecode:: http
POST /submit-key HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
Content-Type: application/json
{
"ascii_key": "-----BEGIN PGP PUBLIC KEY BLOCK-----nmQENBFHn
...-----END PGP PUBLIC KEY BLOCK-----"
}
**Example response**:
.. sourcecode:: http
HTTP/1.0 201 CREATED
Content-Type: application/json
{
"fingerprints": [
"2D39D3A9ACCD18B1D7774A00A485C88DDA2AA2BF"
],
"message": "Key saved"
}
:reqheader Accept: Content type(s) accepted by the client
:resheader Content-Type: this depends on `Accept` header or request
:<json string ascii_key: ASCII armored GPG public key
:>json array fingerprints: List of fingerprints
:>json string message: Status message
:statuscode 201: GPG key successfully saved
:statuscode 400: Bad request
"""
result = gpg.gnupg.import_keys(request.json['ascii_key'])
if result.fingerprints:
send_to_ks.delay(
current_app.config['GPG_KEYSERVERS'][0], result.fingerprints
)
return ApiResponse({
'message': 'Key saved',
'fingerprints': [f for f in result.fingerprints]},
201)
else:
raise ApiException('The PGP Key could not be imported')
@api.route('/upload', methods=['POST'])
def upload():
"""Upload files. This endpoint is used to upload "trusted" files;
i.e. files created by CERT-EU,
e.g. CITAR, CIMBL, IDS signatures, etc.
**Example request**:
.. sourcecode:: http
POST /api/1.0/upload HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryklDA9
------WebKitFormBoundaryklDA94BtcALil3R2
Content-Disposition: form-data; name="files[0]"; filename="test.gz"
Content-Type: application/x-gzip
------WebKitFormBoundaryklDA94BtcALil3R2--
**Example response**:
.. sourcecode:: http
HTTP/1.0 201 CREATED
Content-Type: application/json
{
"files": [
"test.gz"
],
"message": "Files uploaded"
}
:reqheader Accept: Content type(s) accepted by the client
:reqheader Content-Type: multipart/form-data required
:resheader Content-Type: this depends on `Accept` header or request
:>json array files: List of files saved to disk
:>json string message: Status message
:statuscode 201: Files successfully saved
"""
uploaded_files = []
for idx, file in request.files.items():
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['APP_UPLOADS'], filename))
uploaded_files.append(filename)
return ApiResponse({
'message': 'Files uploaded',
'files': uploaded_files
}, 201)
@api.route('/search-keys', methods=['POST'])
def search_public_ks(email=None):
"""Search GPG keys on public keyserver pool.
The keysever pool is the second server from the GPG_KEYSERVERS
configuration option.
**Example request**:
.. sourcecode:: http
POST /api/1.0/search-keys HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
Content-Type: application/json
{
"email": "alex@cert.europa.eu"
}
**Example response**:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"keys": [
{
"algo": "1",
"date": "1379079830",
"expires": "1479375689",
"keyid": "8CC4185CF057F6F8690309DD28432835514AA0F6",
"length": "4096",
"type": "pub",
"uids": [
"Alexandru Ciobanu <alex@cert.europa.eu>"
]
}
]
}
:reqheader Accept: Content type(s) accepted by the client
:resheader Content-Type: this depends on `Accept` header or request
:<json string email: E-mail address
:>json array keys: List of found keys
:>jsonarr string algo: `Key algorithm
<https://tools.ietf.org/html/rfc4880#section-9.1>`_
:>jsonarr string date: Creation date
:>jsonarr string expires: Expiration date
:>jsonarr string keyid: Key ID
:>jsonarr string length: Key size
:>jsonarr string type: Key type. Only public keys are returned
:>jsonarr array uids: User IDs associated with the key
:statuscode 200: GPG keys found
:statuscode 404: No keys found for given email address
:param email:
"""
if email is None:
email = request.json['email']
keys = gpg.gnupg.search_keys(
email, current_app.config['GPG_KEYSERVERS'][1])
if not keys:
raise ApiException('No keys found', 404)
return ApiResponse({'keys': keys})
@api.route('/import-keys', methods=['POST'])
def import_keys():
"""Import GPG keys from public keyserver into local keychain
**Example request**:
.. sourcecode:: http
POST /api/1.0/import-keys HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
Content-Type: application/json;charset=UTF-8
{
"keys": [
"8CC4185CF057F6F8690309DD28432835514AA0F6"
]
}
**Example response**:
.. sourcecode:: http
HTTP/1.0 201 CREATED
Content-Type: application/json
{
"message": "Imported"
}
:reqheader Accept: Content type(s) accepted by the client
:resheader Content-Type: this depends on `Accept` header or request
:<json array keys: Key IDs to import
:>json string message: Status message
:statuscode 201: GPG keys imported
"""
for key in request.json['keys']:
gpg.gnupg.recv_keys(current_app.config['GPG_KEYSERVERS'][1], key)
return ApiResponse({'message': 'Imported'}, 201)
# Natural Language Toolkit: Theorem Prover and Model Builder Interfaces
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# Dan Garrette <dhgarrette@gmail.com>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Interfaces and base classes for theorem provers and model builders.
``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a
list of assumptions.
``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions,
the model builder tries to build a model for the assumptions. Given a set of assumptions and a
goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy
the assumptions plus the negation of *G*.
"""
from __future__ import print_function
import threading
import time
class Prover(object):
"""
Interface for trying to prove a goal from assumptions. Both the goal and
the assumptions are constrained to be formulas of ``logic.Expression``.
"""
def prove(self, goal=None, assumptions=None, verbose=False):
"""
:return: Whether the proof was successful or not.
:rtype: bool
"""
return self._prove(goal, assumptions, verbose)[0]
def _prove(self, goal=None, assumptions=None, verbose=False):
"""
:return: Whether the proof was successful or not, along with the proof
:rtype: tuple: (bool, str)
"""
raise NotImplementedError()
class ModelBuilder(object):
"""
Interface for trying to build a model of a set of formulas.
Open formulas are assumed to be universally quantified.
Both the goal and the assumptions are constrained to be formulas
of ``logic.Expression``.
"""
def build_model(self, goal=None, assumptions=None, verbose=False):
"""
Perform the actual model building.
:return: Whether a model was generated
:rtype: bool
"""
return self._build_model(goal, assumptions, verbose)[0]
def _build_model(self, goal=None, assumptions=None, verbose=False):
"""
Perform the actual model building.
:return: Whether a model was generated, and the model itself
:rtype: tuple(bool, sem.Valuation)
"""
raise NotImplementedError()
class TheoremToolCommand(object):
"""
This class holds a goal and a list of assumptions to be used in proving
or model building.
"""
def add_assumptions(self, new_assumptions):
"""
Add new assumptions to the assumption list.
:param new_assumptions: new assumptions
:type new_assumptions: list(sem.Expression)
"""
raise NotImplementedError()
def retract_assumptions(self, retracted, debug=False):
"""
Retract assumptions from the assumption list.
:param debug: If True, give warning when ``retracted`` is not present on
assumptions list.
:type debug: bool
:param retracted: assumptions to be retracted
:type retracted: list(sem.Expression)
"""
raise NotImplementedError()
def assumptions(self):
"""
List the current assumptions.
:return: list of ``Expression``
"""
raise NotImplementedError()
def goal(self):
"""
Return the goal
:return: ``Expression``
"""
raise NotImplementedError()
def print_assumptions(self):
"""
Print the list of the current assumptions.
"""
raise NotImplementedError()
class ProverCommand(TheoremToolCommand):
"""
This class holds a ``Prover``, a goal, and a list of assumptions. When
prove() is called, the ``Prover`` is executed with the goal and assumptions.
"""
def prove(self, verbose=False):
"""
Perform the actual proof.
"""
raise NotImplementedError()
def proof(self, simplify=True):
"""
Return the proof string
:param simplify: bool simplify the proof?
:return: str
"""
raise NotImplementedError()
def get_prover(self):
"""
Return the prover object
:return: ``Prover``
"""
raise NotImplementedError()
class ModelBuilderCommand(TheoremToolCommand):
"""
This class holds a ``ModelBuilder``, a goal, and a list of assumptions.
When build_model() is called, the ``ModelBuilder`` is executed with the goal
and assumptions.
"""
def build_model(self, verbose=False):
"""
Perform the actual model building.
:return: A model if one is generated; None otherwise.
:rtype: sem.Valuation
"""
raise NotImplementedError()
def model(self, format=None):
"""
Return a string representation of the model
:param format: str indicating the format for displaying the model
:return: str
"""
raise NotImplementedError()
def get_model_builder(self):
"""
Return the model builder object
:return: ``ModelBuilder``
"""
raise NotImplementedError()
class BaseTheoremToolCommand(TheoremToolCommand):
"""
This class holds a goal and a list of assumptions to be used in proving
or model building.
"""
def __init__(self, goal=None, assumptions=None):
"""
:param goal: Input expression to prove
:type goal: sem.Expression
:param assumptions: Input expressions to use as assumptions in
the proof.
:type assumptions: list(sem.Expression)
"""
self._goal = goal
if not assumptions:
self._assumptions = []
else:
self._assumptions = list(assumptions)
self._result = None
"""A holder for the result, to prevent unnecessary re-proving"""
def add_assumptions(self, new_assumptions):
"""
Add new assumptions to the assumption list.
:param new_assumptions: new assumptions
:type new_assumptions: list(sem.Expression)
"""
self._assumptions.extend(new_assumptions)
self._result = None
def retract_assumptions(self, retracted, debug=False):
"""
Retract assumptions from the assumption list.
:param debug: If True, give warning when ``retracted`` is not present on
assumptions list.
:type debug: bool
:param retracted: assumptions to be retracted
:type retracted: list(sem.Expression)
"""
retracted = set(retracted)
result_list = [a for a in self._assumptions if a not in retracted]
if debug and result_list == self._assumptions:
print(Warning("Assumptions list has not been changed:"))
self.print_assumptions()
self._assumptions = result_list
self._result = None
def assumptions(self):
"""
List the current assumptions.
:return: list of ``Expression``
"""
return self._assumptions
def goal(self):
"""
Return the goal
:return: ``Expression``
"""
return self._goal
def print_assumptions(self):
"""
Print the list of the current assumptions.
"""
for a in self.assumptions():
print(a)
class BaseProverCommand(BaseTheoremToolCommand, ProverCommand):
"""
This class holds a ``Prover``, a goal, and a list of assumptions. When
prove() is called, the ``Prover`` is executed with the goal and assumptions.
"""
def __init__(self, prover, goal=None, assumptions=None):
"""
:param prover: The theorem tool to execute with the assumptions
:type prover: Prover
:see: ``BaseTheoremToolCommand``
"""
self._prover = prover
"""The theorem tool to execute with the assumptions"""
BaseTheoremToolCommand.__init__(self, goal, assumptions)
self._proof = None
def prove(self, verbose=False):
"""
Perform the actual proof. Store the result to prevent unnecessary
re-proving.
"""
if self._result is None:
self._result, self._proof = self._prover._prove(self.goal(),
self.assumptions(),
verbose)
return self._result
def proof(self, simplify=True):
"""
Return the proof string
:param simplify: bool simplify the proof?
:return: str
"""
if self._result is None:
raise LookupError("You have to call prove() first to get a proof!")
else:
return self.decorate_proof(self._proof, simplify)
def decorate_proof(self, proof_string, simplify=True):
"""
Modify and return the proof string
:param proof_string: str the proof to decorate
:param simplify: bool simplify the proof?
:return: str
"""
return proof_string
def get_prover(self):
return self._prover
class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand):
"""
This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When
build_model() is called, the ``ModelBuilder`` is executed with the goal and
assumptions.
"""
def __init__(self, modelbuilder, goal=None, assumptions=None):
"""
:param modelbuilder: The theorem tool to execute with the assumptions
:type modelbuilder: ModelBuilder
:see: ``BaseTheoremToolCommand``
"""
self._modelbuilder = modelbuilder
"""The theorem tool to execute with the assumptions"""
BaseTheoremToolCommand.__init__(self, goal, assumptions)
self._model = None
def build_model(self, verbose=False):
"""
Attempt to build a model. Store the result to prevent unnecessary
re-building.
"""
if self._result is None:
self._result, self._model = \
self._modelbuilder._build_model(self.goal(),
self.assumptions(),
verbose)
return self._result
def model(self, format=None):
"""
Return a string representation of the model
:param format: str indicating the format for displaying the model
:return: str
"""
if self._result is None:
raise LookupError('You have to call build_model() first to '
'get a model!')
else:
return self._decorate_model(self._model, format)
def _decorate_model(self, valuation_str, format=None):
"""
:param valuation_str: str with the model builder's output
:param format: str indicating the format for displaying
:return: str
"""
return valuation_str
def get_model_builder(self):
return self._modelbuilder
class TheoremToolCommandDecorator(TheoremToolCommand):
"""
A base decorator for the ``ProverCommandDecorator`` and
``ModelBuilderCommandDecorator`` classes from which decorators can extend.
"""
def __init__(self, command):
"""
:param command: ``TheoremToolCommand`` to decorate
"""
self._command = command
#The decorator has its own versions of 'result' different from the
#underlying command
self._result = None
def assumptions(self):
return self._command.assumptions()
def goal(self):
return self._command.goal()
def add_assumptions(self, new_assumptions):
self._command.add_assumptions(new_assumptions)
self._result = None
def retract_assumptions(self, retracted, debug=False):
self._command.retract_assumptions(retracted, debug)
self._result = None
def print_assumptions(self):
self._command.print_assumptions()
class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand):
"""
A base decorator for the ``ProverCommand`` class from which other
prover command decorators can extend.
"""
def __init__(self, proverCommand):
"""
:param proverCommand: ``ProverCommand`` to decorate
"""
TheoremToolCommandDecorator.__init__(self, proverCommand)
#The decorator has its own versions of 'result' and 'proof'
#because they may be different from the underlying command
self._proof = None
def prove(self, verbose=False):
if self._result is None:
prover = self.get_prover()
self._result, self._proof = prover._prove(self.goal(),
self.assumptions(),
verbose)
return self._result
def proof(self, simplify=True):
"""
Return the proof string
:param simplify: bool simplify the proof?
:return: str
"""
if self._result is None:
raise LookupError("You have to call prove() first to get a proof!")
else:
return self.decorate_proof(self._proof, simplify)
def decorate_proof(self, proof_string, simplify=True):
"""
Modify and return the proof string
:param proof_string: str the proof to decorate
:param simplify: bool simplify the proof?
:return: str
"""
return self._command.decorate_proof(proof_string, simplify)
def get_prover(self):
return self._command.get_prover()
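# A small decorator sketch (hypothetical and illustrative only, not part of
# the original module): a ProverCommandDecorator subclass can post-process the
# proof of the command it wraps without re-running the prover; here
# decorate_proof() simply prepends a banner line to whatever the wrapped
# command produces.
class _BannerProofDecorator(ProverCommandDecorator):
    def decorate_proof(self, proof_string, simplify=True):
        decorated = ProverCommandDecorator.decorate_proof(self, proof_string, simplify)
        return 'PROOF\n%s' % (decorated,)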
class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand):
"""
A base decorator for the ``ModelBuilderCommand`` class from which other
prover command decorators can extend.
"""
def __init__(self, modelBuilderCommand):
"""
:param modelBuilderCommand: ``ModelBuilderCommand`` to decorate
"""
TheoremToolCommandDecorator.__init__(self, modelBuilderCommand)
#The decorator has its own versions of 'result' and 'valuation'
#because they may be different from the underlying command
self._model = None
def build_model(self, verbose=False):
"""
Attempt to build a model. Store the result to prevent unnecessary
re-building.
"""
if self._result is None:
modelbuilder = self.get_model_builder()
self._result, self._model = \
modelbuilder._build_model(self.goal(),
self.assumptions(),
verbose)
return self._result
def model(self, format=None):
"""
Return a string representation of the model
        :param format: str indicating the format for displaying
:return: str
"""
if self._result is None:
raise LookupError('You have to call build_model() first to '
'get a model!')
else:
return self._decorate_model(self._model, format)
def _decorate_model(self, valuation_str, format=None):
"""
        Modify and return the model string
:param valuation_str: str with the model builder's output
:param format: str indicating the format for displaying
:return: str
"""
return self._command._decorate_model(valuation_str, format)
def get_model_builder(self):
        return self._command.get_model_builder()
class ParallelProverBuilder(Prover, ModelBuilder):
"""
This class stores both a prover and a model builder and when either
prove() or build_model() is called, then both theorem tools are run in
parallel. Whichever finishes first, the prover or the model builder, is the
result that will be used.
"""
def __init__(self, prover, modelbuilder):
self._prover = prover
self._modelbuilder = modelbuilder
def _prove(self, goal=None, assumptions=None, verbose=False):
return self._run(goal, assumptions, verbose), ''
def _build_model(self, goal=None, assumptions=None, verbose=False):
return not self._run(goal, assumptions, verbose), ''
def _run(self, goal, assumptions, verbose):
        # Set up two threads, the Prover and the ModelBuilder, to run in parallel
tp_thread = TheoremToolThread(lambda: self._prover.prove(goal, assumptions, verbose), verbose, 'TP')
mb_thread = TheoremToolThread(lambda: self._modelbuilder.build_model(goal, assumptions, verbose), verbose, 'MB')
tp_thread.start()
mb_thread.start()
        while tp_thread.is_alive() and mb_thread.is_alive():
# wait until either the prover or the model builder is done
pass
if tp_thread.result is not None:
return tp_thread.result
elif mb_thread.result is not None:
return not mb_thread.result
else:
return None
class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand):
"""
This command stores both a prover and a model builder and when either
prove() or build_model() is called, then both theorem tools are run in
parallel. Whichever finishes first, the prover or the model builder, is the
result that will be used.
Because the theorem prover result is the opposite of the model builder
result, we will treat self._result as meaning "proof found/no model found".
"""
def __init__(self, prover, modelbuilder, goal=None, assumptions=None):
BaseProverCommand.__init__(self, prover, goal, assumptions)
BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions)
def prove(self, verbose=False):
return self._run(verbose)
def build_model(self, verbose=False):
return not self._run(verbose)
def _run(self, verbose):
        # Set up two threads, the Prover and the ModelBuilder, to run in parallel
tp_thread = TheoremToolThread(lambda: BaseProverCommand.prove(self, verbose), verbose, 'TP')
mb_thread = TheoremToolThread(lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, 'MB')
tp_thread.start()
mb_thread.start()
        while tp_thread.is_alive() and mb_thread.is_alive():
# wait until either the prover or the model builder is done
pass
if tp_thread.result is not None:
self._result = tp_thread.result
elif mb_thread.result is not None:
self._result = not mb_thread.result
return self._result
class TheoremToolThread(threading.Thread):
def __init__(self, command, verbose, name=None):
threading.Thread.__init__(self)
self._command = command
self._result = None
self._verbose = verbose
self._name = name
def run(self):
try:
self._result = self._command()
if self._verbose:
print('Thread %s finished with result %s at %s' % \
(self._name, self._result, time.localtime(time.time())))
except Exception as e:
print(e)
print('Thread %s completed abnormally' % (self._name))
@property
def result(self): return self._result
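# A minimal usage sketch (hypothetical helpers, illustrative only):
# _TrivialProver and _prover_command_example are not part of the original
# module. Any object exposing _prove(goal, assumptions, verbose) -> (bool, str)
# can back a BaseProverCommand; the command caches the result, so repeated
# prove() calls do not re-run the prover.
class _TrivialProver(Prover):
    def _prove(self, goal=None, assumptions=None, verbose=False):
        # "Succeeds" only when the goal literally appears among the assumptions.
        found = goal is not None and goal in (assumptions or [])
        return (found, 'goal found in assumptions' if found else '')
def _prover_command_example(goal, assumptions):
    cmd = BaseProverCommand(_TrivialProver(), goal, assumptions)
    if cmd.prove():            # result is cached on the command after this call
        return cmd.proof()     # decorate_proof() is applied to the stored proof
    return None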
|
|
import errno
import msgpack
from twisted.python import failure
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
from twisted.protocols import policies
MSGTYPE_REQUEST=0
MSGTYPE_RESPONSE=1
MSGTYPE_NOTIFICATION=2
class MsgpackError(Exception):
def __init__(self, message, errno=0, result=None):
"""
msgpack rpc errors include a 'result' field
"""
self.message = message
self.errno = errno
self.result = result
def __str__(self):
return "[Errno %s] %s" % (self.errno, self.message)
class Msgpack(protocol.Protocol, policies.TimeoutMixin):
"""
msgpack rpc client/server protocol
@ivar factory: The L{MsgpackClientFactory} or L{MsgpackServerFactory} which created this L{Msgpack}.
"""
def __init__(self, factory, sendErrors=False, timeOut=None, packerEncoding="utf-8", unpackerEncoding=None, default=None):
"""
@param factory: factory which created this protocol.
@type factory: C{protocol.Factory}.
@param sendErrors: forward any uncaught Exception details to remote peer.
@type sendErrors: C{bool}.
@param timeOut: idle timeout in seconds before connection will be closed.
@type timeOut: C{int}
@param packerEncoding: encoding used to encode Python str and unicode. Default is 'utf-8'.
@type packerEncoding: C{str}
@param unpackerEncoding: encoding used for decoding msgpack bytes. If None (default), msgpack bytes are deserialized to Python bytes.
@type unpackerEncoding: C{str}.
@param default: if msgpack fails to serialize an object it will pass the object into this method, and try to serialize the result.
@type default: C{callable}.
"""
self.factory = factory
self._sendErrors = sendErrors
self._incoming_requests = {}
self._outgoing_requests = {}
self._next_msgid = 0
self._packer = msgpack.Packer(encoding=packerEncoding)
self._unpacker = msgpack.Unpacker(encoding=unpackerEncoding, unicode_errors='strict')
def createRequest(self, method, *params):
msgid = self.getNextMsgid()
message = (MSGTYPE_REQUEST, msgid, method, params)
self.writeMessage(message)
df = defer.Deferred()
self._outgoing_requests[msgid] = df
return df
def createNotification(self, method, params):
if not type(params) in (list, tuple):
params = (params,)
message = (MSGTYPE_NOTIFICATION, method, params)
self.writeMessage(message)
def getNextMsgid(self):
self._next_msgid += 1
return self._next_msgid
def dataReceived(self, data):
self.resetTimeout()
self._unpacker.feed(data)
for message in self._unpacker:
self.messageReceived(message)
def messageReceived(self, message):
if message[0] == MSGTYPE_REQUEST:
return self.requestReceived(message)
if message[0] == MSGTYPE_RESPONSE:
return self.responseReceived(message)
if message[0] == MSGTYPE_NOTIFICATION:
return self.notificationReceived(message)
return self.undefinedMessageReceived(message)
def requestReceived(self, message):
try:
(msgType, msgid, methodName, params) = message
except ValueError, e:
if self._sendErrors:
raise
if not len(message) == 4:
raise MsgpackError("Incorrect message length. Expected 4; received %s" % (len(message),), errno.EINVAL)
raise MsgpackError("Failed to unpack request.", errno.EINVAL)
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("Unexpected error. Failed to unpack request.", errno.EINVAL)
if msgid in self._incoming_requests:
raise MsgpackError("Request with msgid '%s' already exists" % (msgid,), errno.EALREADY)
try:
result = self.callRemoteMethod(msgid, methodName, params)
except Exception, e:
if self._sendErrors:
f = failure.Failure()
else:
ex = MsgpackError("Failed to find method: %s" % (methodName,), errno.ENOSYS)
f = failure.Failure(exc_value=ex)
return self.respondErrback(f, msgid)
try:
result.addCallback(self.respondCallback, msgid)
result.addErrback(self.respondErrback, msgid)
result.addBoth(self.endRequest, msgid)
self._incoming_requests[msgid] = result
return result
except AttributeError, e:
return self.respondCallback(result, msgid)
def getCallableForMethodName(self, methodName):
try:
return getattr(self, "remote_" + methodName)
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("Client attempted to call unimplemented method: remote_%" % (methodName,), errno.ENOSYS)
def callRemoteMethod(self, msgid, methodName, params):
try:
method = self.getCallableForMethodName(methodName)
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("Client attempted to call unimplemented method: %s" % (methodName,), errno.ENOSYS)
send_msgid = False
try:
"""
            If the remote_method has a keyword argument called msgid, then pass
it the msgid as a keyword argument. 'params' is always a list.
"""
method_arguments = method.func_code.co_varnames
if 'msgid' in method_arguments:
send_msgid = True
except Exception, e:
pass
try:
if send_msgid:
result = method(*params, msgid=msgid)
else:
result = method(*params)
except TypeError, e:
if self._sendErrors:
raise
raise MsgpackError("Wrong number of arguments for %s" % (methodName,), errno.EINVAL)
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("Unexpected error calling %s" % (methodName), 0)
return result
def endRequest(self, result, msgid):
if msgid in self._incoming_requests:
del self._incoming_requests[msgid]
return result
def responseReceived(self, message):
try:
(msgType, msgid, error, result) = message
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("Failed to unpack response: %s" % (e,), errno.EINVAL)
try:
df = self._outgoing_requests.pop(msgid)
except KeyError, e:
"""
There's nowhere to send this error, except the log
if self._sendErrors:
raise
raise MsgpackError("Failed to find dispatched request with msgid %s to match incoming repsonse" % (msgid,), errno.ENOSYS)
"""
pass
if error is not None:
"""
The remote host returned an error, so we need to create a Failure
object to pass into the errback chain. The Failure object in turn
requires an Exception
"""
ex = MsgpackError(error, 0, result=result)
df.errback(failure.Failure(exc_value=ex))
else:
df.callback(result)
def respondCallback(self, result, msgid):
error = None
response = (MSGTYPE_RESPONSE, msgid, error, result)
return self.writeMessage(response)
def respondErrback(self, f, msgid):
"""
"""
result = None
if self._sendErrors:
error = f.getBriefTraceback()
else:
error = f.getErrorMessage()
self.respondError(msgid, error, result)
def respondError(self, msgid, error, result=None):
response = (MSGTYPE_RESPONSE, msgid, error, result)
self.writeMessage(response)
def writeMessage(self, message):
try:
message = self._packer.pack(message)
except Exception, e:
if self._sendErrors:
raise
raise MsgpackError("ERROR: Failed to write message: %s" % (message[0], message[1],))
# transport.write returns None
self.transport.write(message)
def notificationReceived(self, message):
# Notifications don't expect a return value, so they don't supply a msgid
msgid = None
try:
(msgType, methodName, params) = message
except Exception, e:
# Log the error - there's no way to return it for a notification
print e
return
try:
result = self.callRemoteMethod(msgid, methodName, params)
try:
result.addBoth(self.notificationCallback)
except AttributeError, e:
self.notificationCallback(result)
except Exception, e:
# Log the error - there's no way to return it for a notification
print e
return
return None
def notificationCallback(self, result):
# Log the result if required
pass
def undefinedMessageReceived(self, message):
raise NotImplementedError("Msgpack received a message of type '%s', " \
"and no method has been specified to " \
"handle this." % (message[0],))
def connectionMade(self):
#print "connectionMade"
self.factory.numProtocols = self.factory.numProtocols+1
"""
self.transport.write(
"Welcome! There are currently %d open connections.\n" %
(self.factory.numProtocols,))
"""
def connectionLost(self, reason):
#print "connectionLost"
self.factory.numProtocols = self.factory.numProtocols-1
def closeConnection(self):
self.transport.loseConnection()
class MsgpackServerFactory(protocol.Factory):
protocol = Msgpack
numProtocols = 0
def buildProtocol(self, addr):
p = self.protocol(self, sendErrors=True)
return p
class MsgpackClientFactory(protocol.ReconnectingClientFactory):
maxDelay = 12
protocol = Msgpack
numProtocols = 0
def buildProtocol(self, addr):
self.resetDelay()
p = self.protocol(self)
return p
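# A minimal server-side sketch (EchoProtocol and the port number below are
# assumptions, purely illustrative): request handlers are ordinary methods
# named "remote_<method>", which requestReceived() resolves through
# getCallableForMethodName(); their return values are packed into
# MSGTYPE_RESPONSE messages by respondCallback().
class EchoProtocol(Msgpack):
    def remote_echo(self, value):
        # Echo the argument straight back to the caller.
        return value
class EchoServerFactory(MsgpackServerFactory):
    protocol = EchoProtocol
# To serve it (left commented out so importing this module has no side effects):
# reactor.listenTCP(8007, EchoServerFactory())
# reactor.run()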
|
|
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.random import random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
.. versionadded:: 0.17
Dummy Classifier now supports prior fitting strategy using
parameter *prior*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : array or list of array of shape = [n_classes]
Class labels for each output.
n_classes_ : array or list of array of shape = [n_classes]
        Number of labels for each output.
class_prior_ : array or list of array of shape = [n_classes]
Probability of each class for each output.
n_outputs_ : int,
Number of outputs.
    output_2d_ : bool,
True if the output at fit is 2d, else false.
sparse_output_ : bool,
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant", "prior"):
raise ValueError("Unknown strategy type.")
if self.strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if (self.strategy == "constant" and
any(constant[k] not in self.classes_[k]
for k in range(self.n_outputs_))):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self.strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self.strategy == "stratified":
class_prob = class_prior_
elif self.strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self.strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self.strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self.strategy == "stratified":
y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)).T
elif self.strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self.strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self.strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
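# A minimal usage sketch (synthetic data, illustrative only): with
# strategy="most_frequent" the classifier ignores X entirely and always
# predicts the majority label observed in y during fit().
def _dummy_classifier_example():
    X = np.zeros((6, 2))               # features are never looked at
    y = np.array([0, 1, 1, 1, 0, 1])   # majority class is 1
    clf = DummyClassifier(strategy="most_frequent").fit(X, y)
    return clf.predict(np.zeros((3, 2)))  # -> array([1, 1, 1])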
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0]
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : float or array of shape [n_outputs]
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int,
Number of outputs.
    output_2d_ : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
"'mean', 'median', 'quantile' or 'constant'"
% self.strategy)
y = check_array(y, ensure_2d=False)
if len(y) == 0:
raise ValueError("y must not be empty.")
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
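# A minimal usage sketch (synthetic targets, illustrative only): the
# "quantile" strategy memorizes a single percentile of y at fit() time and
# repeats that constant for every sample at predict() time.
def _dummy_regressor_example():
    X = np.zeros((5, 1))                    # features are never looked at
    y = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    reg = DummyRegressor(strategy="quantile", quantile=0.5).fit(X, y)
    return reg.predict(np.zeros((2, 1)))    # -> array([3., 3.])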
|
|
import sys
import pytest
from click.core import Argument
from click.core import Command
from click.core import Group
from click.core import Option
from click.shell_completion import CompletionItem
from click.shell_completion import ShellComplete
from click.types import Choice
from click.types import File
from click.types import Path
def _get_completions(cli, args, incomplete):
comp = ShellComplete(cli, {}, cli.name, "_CLICK_COMPLETE")
return comp.get_completions(args, incomplete)
def _get_words(cli, args, incomplete):
return [c.value for c in _get_completions(cli, args, incomplete)]
def test_command():
cli = Command("cli", params=[Option(["-t", "--test"])])
assert _get_words(cli, [], "") == []
assert _get_words(cli, [], "-") == ["-t", "--test", "--help"]
assert _get_words(cli, [], "--") == ["--test", "--help"]
assert _get_words(cli, [], "--t") == ["--test"]
# -t has been seen, so --test isn't suggested
assert _get_words(cli, ["-t", "a"], "-") == ["--help"]
def test_group():
cli = Group("cli", params=[Option(["-a"])], commands=[Command("x"), Command("y")])
assert _get_words(cli, [], "") == ["x", "y"]
assert _get_words(cli, [], "-") == ["-a", "--help"]
def test_group_command_same_option():
cli = Group(
"cli", params=[Option(["-a"])], commands=[Command("x", params=[Option(["-a"])])]
)
assert _get_words(cli, [], "-") == ["-a", "--help"]
assert _get_words(cli, ["-a", "a"], "-") == ["--help"]
assert _get_words(cli, ["-a", "a", "x"], "-") == ["-a", "--help"]
assert _get_words(cli, ["-a", "a", "x", "-a", "a"], "-") == ["--help"]
def test_chained():
cli = Group(
"cli",
chain=True,
commands=[
Command("set", params=[Option(["-y"])]),
Command("start"),
Group("get", commands=[Command("full")]),
],
)
assert _get_words(cli, [], "") == ["get", "set", "start"]
assert _get_words(cli, [], "s") == ["set", "start"]
assert _get_words(cli, ["set", "start"], "") == ["get"]
# subcommands and parent subcommands
assert _get_words(cli, ["get"], "") == ["full", "set", "start"]
assert _get_words(cli, ["get", "full"], "") == ["set", "start"]
assert _get_words(cli, ["get"], "s") == ["set", "start"]
def test_help_option():
cli = Group("cli", commands=[Command("with"), Command("no", add_help_option=False)])
assert _get_words(cli, ["with"], "--") == ["--help"]
assert _get_words(cli, ["no"], "--") == []
def test_argument_order():
cli = Command(
"cli",
params=[
Argument(["plain"]),
Argument(["c1"], type=Choice(["a1", "a2", "b"])),
Argument(["c2"], type=Choice(["c1", "c2", "d"])),
],
)
# first argument has no completions
assert _get_words(cli, [], "") == []
assert _get_words(cli, [], "a") == []
# first argument filled, now completion can happen
assert _get_words(cli, ["x"], "a") == ["a1", "a2"]
assert _get_words(cli, ["x", "b"], "d") == ["d"]
def test_argument_default():
cli = Command(
"cli",
add_help_option=False,
params=[
Argument(["a"], type=Choice(["a"]), default="a"),
Argument(["b"], type=Choice(["b"]), default="b"),
],
)
assert _get_words(cli, [], "") == ["a"]
assert _get_words(cli, ["a"], "b") == ["b"]
# ignore type validation
assert _get_words(cli, ["x"], "b") == ["b"]
def test_type_choice():
cli = Command("cli", params=[Option(["-c"], type=Choice(["a1", "a2", "b"]))])
assert _get_words(cli, ["-c"], "") == ["a1", "a2", "b"]
assert _get_words(cli, ["-c"], "a") == ["a1", "a2"]
assert _get_words(cli, ["-c"], "a2") == ["a2"]
@pytest.mark.parametrize(
("type", "expect"),
[(File(), "file"), (Path(), "file"), (Path(file_okay=False), "dir")],
)
def test_path_types(type, expect):
cli = Command("cli", params=[Option(["-f"], type=type)])
out = _get_completions(cli, ["-f"], "ab")
assert len(out) == 1
c = out[0]
assert c.value == "ab"
assert c.type == expect
def test_option_flag():
cli = Command(
"cli",
add_help_option=False,
params=[
Option(["--on/--off"]),
Argument(["a"], type=Choice(["a1", "a2", "b"])),
],
)
assert _get_words(cli, [], "--") == ["--on", "--off"]
# flag option doesn't take value, use choice argument
assert _get_words(cli, ["--on"], "a") == ["a1", "a2"]
def test_option_custom():
def custom(ctx, param, incomplete):
return [incomplete.upper()]
cli = Command(
"cli",
params=[
Argument(["x"]),
Argument(["y"]),
Argument(["z"], shell_complete=custom),
],
)
assert _get_words(cli, ["a", "b"], "") == [""]
assert _get_words(cli, ["a", "b"], "c") == ["C"]
def test_autocompletion_deprecated():
# old function takes args and not param, returns all values, can mix
# strings and tuples
def custom(ctx, args, incomplete):
assert isinstance(args, list)
return [("art", "x"), "bat", "cat"]
with pytest.deprecated_call():
cli = Command("cli", params=[Argument(["x"], autocompletion=custom)])
assert _get_words(cli, [], "") == ["art", "bat", "cat"]
assert _get_words(cli, [], "c") == ["cat"]
def test_option_multiple():
cli = Command(
"type",
params=[Option(["-m"], type=Choice(["a", "b"]), multiple=True), Option(["-f"])],
)
assert _get_words(cli, ["-m"], "") == ["a", "b"]
assert "-m" in _get_words(cli, ["-m", "a"], "-")
assert _get_words(cli, ["-m", "a", "-m"], "") == ["a", "b"]
# used single options aren't suggested again
assert "-c" not in _get_words(cli, ["-c", "f"], "-")
def test_option_nargs():
cli = Command("cli", params=[Option(["-c"], type=Choice(["a", "b"]), nargs=2)])
assert _get_words(cli, ["-c"], "") == ["a", "b"]
assert _get_words(cli, ["-c", "a"], "") == ["a", "b"]
assert _get_words(cli, ["-c", "a", "b"], "") == []
def test_argument_nargs():
cli = Command(
"cli",
params=[
Argument(["x"], type=Choice(["a", "b"]), nargs=2),
Argument(["y"], type=Choice(["c", "d"]), nargs=-1),
Option(["-z"]),
],
)
assert _get_words(cli, [], "") == ["a", "b"]
assert _get_words(cli, ["a"], "") == ["a", "b"]
assert _get_words(cli, ["a", "b"], "") == ["c", "d"]
assert _get_words(cli, ["a", "b", "c"], "") == ["c", "d"]
assert _get_words(cli, ["a", "b", "c", "d"], "") == ["c", "d"]
assert _get_words(cli, ["a", "-z", "1"], "") == ["a", "b"]
assert _get_words(cli, ["a", "-z", "1", "b"], "") == ["c", "d"]
def test_double_dash():
cli = Command(
"cli",
add_help_option=False,
params=[
Option(["--opt"]),
Argument(["name"], type=Choice(["name", "--", "-o", "--opt"])),
],
)
assert _get_words(cli, [], "-") == ["--opt"]
assert _get_words(cli, ["value"], "-") == ["--opt"]
assert _get_words(cli, [], "") == ["name", "--", "-o", "--opt"]
assert _get_words(cli, ["--"], "") == ["name", "--", "-o", "--opt"]
def test_hidden():
cli = Group(
"cli",
commands=[
Command(
"hidden",
add_help_option=False,
hidden=True,
params=[
Option(["-a"]),
Option(["-b"], type=Choice(["a", "b"]), hidden=True),
],
)
],
)
assert "hidden" not in _get_words(cli, [], "")
assert "hidden" not in _get_words(cli, [], "hidden")
assert _get_words(cli, ["hidden"], "-") == ["-a"]
assert _get_words(cli, ["hidden", "-b"], "") == ["a", "b"]
def test_add_different_name():
cli = Group("cli", commands={"renamed": Command("original")})
words = _get_words(cli, [], "")
assert "renamed" in words
assert "original" not in words
def test_completion_item_data():
c = CompletionItem("test", a=1)
assert c.a == 1
assert c.b is None
@pytest.fixture()
def _patch_for_completion(monkeypatch):
monkeypatch.setattr("click.core._fast_exit", sys.exit)
monkeypatch.setattr(
"click.shell_completion.BashComplete._check_version", lambda self: True
)
@pytest.mark.parametrize(
"shell", ["bash", "zsh", "fish"],
)
@pytest.mark.usefixtures("_patch_for_completion")
def test_full_source(runner, shell):
cli = Group("cli", commands=[Command("a"), Command("b")])
result = runner.invoke(cli, env={"_CLI_COMPLETE": f"{shell}_source"})
assert f"_CLI_COMPLETE={shell}_complete" in result.output
@pytest.mark.parametrize(
("shell", "env", "expect"),
[
("bash", {"COMP_WORDS": "", "COMP_CWORD": "0"}, "plain,a\nplain,b\n"),
("bash", {"COMP_WORDS": "a b", "COMP_CWORD": "1"}, "plain,b\n"),
("zsh", {"COMP_WORDS": "", "COMP_CWORD": "0"}, "plain\na\n_\nplain\nb\nbee\n"),
("zsh", {"COMP_WORDS": "a b", "COMP_CWORD": "1"}, "plain\nb\nbee\n"),
("fish", {"COMP_WORDS": "", "COMP_CWORD": ""}, "plain,a\nplain,b\tbee\n"),
("fish", {"COMP_WORDS": "a b", "COMP_CWORD": "b"}, "plain,b\tbee\n"),
],
)
@pytest.mark.usefixtures("_patch_for_completion")
def test_full_complete(runner, shell, env, expect):
cli = Group("cli", commands=[Command("a"), Command("b", help="bee")])
env["_CLI_COMPLETE"] = f"{shell}_complete"
result = runner.invoke(cli, env=env)
assert result.output == expect
@pytest.mark.usefixtures("_patch_for_completion")
def test_context_settings(runner):
def complete(ctx, param, incomplete):
return ctx.obj["choices"]
cli = Command("cli", params=[Argument("x", shell_complete=complete)])
result = runner.invoke(
cli,
obj={"choices": ["a", "b"]},
env={"COMP_WORDS": "", "COMP_CWORD": "0", "_CLI_COMPLETE": "bash_complete"},
)
assert result.output == "plain,a\nplain,b\n"
@pytest.mark.parametrize(("value", "expect"), [(False, ["Au", "al"]), (True, ["al"])])
def test_choice_case_sensitive(value, expect):
cli = Command(
"cli",
params=[Option(["-a"], type=Choice(["Au", "al", "Bc"], case_sensitive=value))],
)
completions = _get_words(cli, ["-a"], "a")
assert completions == expect
|
|
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from lo import *
import numpy as np
from qkeras import print_qstats
from qkeras import QActivation
from qkeras import QConv2D
from qkeras import QDense
from qkeras import quantized_bits
from qkeras import ternary
np.random.seed(42)
OPTIMIZER = Adam(lr=0.002)
NB_EPOCH = 10
BATCH_SIZE = 32
VERBOSE = 1
NB_CLASSES = 10
N_HIDDEN = 100
VALIDATION_SPLIT = 0.1
RESHAPED = 784
def QDenseModel(weights_f, load_weights=False):
"""Construct QDenseModel."""
x = x_in = Input((28,28,1), name="input")
x = QActivation("quantized_relu(2)", name="act_i")(x)
x = Conv2D(32, (3, 3), strides=(2, 2), name="conv2d_0_m")(x)
x = BatchNormalization(name="bn0")(x)
#x = QConv2D(32, (3, 3), strides=(2, 2),
# kernel_quantizer=quantized_bits(4,0,1),
# bias_quantizer=quantized_bits(4,0,1),
# name="conv2d_0_m")(x)
x = QActivation("quantized_relu(2)", name="act0_m")(x)
x = Conv2D(64, (3, 3), strides=(2, 2), name="conv2d_1_m")(x)
x = BatchNormalization(name="bn1")(x)
x = QActivation("quantized_relu(2)", name="act1_m")(x)
x = Conv2D(64, (3, 3), strides=(2, 2), name="conv2d_2_m")(x)
x = BatchNormalization(name="bn2")(x)
x = QActivation("quantized_relu(2)", name="act2_m")(x)
x = Flatten(name="flatten")(x)
x = QDense(
NB_CLASSES,
kernel_quantizer=quantized_bits(4, 0, 1),
bias_quantizer=quantized_bits(4, 0, 1),
name="dense2")(x)
x = Activation("softmax", name="softmax")(x)
model = Model(inputs=[x_in], outputs=[x])
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer=OPTIMIZER, metrics=["accuracy"])
if load_weights and weights_f:
model.load_weights(weights_f)
return model
def UseNetwork(weights_f, load_weights=False):
"""Use DenseModel.
Args:
weights_f: weight file location.
load_weights: load weights when it is True.
"""
model = QDenseModel(weights_f, load_weights)
batch_size = BATCH_SIZE
(x_train_, y_train_), (x_test_, y_test_) = mnist.load_data()
x_train_ = x_train_.reshape(60000, 28, 28, 1)
x_test_ = x_test_.reshape(10000, 28, 28, 1)
x_train_ = x_train_.astype("float32")
x_test_ = x_test_.astype("float32")
x_train_ /= 256.
x_test_ /= 256.
# x_train_ = 2*x_train_ - 1.0
# x_test_ = 2*x_test_ - 1.0
print(x_train_.shape[0], "train samples")
print(x_test_.shape[0], "test samples")
y_train_ = to_categorical(y_train_, NB_CLASSES)
y_test_ = to_categorical(y_test_, NB_CLASSES)
if not load_weights:
model.fit(
x_train_,
y_train_,
batch_size=batch_size,
epochs=NB_EPOCH,
verbose=VERBOSE,
validation_split=VALIDATION_SPLIT)
if weights_f:
model.save_weights(weights_f)
score = model.evaluate(x_test_, y_test_, verbose=False)
print("Test score:", score[0])
print("Test accuracy:", score[1])
return model, x_train_, x_test_
def ParserArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--logic_optimize", default=False,
action="store_true",
help="optimize network.")
parser.add_argument("-l", "--load_weight", default=False,
action="store_true",
help="load weights directly from file.")
parser.add_argument("-w", "--weight_file", default=None)
parser.add_argument("--output_group", type=int, default=1)
parser.add_argument("--kernel", default=None, type=int,
help="kernel if more complex layer")
parser.add_argument("--strides", default=None, type=int,
help="stride if more complex layer")
parser.add_argument("--padding", default=None,
help="padding if more complex layer")
parser.add_argument("--conv_sample", default=None, type=int,
help="number of samples within image for conv layer")
parser.add_argument("--sample", default=None,
help="number of training samples")
parser.add_argument("--use_pla", default=False,
action="store_true", help="use pla table format")
parser.add_argument("--binary", default=False,
action="store_true", help="use binary inputs")
parser.add_argument("--i_name", default=None,
help="input layer name")
parser.add_argument("--o_name", default=None,
help="output layer name")
parser.add_argument("--run_abc", default=False, action="store_true")
parser.add_argument("--run_rf", default=False, action="store_true")
parser.add_argument("--n_trees", default=3, type=int)
a = parser.parse_args()
return a
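# A small illustrative sketch (the toy model below is hypothetical and is not
# used by the script): it reuses the same QDense/quantized_bits pattern as the
# "dense2" layer above, applied directly to a flat RESHAPED-sized input.
def ToyQDenseModel():
    x_in = Input((RESHAPED,), name="toy_input")
    x = QDense(
        NB_CLASSES,
        kernel_quantizer=quantized_bits(4, 0, 1),
        bias_quantizer=quantized_bits(4, 0, 1),
        name="toy_dense")(x_in)
    x = Activation("softmax", name="toy_softmax")(x)
    return Model(inputs=[x_in], outputs=[x])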
if __name__ == "__main__":
args = ParserArgs()
model, x_train, x_test = UseNetwork(args.weight_file, load_weights=args.load_weight)
if args.logic_optimize:
# i_dict = get_quantized_po2_dict(4,1,0)
i_dict = get_quantized_bits_dict(
2,0,0,mode="bin" if args.use_pla or args.binary else "dec")
o_dict = get_quantized_bits_dict(
2,0,0,mode="bin" if args.use_pla else "dec")
print("... generating table with {} entries".format(x_train.shape[0]))
strides, kernel, padding = model_to_receptive_field(
model, args.i_name, args.o_name)
files = optimize_conv2d_logic(
model, args.i_name, args.o_name, x_train,
i_dict, o_dict, output_group=args.output_group,
kernel=kernel[0], strides=strides[0], padding=padding[0],
samples=int(args.sample) if args.sample else x_train.shape[0],
randomize=args.conv_sample, generate_pla=args.use_pla, prefix="results")
if args.run_abc and args.use_pla:
run_abc_optimizer(files)
elif args.run_rf:
run_rf_optimizer(files, args.n_trees)
optimize_conv2d_logic(
model, args.i_name, args.o_name, x_test,
i_dict, o_dict, output_group=args.output_group,
kernel=kernel[0], strides=strides[0], padding=padding[0],
samples=int(args.sample) if args.sample else x_train.shape[0],
randomize=args.conv_sample, generate_pla=args.use_pla, prefix="test")
# optimize_conv2d_dt(model, "conv2d_1_m", "act1_m", i_dict, o_dict,
# x_train, single_output=args.single_output,
# samples=5000, pixels=2)
|
|
"""Tests for the _pyjava module.
This package contains tests for the internal components of PyJava, implemented
in C as the _pyjava package. Even if it is not used directly but rather through
the pyjava Python interface, the package is covered by tests.
"""
import math
import _pyjava
from base import PyjavaTestCase, unittest
class Test_getclass(PyjavaTestCase):
def test_nonexistent(self):
"""Tests that ClassNotFound is raised when getting an unknown class.
No other error should happen, and the problem should be detected
immediately.
"""
self.assertRaises(
_pyjava.ClassNotFound,
_pyjava.getclass,
'java/lang/Nonexistent')
def test_String(self):
"""Wraps a language class.
"""
String = _pyjava.getclass('java/lang/String')
self.assertIsNotNone(String)
self.assertEqual(String.getName(), u'java.lang.String')
self.assertIsInstance(String, _pyjava.JavaClass)
def test_Reader(self):
"""Wraps a well-known class.
"""
Reader = _pyjava.getclass('java/io/Reader')
self.assertIsNotNone(Reader)
self.assertEqual(Reader.getName(), u'java.io.Reader')
self.assertIsInstance(Reader, _pyjava.JavaClass)
class Test_classobject(PyjavaTestCase):
def test_subclass(self):
"""Tests subclassing a Java class.
"""
String = _pyjava.getclass('java/lang/String')
with self.assertRaises(NotImplementedError):
class MyString(String):
pass
@unittest.skip("JavaClass is not a metaclass right now")
def test_issubclass(self):
"""Requests well-known classes and tests issubclass().
"""
Object = _pyjava.getclass('java/lang/Object')
String = _pyjava.getclass('java/lang/String')
Class = _pyjava.getclass('java/lang/Class')
self.assertTrue(issubclass(String, String))
self.assertTrue(issubclass(Object, Object))
self.assertTrue(issubclass(Class, Class))
self.assertFalse(issubclass(Object, String))
self.assertFalse(issubclass(String, Class))
self.assertTrue(issubclass(Class, Object))
self.assertFalse(issubclass(Class, String))
self.assertTrue(issubclass(String, Object))
self.assertFalse(issubclass(Object, Class))
self.assertFalse(issubclass(String, list))
self.assertFalse(issubclass(int, Class))
@unittest.skip("JavaClass is not a metaclass right now")
def test_isinstance(self):
"""Requests well-known classes and tests isinstance().
"""
Collections = _pyjava.getclass('java/util/Collections')
List = _pyjava.getclass('java/util/List')
Integer = _pyjava.getclass('java/lang/Integer')
empty = Collections.emptyList()
self.assertTrue(isinstance(empty, List))
self.assertFalse(isinstance(empty, Integer))
self.assertFalse(isinstance(2, List))
self.assertFalse(isinstance(empty, list))
def test_is_same_object(self):
"""Tests for equality of references.
"""
jcl = _pyjava.getclass('pyjavatest/ObjFactory')
makeObject = jcl.makeObject
obj1 = makeObject(1)
obj2 = makeObject(2)
obj3 = makeObject(2)
# == here tests Java reference equality, it does not call equals()
self.assertTrue(obj1 == obj1)
self.assertTrue(obj2 == obj2)
self.assertTrue(obj3 == obj3)
self.assertFalse(obj1 == obj2)
self.assertTrue(obj2 == obj3)
self.assertFalse(obj3 == obj1)
self.assertFalse(obj2 == obj1)
self.assertTrue(obj3 == obj2)
self.assertFalse(obj1 == obj3)
String = _pyjava.getclass('java/lang/String')
# These shouldn't raise
self.assertFalse(obj1 == String)
self.assertFalse(obj2 == String)
class Test_get_method(PyjavaTestCase):
def test_method(self):
"""Requests a well-known method.
"""
String = _pyjava.getclass('java/lang/String')
length = String.length
self.assertIsNotNone(length)
self.assertTrue(isinstance(length, _pyjava.UnboundMethod))
def test_staticmethod(self):
"""Requests a well-known static method.
"""
Math = _pyjava.getclass('java/lang/Math')
sin = Math.sin
self.assertIsNotNone(sin)
self.assertTrue(isinstance(sin, _pyjava.UnboundMethod))
class Test_call(PyjavaTestCase):
def test_constructor(self):
"""Constructs a Java object from a constructor.
"""
Vector = _pyjava.getclass('java/util/Vector')
vector = Vector(10)
self.assertIsNotNone(vector)
self.assertEqual(vector.capacity(), 10)
def test_method(self):
"""Calls a well-known method on a wrapper returned by a static method.
"""
Collections = _pyjava.getclass('java/util/Collections')
List = _pyjava.getclass('java/util/List')
emptyList = Collections.emptyList
li = emptyList()
size = List.size
self.assertTrue(isinstance(size, _pyjava.UnboundMethod))
b_size = li.size
self.assertTrue(isinstance(b_size, _pyjava.BoundMethod))
self.assertEqual(size(li), 0)
self.assertEqual(b_size(), 0)
def test_staticmethod(self):
"""Calls a well-known static method.
"""
Math = _pyjava.getclass('java/lang/Math')
sin = Math.sin
self.assertAlmostEqual(sin(math.pi / 2), 1.0)
def test_badoverload(self):
"""Calls an existing method but with wrong argument types.
"""
Math = _pyjava.getclass('java/lang/Math')
sin = Math.sin
with self.assertRaises(_pyjava.NoMatchingOverload):
sin(4, 2)
with self.assertRaises(_pyjava.NoMatchingOverload):
sin()
class Test_get_field(PyjavaTestCase):
def test_field(self):
"""Requests a well-known field.
"""
Dimension = _pyjava.getclass('java/awt/Dimension')
d = Dimension()
self.assertEqual(d.width, 0)
def test_staticfield(self):
"""Requests a well-known static field.
"""
Collections = _pyjava.getclass('java/util/Collections')
empty_list = Collections.EMPTY_LIST
self.assertIsNotNone(empty_list)
self.assertEqual(empty_list.size(), 0)
def test_nonexistent_instance(self):
"""Requests an unknown field/method on an instance.
This should be detected immediately.
"""
Dimension = _pyjava.getclass('java/awt/Dimension')
d = Dimension()
with self.assertRaises(AttributeError):
d.nonExistentField
def test_nonexistent_class(self):
"""Requests an unknown field/method on a class.
This should be detected immediately.
"""
Math = _pyjava.getclass('java/lang/Math')
with self.assertRaises(AttributeError):
Math.nonExistentField
class Test_set_field(PyjavaTestCase):
def test_field(self):
"""Sets a well-known field.
"""
Dimension = _pyjava.getclass('java/awt/Dimension')
d = Dimension()
d.width = 42
self.assertEqual(d.width, 42)
def test_staticfield(self):
"""Sets a static field.
"""
SetField = _pyjava.getclass('pyjavatest/test_fields/SetField')
SetField.a = 4
SetField.b = u"hello"
self.assertEqual((SetField.a, SetField.b), (4, u"hello"))
def test_nonexistent_instance(self):
"""Sets an unknown field on an instance.
"""
Dimension = _pyjava.getclass('java/awt/Dimension')
d = Dimension()
with self.assertRaises(AttributeError):
d.nonExistentField = 42
SetField = _pyjava.getclass('pyjavatest/test_fields/SetField')
sf = SetField()
sf.c = 5
sf.d = u"r\xE9mi is out of ideas"
sf2 = SetField()
self.assertEqual(sf.c, 5)
self.assertEqual(sf2.c, 2)
self.assertEqual(len(sf.d), 20)
self.assertEqual(len(sf2.d), 22)
def test_nonexistent_class(self):
"""Sets an unknown field on a class.
"""
Dimension = _pyjava.getclass('java/awt/Dimension')
with self.assertRaises(AttributeError):
Dimension.nonExistentField = 42
def test_wrongtype(self):
"""Assigns values of different types to fields.
"""
SetField = _pyjava.getclass('pyjavatest/test_fields/SetField')
sf = SetField()
with self.assertRaises(TypeError):
SetField.a = 6.87
with self.assertRaises(TypeError):
SetField.b = sf
with self.assertRaises(TypeError):
sf.c = u"test"
with self.assertRaises(TypeError):
sf.d = 1
class Test_accessfield(PyjavaTestCase):
def test_staticfield(self):
"""Requests a well-known static field.
"""
Integer = _pyjava.getclass('java/lang/Integer')
size = Integer.SIZE
self.assertEqual(size, 32)
String = _pyjava.getclass('java/lang/String')
comparator = String.CASE_INSENSITIVE_ORDER
self.assertIsNotNone(comparator)
def test_testclass(self):
cl = _pyjava.getclass(
'pyjavatest/test_fields/AccessField')
obj = cl()
self.assertEqual(cl.a, 7)
self.assertEqual(cl.b, 'test')
self.assertEqual(cl.c, None)
self.assertEqual(obj.d, -7)
self.assertEqual(obj.e, None)
self.assertEqual(obj.f, '4')
class Test_reflection(PyjavaTestCase):
def test_forname(self):
"""Uses Class.forName().
"""
Class = _pyjava.getclass('java/lang/Class')
String = Class.forName(u'java.lang.String')
self.assertIsInstance(String, _pyjava.JavaClass)
self.assertEqual(String(u'lala').length(), 4)
String2 = _pyjava.getclass('java/lang/String')
self.assertEqual(String, String2)
with self.assertRaises(AttributeError):
# forName() is static and can only be accessed from Class
String2.forName
def test_nonstatic_method(self):
"""Access a non-static Class method.
"""
String = _pyjava.getclass('java/lang/String')
self.assertEqual(String.getName(), u'java.lang.String')
Class = _pyjava.getclass('java/lang/Class')
self.assertEqual(Class.getName(), u'java.lang.Class')
self.assertEqual(Class.getName(String), u'java.lang.String')
class Test_conversions(PyjavaTestCase):
"""Big set of method calls to cover the conversions.
"""
def setUp(self):
self._jcl = _pyjava.getclass(
'pyjavatest/test_conversions/CallMethod_Conversions')
self._jo = self._jcl()
def test_v_ii(self):
m = self._jcl.v_ii
self.assertIsNone(m(self._jo, 12, -5))
def test_i_fc(self):
m = self._jcl.i_fc
self.assertEqual(m(self._jo, 12.5, u'\u05D0'), -7)
def test_b_Bs(self):
m = self._jcl._b_Bs
self.assertEqual(m(0x42, 13042), False)
def test_c_lS(self):
m = self._jcl.c_lS
self.assertEqual(m(self._jo, -70458L, u'R\xE9mi'), u'\u05D0')
def test_d_iSb(self):
m = self._jcl.d_iSb
self.assertAlmostEqual(m(self._jo, 0, u'', True), 197.9986e17)
def test_f_(self):
m = self._jcl._f_
self.assertAlmostEqual(m(), -0.07)
def test_S_(self):
m = self._jcl.S_
self.assertEqual(m(self._jo), u'\xE9\xEA\x00\xE8')
def test_B_loi(self):
g = self._jcl.o_b
o = g(self._jo, False)
self.assertIsNotNone(o)
self.assertTrue(isinstance(o, _pyjava.JavaInstance))
m = self._jcl.B_loi
self.assertEqual(m(self._jo, 142005L, o, -100), 0x20)
def test_s_So(self):
g = self._jcl.o_b
o = g(self._jo, False)
self.assertIsNotNone(o)
self.assertTrue(isinstance(o, _pyjava.JavaInstance))
m = self._jcl._s_So
self.assertEqual(m(u'\x00\u252C\u2500\u2500\u252C', o), -15)
def test_o_S(self):
m = self._jcl._o_S
self.assertEqual(m(None), None)
def test_v_o(self):
m = self._jcl.v_o
self.assertIsNone(m(self._jo, None))
def test_C_(self):
g = self._jcl._C_
C = g() # this returns a Class, which should be wrapped as
# JavaClass instead of JavaInstance automatically
self.assertIsNotNone(C)
o = C(17)
m = C.i_
self.assertEqual(m(o), 42)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
import six
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
# TODO(amotoki): [drop-nova-network] Add neutron policy support
class DeleteGroup(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Security Group",
u"Delete Security Groups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Security Group",
u"Deleted Security Groups",
count
)
def allowed(self, request, security_group=None):
if not security_group:
return True
return security_group.name != 'default'
def delete(self, request, obj_id):
api.neutron.security_group_delete(request, obj_id)
class CreateGroup(tables.LinkAction):
name = "create"
verbose_name = _("Create Security Group")
url = "horizon:project:security_groups:create"
classes = ("ajax-modal",)
icon = "plus"
def allowed(self, request, security_group=None):
usages = quotas.tenant_quota_usages(request,
targets=('security_groups', ))
if usages['security_groups'].get('available', 1) <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ["disabled"]
self.verbose_name = _("Create Security Group (Quota exceeded)")
else:
self.verbose_name = _("Create Security Group")
self.classes = [c for c in self.classes if c != "disabled"]
return True
class EditGroup(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Security Group")
url = "horizon:project:security_groups:update"
classes = ("ajax-modal",)
icon = "pencil"
def allowed(self, request, security_group=None):
if not security_group:
return True
return security_group.name != 'default'
class ManageRules(policy.PolicyTargetMixin, tables.LinkAction):
name = "manage_rules"
verbose_name = _("Manage Rules")
url = "horizon:project:security_groups:detail"
icon = "pencil"
class SecurityGroupsFilterAction(tables.FilterAction):
def filter(self, table, security_groups, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [security_group for security_group in security_groups
if query in security_group.name.lower()]
class SecurityGroupsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Name"))
security_group_id = tables.Column("id",
verbose_name=_("Security Group ID"))
description = tables.Column("description", verbose_name=_("Description"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
class Meta(object):
name = "security_groups"
verbose_name = _("Security Groups")
table_actions = (CreateGroup, DeleteGroup, SecurityGroupsFilterAction)
row_actions = (ManageRules, EditGroup, DeleteGroup)
class CreateRule(tables.LinkAction):
name = "add_rule"
verbose_name = _("Add Rule")
url = "horizon:project:security_groups:add_rule"
classes = ("ajax-modal",)
icon = "plus"
def get_link_url(self):
return reverse(self.url, args=[self.table.kwargs['security_group_id']])
class DeleteRule(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Rule",
u"Delete Rules",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Rule",
u"Deleted Rules",
count
)
def delete(self, request, obj_id):
api.neutron.security_group_rule_delete(request, obj_id)
def get_success_url(self, request):
sg_id = self.table.kwargs['security_group_id']
return reverse("horizon:project:security_groups:detail", args=[sg_id])
def get_remote_ip_prefix(rule):
if 'cidr' in rule.ip_range:
if rule.ip_range['cidr'] is None:
range = '::/0' if rule.ethertype == 'IPv6' else '0.0.0.0/0'
else:
range = rule.ip_range['cidr']
return range
else:
return None
def get_remote_security_group(rule):
return rule.group.get('name')
def get_port_range(rule):
# There is no case where from_port is None and to_port has a value,
# so it is enough to check only from_port.
if rule.from_port is None:
return _('Any')
ip_proto = rule.ip_protocol
if rule.from_port == rule.to_port:
return check_rule_template(rule.from_port, ip_proto)
else:
return (u"%(from)s - %(to)s" %
{'from': check_rule_template(rule.from_port, ip_proto),
'to': check_rule_template(rule.to_port, ip_proto)})
def filter_direction(direction):
if direction is None or direction.lower() == 'ingress':
return _('Ingress')
else:
return _('Egress')
def filter_protocol(protocol):
if protocol is None:
return _('Any')
return six.text_type.upper(protocol)
def check_rule_template(port, ip_proto):
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', {})
if not rules_dict:
return port
templ_rule = [rule for rule in rules_dict.values()
if (str(port) == rule['from_port']
and str(port) == rule['to_port']
and ip_proto == rule['ip_protocol'])]
if templ_rule:
return u"%(from_port)s (%(name)s)" % templ_rule[0]
return port
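# A minimal sketch (not part of Horizon) of how check_rule_template() resolves a
# port/protocol pair against the SECURITY_GROUP_RULES setting; the "http" entry
# below is purely illustrative.
def _check_rule_template_example():
    example_rules = {
        'http': {'name': 'HTTP', 'ip_protocol': 'tcp',
                 'from_port': '80', 'to_port': '80'},
    }
    # A TCP rule covering exactly port 80 matches the template and renders as
    # u"80 (HTTP)"; any other port/protocol pair falls through unchanged.
    matches = [rule for rule in example_rules.values()
               if (str(80) == rule['from_port']
                   and str(80) == rule['to_port']
                   and 'tcp' == rule['ip_protocol'])]
    if matches:
        return u"%(from_port)s (%(name)s)" % matches[0]
    return 80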
class RulesTable(tables.DataTable):
direction = tables.Column("direction",
verbose_name=_("Direction"),
filters=(filter_direction,))
ethertype = tables.Column("ethertype",
verbose_name=_("Ether Type"))
protocol = tables.Column("ip_protocol",
verbose_name=_("IP Protocol"),
filters=(filter_protocol,))
port_range = tables.Column(get_port_range,
verbose_name=_("Port Range"))
remote_ip_prefix = tables.Column(get_remote_ip_prefix,
verbose_name=_("Remote IP Prefix"))
remote_security_group = tables.Column(get_remote_security_group,
verbose_name=_("Remote Security"
" Group"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
def get_object_display(self, rule):
return six.text_type(rule)
class Meta(object):
name = "rules"
verbose_name = _("Security Group Rules")
table_actions = (CreateRule, DeleteRule)
row_actions = (DeleteRule,)
|
|
"""
This is one of the Kolibri core components, the abstract layer of all contents.
To access it, please use the public APIs in api.py.
The ONLY public object is ContentNode.
"""
from __future__ import print_function
import os
import uuid
from gettext import gettext as _
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import get_valid_filename
from jsonfield import JSONField
from kolibri.core.fields import DateTimeTzField
from le_utils.constants import content_kinds, file_formats, format_presets
from le_utils.constants.languages import LANGUAGE_DIRECTIONS
from mptt.models import MPTTModel, TreeForeignKey
from .utils import paths
PRESET_LOOKUP = dict(format_presets.choices)
V020BETA1 = 'v0.2.0-beta1'
V040BETA3 = 'v0.4.0-beta3'
NO_VERSION = 'unversioned'
CONTENT_SCHEMA_VERSION = '1'
class UUIDField(models.CharField):
"""
Adaptation of Django's UUIDField, but with 32-char hex representation as Python representation rather than a UUID instance.
"""
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(*args, **kwargs)
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except AttributeError:
raise TypeError(self.error_messages['invalid'] % {'value': value})
return value.hex
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
def to_python(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
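# A minimal sketch (not part of the Kolibri API) of the UUIDField contract
# described in the docstring above: uuid.UUID instances are accepted, but the
# Python-side representation is always the 32-character hex string.
def _uuidfield_roundtrip_example():
    field = UUIDField()
    value = uuid.uuid4()
    as_hex = field.to_python(value)            # 32-character hex string
    assert as_hex == value.hex
    assert field.to_python(as_hex) == as_hex   # plain strings pass through
    return as_hex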
@python_2_unicode_compatible
class ContentTag(models.Model):
id = UUIDField(primary_key=True)
tag_name = models.CharField(max_length=30, blank=True)
def __str__(self):
return self.tag_name
@python_2_unicode_compatible
class ContentNode(MPTTModel):
"""
The top layer of the contentDB schema, defines the most common properties that are shared across all different contents.
Things it can represent are, for example, video, exercise, audio or document...
"""
id = UUIDField(primary_key=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
license_name = models.CharField(max_length=50, null=True, blank=True)
license_description = models.CharField(max_length=400, null=True, blank=True)
has_prerequisite = models.ManyToManyField('self', related_name='prerequisite_for', symmetrical=False, blank=True)
related = models.ManyToManyField('self', symmetrical=True, blank=True)
tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
title = models.CharField(max_length=200)
# the content_id is used for tracking a user's interaction with a piece of
# content, in the face of possibly many copies of that content. When a user
# interacts with a piece of content, all substantially similar pieces of
# content should be marked as such as well. We track these "substantially
# similar" types of content by having them have the same content_id.
content_id = UUIDField(db_index=True)
channel_id = UUIDField(db_index=True)
description = models.CharField(max_length=400, blank=True, null=True)
sort_order = models.FloatField(blank=True, null=True)
license_owner = models.CharField(max_length=200, blank=True)
author = models.CharField(max_length=200, blank=True)
kind = models.CharField(max_length=200, choices=content_kinds.choices, blank=True)
available = models.BooleanField(default=False)
stemmed_metaphone = models.CharField(max_length=1800, blank=True) # for fuzzy search in title and description
lang = models.ForeignKey('Language', blank=True, null=True)
class Meta:
ordering = ('lft',)
index_together = [
["level", "channel_id", "kind"],
["level", "channel_id", "available"],
]
def __str__(self):
return self.title
def get_descendant_content_ids(self):
"""
Retrieve a queryset of content_ids for non-topic content nodes that are
descendants of this node.
"""
return ContentNode.objects \
.filter(lft__gte=self.lft, lft__lte=self.rght) \
.exclude(kind=content_kinds.TOPIC) \
.values_list("content_id", flat=True)
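# A minimal usage sketch (the argument name is hypothetical): because MPTT keeps
# every descendant's lft value inside its ancestor's [lft, rght] interval,
# get_descendant_content_ids() only needs the single range filter above plus the
# TOPIC exclusion to gather trackable content_ids under a topic node.
def _descendant_content_ids_example(topic_node):
    # topic_node is assumed to be an already-saved ContentNode of kind TOPIC.
    return list(topic_node.get_descendant_content_ids())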
@python_2_unicode_compatible
class Language(models.Model):
id = models.CharField(max_length=14, primary_key=True)
lang_code = models.CharField(max_length=3, db_index=True)
lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
# Localized name
lang_name = models.CharField(max_length=100, blank=True, null=True)
lang_direction = models.CharField(max_length=3, choices=LANGUAGE_DIRECTIONS, default=LANGUAGE_DIRECTIONS[0][0])
def __str__(self):
return self.lang_name or ''
class File(models.Model):
"""
The second to bottom layer of the contentDB schema, defines the basic building brick for content.
Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
id = UUIDField(primary_key=True)
# The foreign key mapping happens here as many File objects can map onto a single local file
local_file = models.ForeignKey('LocalFile', related_name='files')
available = models.BooleanField(default=False)
contentnode = models.ForeignKey(ContentNode, related_name='files')
preset = models.CharField(max_length=150, choices=format_presets.choices, blank=True)
lang = models.ForeignKey(Language, blank=True, null=True)
supplementary = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
priority = models.IntegerField(blank=True, null=True, db_index=True)
class Meta:
ordering = ["priority"]
class Admin:
pass
def get_extension(self):
return self.local_file.extension
def get_file_size(self):
return self.local_file.file_size
def get_storage_url(self):
return self.local_file.get_storage_url()
def get_preset(self):
"""
Return the preset.
"""
return PRESET_LOOKUP.get(self.preset, _('Unknown format'))
def get_download_filename(self):
"""
Return a valid filename to be downloaded as.
"""
title = self.contentnode.title
filename = "{} ({}).{}".format(title, self.get_preset(), self.get_extension())
valid_filename = get_valid_filename(filename)
return valid_filename
def get_download_url(self):
"""
Return the download url.
"""
new_filename = self.get_download_filename()
return reverse('downloadcontent', kwargs={'filename': self.local_file.get_filename(), 'new_filename': new_filename})
class LocalFileManager(models.Manager):
def delete_orphan_files(self):
for file in self.filter(files__isnull=True):
try:
os.remove(paths.get_content_storage_file_path(file.get_filename()))
except (IOError, OSError,):
pass
yield file
def get_orphan_files(self):
return self.filter(files__isnull=True)
def delete_orphan_file_objects(self):
return self.get_orphan_files().delete()
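# A minimal usage sketch: delete_orphan_files() is a generator, so files are
# only removed from disk while it is iterated; draining it (e.g. via list())
# performs the cleanup, after which the orphaned database rows can be dropped
# in a single query.
def _purge_orphan_files_example():
    removed = list(LocalFile.objects.delete_orphan_files())
    LocalFile.objects.delete_orphan_file_objects()
    return removed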
@python_2_unicode_compatible
class LocalFile(models.Model):
"""
The bottom layer of the contentDB schema, defines the local state of files on the device storage.
"""
# ID should be the checksum of the file
id = models.CharField(max_length=32, primary_key=True)
extension = models.CharField(max_length=40, choices=file_formats.choices, blank=True)
available = models.BooleanField(default=False)
file_size = models.IntegerField(blank=True, null=True)
objects = LocalFileManager()
class Admin:
pass
def __str__(self):
return paths.get_content_file_name(self)
def get_filename(self):
return self.__str__()
def get_storage_url(self):
"""
Return a url for the client side to retrieve the content file.
The same url will also be exposed by the file serializer.
"""
if self.available:
return paths.get_content_storage_file_url(filename=self.get_filename(), baseurl="/")
else:
return None
class AssessmentMetaData(models.Model):
"""
A model to describe additional metadata that characterizes assessment behaviour in Kolibri.
    This model contains additional fields that are only relevant to content nodes that probe a
    user's state of knowledge and allow them to practice to Mastery.
    ContentNodes with this metadata may also be usable within quizzes and exams.
"""
id = UUIDField(primary_key=True)
contentnode = models.ForeignKey(
ContentNode, related_name='assessmentmetadata'
)
# A JSON blob containing a serialized list of ids for questions that the assessment can present.
assessment_item_ids = JSONField(default=[])
# Length of the above assessment_item_ids for a convenience lookup.
number_of_assessments = models.IntegerField()
# A JSON blob describing the mastery model that is used to set this assessment as mastered.
mastery_model = JSONField(default={})
# Should the questions listed in assessment_item_ids be presented in a random order?
randomize = models.BooleanField(default=False)
# Is this assessment compatible with being previewed and answer filled for display in coach reports
# and use in summative and formative tests?
is_manipulable = models.BooleanField(default=False)
@python_2_unicode_compatible
class ChannelMetadata(models.Model):
"""
Holds metadata about all existing content databases that exist locally.
"""
id = UUIDField(primary_key=True)
name = models.CharField(max_length=200)
description = models.CharField(max_length=400, blank=True)
author = models.CharField(max_length=400, blank=True)
version = models.IntegerField(default=0)
thumbnail = models.TextField(blank=True)
last_updated = DateTimeTzField(null=True)
# Minimum version of Kolibri that this content database is compatible with
min_schema_version = models.CharField(max_length=50)
root = models.ForeignKey(ContentNode)
class Admin:
pass
def __str__(self):
return self.name
def delete_content_tree_and_files(self):
# Use Django ORM to ensure cascading delete:
self.root.delete()
|
|
import os
from datetime import timedelta
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
from django.core.urlresolvers import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.join(BASE_DIR, '..', '..')
SECRET_KEY = 'na2p&yexkp-g83$2m^&b!r+a%nv2ci1!d9vh^a_7h!hv*7&h79'
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mailme',
'suit',
'django.contrib.admin',
'social.apps.django_app.default',
'celery',
'kombu.transport.django',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'mailme.middlewares.social.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'mailme.urls'
WSGI_APPLICATION = 'mailme.wsgi.application'
AUTH_USER_MODEL = 'mailme.User'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'db.sqlite3'),
}
}
TEMPLATE_CONTEXT_PROCESSORS = TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'bower_components'),
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
AUTHENTICATION_BACKENDS = (
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.facebook.FacebookOAuth2',
'social.backends.github.GithubOAuth2',
'social.backends.username.UsernameAuth',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = reverse_lazy('mailme-index')
LOGIN_ERROR_URL = reverse_lazy('mailme-index')
LOGIN_REDIRECT_URL = reverse_lazy('mailme-index')
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
SOCIAL_AUTH_USER_FIELDS = ('username', 'email', 'password')
SOCIAL_AUTH_REQUIRED_USER_FIELDS = ('username', 'email')
SOCIAL_AUTH_PROTECTED_USER_FIELDS = SOCIAL_AUTH_REQUIRED_USER_FIELDS
SOCIAL_AUTH_PIPELINE = (
'mailme.pipeline.unique_login',
'mailme.pipeline.conditional_social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.social_user',
'mailme.pipeline.require_user_details',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'mailme.pipeline.email_verification',
)
SOCIAL_AUTH_USERNAME_FORM_URL = reverse_lazy('mailme-index')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = ''
SOCIAL_AUTH_TWITTER_KEY = ''
SOCIAL_AUTH_TWITTER_SECRET = ''
SOCIAL_AUTH_FACEBOOK_KEY = ''
SOCIAL_AUTH_FACEBOOK_SECRET = ''
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_GITHUB_KEY = ''
SOCIAL_AUTH_GITHUB_SECRET = ''
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# 3 hours
MAILME_REFRESH_EVERY = 3 * 60 * 60
MAILME_POST_LIMIT = 20
MAILME_FEED_TIMEOUT = 10
MAILME_MIN_REFRESH_INTERVAL = timedelta(seconds=60 * 20)
# Celery / Queue configuration
from kombu import Queue
BROKER_URL = "django://"
# Per default process all celery tasks in-process.
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Only accept JSON. This will be the default in Celery 3.2
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
# Explicitly set default queue and exchange types. This is only useful for
# RabbitMQ but still good to have as a general rule.
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
# Track started tasks. This adds a new STARTED state once a task
# is started by the celery worker.
CELERY_TRACK_STARTED = True
CELERY_QUEUES = (
Queue('default', routing_key='default'),
Queue('celery', routing_key='celery'),
)
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERYBEAT_MAX_LOOP_INTERVAL = 3600
CELERY_DISABLE_RATE_LIMITS = True
# Make our `LOGGING` configuration the only truth and don't let celery
# overwrite it.
CELERYD_HIJACK_ROOT_LOGGER = False
# Don't log celery log-redirection as warning (default).
# We manage our logging through `django.conf.settings.LOGGING` and
# want that to be our first-citizen config.
CELERY_REDIRECT_STDOUTS_LEVEL = 'INFO'
# Disable South in tests as it is sending incorrect create signals
SOUTH_TESTS_MIGRATE = False
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'sentry': {
'level': 'WARNING',
'class': 'raven.contrib.django.handlers.SentryHandler',
'formatter': 'simple'
}
},
'formatters': {
'verbose': {
'format':
'[%(asctime)s] %(levelname)s:%(name)s %(funcName)s\n %(message)s', # noqa
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
# This is the root logger that catches everything, if there's no other
# match on the logger name. If we want custom logging handing for our
# code vs. third-party code, define loggers for each module/app
# that's using standard python logging.
'root': {
'level': 'INFO',
'handlers': ['console'],
},
'celery': {
'level': 'INFO',
'handlers': ['console'],
'propagate': False,
},
'mailme': {
'level': 'INFO',
'handlers': ['console'],
'propagate': False,
},
'django': {
'level': 'INFO',
'handlers': ['console'],
'propagate': False,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
from cinder import volume
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def _translate_attachment_detail_view(_context, vol):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(_context, vol)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(_context, vol):
"""Maps keys for attachment summary view."""
d = {}
volume_id = vol['id']
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
return d
def _translate_volume_detail_view(context, vol, image_id=None):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol, image_id)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol, image_id=None):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availability_zone'] = vol['availability_zone']
d['created_at'] = vol['created_at']
d['attachments'] = []
if vol['attach_status'] == 'attached':
attachment = _translate_attachment_detail_view(context, vol)
d['attachments'].append(attachment)
d['display_name'] = vol['display_name']
d['display_description'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volume_type'] = vol['volume_type']['name']
else:
# TODO(bcwaldon): remove str cast once we use uuids
d['volume_type'] = str(vol['volume_type_id'])
d['snapshot_id'] = vol['snapshot_id']
d['source_volid'] = vol['source_volid']
if image_id:
d['image_id'] = image_id
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
# avoid circular ref when vol is a Volume instance
elif vol.get('metadata') and isinstance(vol.get('metadata'), dict):
d['metadata'] = vol['metadata']
else:
d['metadata'] = {}
if vol.get('volume_glance_metadata'):
d['bootable'] = 'true'
else:
d['bootable'] = 'false'
return d
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
elem.set('volume_id')
elem.set('device')
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availability_zone')
elem.set('created_at')
elem.set('display_name')
elem.set('display_description')
elem.set('volume_type')
elem.set('snapshot_id')
elem.set('source_volid')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM}
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
volume = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
volume[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
volume['metadata'] = self.extract_metadata(metadata_node)
return volume
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = utils.safe_minidom_parse_string(string)
volume = self._extract_volume(dom)
return {'body': {'volume': volume}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['cinder.context']
remove_invalid_options(context,
search_opts, self._get_volume_search_options())
volumes = self.volume_api.get_all(context, marker=None, limit=None,
sort_key='created_at',
sort_dir='desc', filters=search_opts)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id.
try:
image_uuid = image_href.split('/').pop()
except (TypeError, AttributeError):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
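    # For reference (hrefs shown are illustrative): both a full href such as
    # "http://glance-host/images/<uuid>" and a bare UUID string reduce to the
    # trailing path segment, and anything whose last segment is not UUID-like
    # is rejected with HTTP 400 above.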
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['cinder.context']
volume = body['volume']
kwargs = {}
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
if not uuidutils.is_uuid_like(req_volume_type):
try:
kwargs['volume_type'] = \
volume_types.get_volume_type_by_name(
context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
else:
try:
kwargs['volume_type'] = volume_types.get_volume_type(
context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
kwargs['source_volume'] = self.volume_api.get_volume(context,
source_volid)
else:
kwargs['source_volume'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
image_href = None
image_uuid = None
if self.ext_mgr.is_loaded('os-image-create'):
image_href = volume.get('imageRef')
if snapshot_id and image_href:
msg = _("Snapshot and image cannot be specified together.")
raise exc.HTTPBadRequest(explanation=msg)
if image_href:
image_uuid = self._image_uuid_from_href(image_href)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context,
dict(new_volume.iteritems()),
image_uuid)
return {'volume': retval}
def _get_volume_search_options(self):
"""Return volume search options allowed by non-admin."""
return ('display_name', 'status')
@wsgi.serializers(xml=VolumeTemplate)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
if not body:
raise exc.HTTPUnprocessableEntity()
if 'volume' not in body:
raise exc.HTTPUnprocessableEntity()
volume = body['volume']
update_dict = {}
valid_update_keys = (
'display_name',
'display_description',
'metadata',
)
for key in valid_update_keys:
if key in volume:
update_dict[key] = volume[key]
try:
volume = self.volume_api.get(context, id)
self.volume_api.update(context, volume, update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
volume.update(update_dict)
return {'volume': _translate_volume_detail_view(context, volume)}
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)
for opt in unknown_options:
del search_options[opt]
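# A minimal sketch of remove_invalid_options() in action; _FakeContext is
# illustrative, not part of Cinder, and the call assumes the normal Cinder
# runtime where gettext's _() has been installed into builtins.
def _remove_invalid_options_example():
    class _FakeContext(object):
        is_admin = False
    search_opts = {'display_name': 'vol1', 'host': 'node-1'}
    remove_invalid_options(_FakeContext(), search_opts,
                           ('display_name', 'status'))
    # 'host' is not in the allowed tuple, so only 'display_name' survives;
    # an admin context would have kept both options untouched.
    return search_opts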
|
|
# pyOCD debugger
# Copyright (c) 2019-2020 Arm Limited
# Copyright (C) 2020 Ted Tawara
# Copyright (c) 2021 Chris Reed
# Copyright (c) 2021 Matthias Wauer
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
import logging
from ...utility.sequencer import CallSequence
from ...core import exceptions
from ...core.target import Target
from ...coresight.coresight_target import CoreSightTarget
from ...coresight.cortex_m import CortexM
from ...coresight.cortex_m_v8m import CortexM_v8M
from ...utility import timeout
FPB_CTRL = 0xE0002000
FPB_COMP0 = 0xE0002008
DWT_COMP0 = 0xE0001020
DWT_FUNCTION0 = 0xE0001028
DWT_FUNCTION_MATCH = 0x4 << 0 # Instruction address.
DWT_FUNCTION_ACTION = 0x1 << 4 # Generate debug event.
DWT_FUNCTION_DATAVSIZE = 0x2 << 10 # 4 bytes.
PERIPHERAL_BASE_NS = 0x40000000
PERIPHERAL_BASE_S = 0x50000000
FLASH_CMD = 0x00034000
FLASH_STARTA = 0x00034010
FLASH_STOPA = 0x00034014
FLASH_DATAW0 = 0x00034080
FLASH_INT_STATUS = 0x00034FE0
FLASH_INT_CLR_STATUS = 0x00034FE8
FLASH_CMD_READ_SINGLE_WORD = 0x3
FLASH_CMD_BLANK_CHECK = 0x5
BOOTROM_MAGIC_ADDR = 0x50000040
DM_AP = 2
# Control and Status Word (CSW) is used to control the Debug Mailbox communication
DM_CSW = 0x00
# Debugger will set this bit to 1 to request a resynchronisation
DM_CSW_RESYNCH_REQ_MASK = (1<<0)
# Write-only bit. Once written it will cause the chip to reset (note that the DM itself is
# not reset by this, as it can only be reset by a SOFT reset or a POR/BOD event)
DM_CSW_CHIP_RESET_REQ_MASK = (1<<5)
# Request register is used to send data from debugger to device
DM_REQUEST = 0x04
# Return register is used to send data from device to debugger
# Note: Any read from debugger side will be stalled until new data is present.
DM_RETURN = 0x08
## Debugger mailbox command to start a debug session (unlock debug).
DM_START_DBG_SESSION = 7
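# Summary of the mailbox handshake implemented by LPC5500Family.unlock() below,
# per the register descriptions above: request a resynchronisation plus chip
# reset through DM_CSW, wait for CSW to read back as zero, write
# DM_START_DBG_SESSION to DM_REQUEST, then read DM_RETURN, whose low half-word
# is the status (zero means the debug session was granted).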
LOG = logging.getLogger(__name__)
class LPC5500Family(CoreSightTarget):
VENDOR = "NXP"
# Minimum value for the 'adi.v5.max_invalid_ap_count' option.
_MIN_INVALID_APS = 3
def create_init_sequence(self):
seq = super(LPC5500Family, self).create_init_sequence()
seq.wrap_task('discovery', self.modify_discovery)
return seq
def modify_discovery(self, seq):
seq.insert_before('find_aps',
('set_max_invalid_aps', self.set_max_invalid_aps)) \
.insert_before('find_components',
('check_locked_state', lambda : self.check_locked_state(seq))) \
.wrap_task('find_components', self._modify_ap1) \
.replace_task('create_cores', self.create_lpc55xx_cores) \
.insert_before('create_components',
('enable_traceclk', self._enable_traceclk),
) \
.append(('restore_max_invalid_aps', self.restore_max_invalid_aps))
return seq
def set_max_invalid_aps(self):
# Save current option and make sure it is set to at least 3.
self._saved_max_invalid_aps = self.session.options.get('adi.v5.max_invalid_ap_count')
if self._saved_max_invalid_aps < self._MIN_INVALID_APS:
self.session.options.set('adi.v5.max_invalid_ap_count', self._MIN_INVALID_APS)
def restore_max_invalid_aps(self):
# Only restore if we changed it.
if self._saved_max_invalid_aps < self._MIN_INVALID_APS:
self.session.options.set('adi.v5.max_invalid_ap_count', self._saved_max_invalid_aps)
def _modify_ap1(self, seq):
# If AP#1 exists we need to adjust it before we can read the ROM.
if seq.has_task('init_ap.1'):
seq.insert_before('init_ap.1',
('set_ap1_nonsec', self._set_ap1_nonsec),
)
return seq
def check_locked_state(self, seq):
"""@brief Attempt to unlock cores if they are locked (flash is empty etc.)"""
# The device is not locked if AP#0 was found and is enabled.
if (0 in self.aps) and self.aps[0].is_enabled:
return
# The debugger mailbox should always be present.
if not DM_AP in self.aps:
LOG.error("cannot request debug unlock; no debugger mailbox AP was found")
return
# Perform the unlock procedure using the debugger mailbox.
self.unlock(self.aps[DM_AP])
# Finished, if not called from init sequence
if seq is None:
return
# re-run discovery
LOG.info("re-running discovery")
new_seq = CallSequence()
for entry in seq:
if entry[0] == 'check_locked_state':
break
new_seq.append(entry)
self.dp.valid_aps = None
return new_seq
def _set_ap1_nonsec(self):
# Make AP#1 transactions non-secure so transfers will succeed.
self.aps[1].hnonsec = 1
def create_lpc55xx_cores(self):
# Make sure AP#0 was detected.
if (0 not in self.aps) or (not self.aps[0].is_enabled):
LOG.error("AP#0 was not found, unable to create core 0")
return
try:
# Create core 0 with a custom class.
core0 = CortexM_LPC5500(self.session, self.aps[0], self.memory_map, 0)
core0.default_reset_type = self.ResetType.SW_SYSRESETREQ
self.aps[0].core = core0
core0.init()
self.add_core(core0)
except exceptions.Error as err:
LOG.error("Error creating core 0: %s", err, exc_info=self.session.log_tracebacks)
# Create core 1 if the AP is present. It uses the standard Cortex-M core class for v8-M.
if (1 in self.aps) and (self.aps[0].is_enabled):
try:
core1 = CortexM_v8M(self.session, self.aps[1], self.memory_map, 1)
core1.default_reset_type = self.ResetType.SW_SYSRESETREQ
self.aps[1].core = core1
core1.init()
self.add_core(core1)
except exceptions.Error as err:
LOG.error("Error creating core 1: %s", err, exc_info=self.session.log_tracebacks)
def _enable_traceclk(self):
# Don't make it worse if no APs were found.
if (0 not in self.aps) or (not self.aps[0].is_enabled):
return
SYSCON_NS_Base_Addr = 0x40000000
IOCON_NS_Base_Addr = 0x40001000
TRACECLKSEL_Addr = SYSCON_NS_Base_Addr + 0x268
TRACECLKDIV_Addr = SYSCON_NS_Base_Addr + 0x308
        AHBCLKCTRLSET0_Addr = SYSCON_NS_Base_Addr + 0x220
clksel = self.read32(TRACECLKSEL_Addr) # Read current TRACECLKSEL value
if clksel > 2:
self.write32(TRACECLKSEL_Addr, 0x0) # Select Trace divided clock
clkdiv = self.read32(TRACECLKDIV_Addr) & 0xFF # Read current TRACECLKDIV value, preserve divider but clear rest to enable
self.write32(TRACECLKDIV_Addr, clkdiv)
self.write32(AHBCLKCTRLSET0_Addr, (1 << 13)) # Enable IOCON clock
def trace_start(self):
# Configure PIO0_10: FUNC - 6, MODE - 0, SLEW - 1, INVERT - 0, DIGMODE - 0, OD - 0
self.write32(0x40001028, 0x00000046)
self.call_delegate('trace_start', target=self, mode=0)
# On a reset when ITM is enabled, TRACECLKDIV/TRACECLKSEL will be reset
# even though ITM will remain enabled -- which will cause ITM stimulus
# writes to hang in the target because the FIFO will never appear ready.
# To prevent this, we explicitly (re)enable traceclk.
self._enable_traceclk()
def unlock(self, dm_ap):
"""@brief Unlock Cores. See UM11126 51.6.1 """
assert self.dp.probe.is_open
LOG.info("attempting unlock procedure")
# Set RESYNCH_REQ (0x1) and CHIP_RESET_REQ (0x20) in DM.CSW.
dm_ap.write_reg(addr=DM_CSW, data=(DM_CSW_RESYNCH_REQ_MASK | DM_CSW_CHIP_RESET_REQ_MASK))
dm_ap.dp.flush()
# Wait for reset to complete.
sleep(0.1)
# Read CSW to verify the reset happened and the register is cleared.
retval = dm_ap.read_reg(addr=DM_CSW)
if retval != 0:
LOG.error("debugger mailbox failed to reset the device")
return
# Write debug unlock request.
dm_ap.write_reg(addr=DM_REQUEST, data=DM_START_DBG_SESSION)
dm_ap.dp.flush()
# Read reply from boot ROM. The return status is the low half-word.
retval = dm_ap.read_reg(addr=DM_RETURN) & 0xffff
if retval != 0:
LOG.error("received error from unlock attempt (%x)", retval)
return
return
class CortexM_LPC5500(CortexM_v8M):
def reset_and_halt(self, reset_type=None):
"""@brief Perform a reset and stop the core on the reset handler. """
halt_only = False
catch_mode = 0
delegateResult = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type)
# Save CortexM.DEMCR
demcr = self.read_memory(CortexM.DEMCR)
# enable the vector catch
if not delegateResult:
# This sequence is copied from the NXP LPC55S69_DFP debug sequence.
reset_vector = 0xFFFFFFFF
# Clear reset vector catch.
self.write32(CortexM.DEMCR, demcr & ~CortexM.DEMCR_VC_CORERESET)
# If the processor is in Secure state, we have to access the flash controller
# through the secure alias.
if self.get_security_state() == Target.SecurityState.SECURE:
base = PERIPHERAL_BASE_S
else:
base = PERIPHERAL_BASE_NS
#
# Check to see if the flash is erased
#
self.write32(base + FLASH_STARTA, 0x00000000) # Program flash word start address to 0x0
self.write32(base + FLASH_STOPA, 0x00000000) # Program flash word stop address to 0x0
self.write32(base + FLASH_INT_CLR_STATUS, 0x0000000F) # Clear Flash controller status
self.write32(base + FLASH_CMD, FLASH_CMD_BLANK_CHECK) # Check if page is cleared
# Wait for flash word read to finish.
with timeout.Timeout(5.0) as t_o:
while t_o.check():
if (self.read32(base + FLASH_INT_STATUS) & 0x00000004) != 0:
break
sleep(0.01)
# Check for error reading flash word.
if (self.read32(base + FLASH_INT_STATUS) & 0xB) == 0:
LOG.info("required flash area is erased")
halt_only = True
# Use the flash programming model to check if the first flash page is readable, since
# attempted accesses to erased pages result in bus faults. The start and stop address
# are both set to 0x0 to probe the sector containing the reset vector.
self.write32(base + FLASH_STARTA, 0x00000000) # Program flash word start address to 0x0
self.write32(base + FLASH_STOPA, 0x00000000) # Program flash word stop address to 0x0
self.write_memory_block32(base + FLASH_DATAW0, [0x00000000] * 8) # Prepare for read
self.write32(base + FLASH_INT_CLR_STATUS, 0x0000000F) # Clear Flash controller status
if not halt_only:
self.write32(base + FLASH_CMD, FLASH_CMD_READ_SINGLE_WORD) # Read single flash word
# Wait for flash word read to finish.
with timeout.Timeout(5.0) as t_o:
while t_o.check():
if (self.read32(base + FLASH_INT_STATUS) & 0x00000004) != 0:
break
sleep(0.01)
# Check for error reading flash word.
if (self.read32(base + FLASH_INT_STATUS) & 0xB) == 0:
# Read the reset vector address.
reset_vector = self.read32(0x00000004)
# Break on user application reset vector if we have a valid breakpoint address.
if reset_vector != 0xFFFFFFFF:
catch_mode = 1
self.write32(FPB_COMP0, reset_vector|1) # Program FPB Comparator 0 with reset handler address
self.write32(FPB_CTRL, 0x00000003) # Enable FPB
# No valid user application so use watchpoint to break at end of boot ROM. The ROM
# writes a special address to signal when it's done.
else:
catch_mode = 2
self.write32(DWT_FUNCTION0, 0)
self.write32(DWT_COMP0, BOOTROM_MAGIC_ADDR)
self.write32(DWT_FUNCTION0, (DWT_FUNCTION_MATCH | DWT_FUNCTION_ACTION | DWT_FUNCTION_DATAVSIZE))
# Read DHCSR to clear potentially set DHCSR.S_RESET_ST bit
self.read32(CortexM.DHCSR)
if not halt_only:
self.reset(reset_type)
else:
self.halt()
# wait until the unit resets
with timeout.Timeout(2.0) as t_o:
while t_o.check():
if self.get_state() not in (Target.State.RESET, Target.State.RUNNING):
break
sleep(0.01)
# Make sure the thumb bit is set in XPSR in case the reset handler
# points to an invalid address.
xpsr = self.read_core_register('xpsr')
if xpsr is not None and xpsr & self.XPSR_THUMB == 0:
self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
# Clear breakpoint or watchpoint.
if catch_mode == 1:
            self.write32(FPB_COMP0, 0)
elif catch_mode == 2:
self.write32(DWT_COMP0, 0)
self.write32(DWT_FUNCTION0, 0)
# restore vector catch setting
self.write_memory(CortexM.DEMCR, demcr)
def reset(self, reset_type):
# unlock debug access after reset
super(CortexM_LPC5500, self).reset(reset_type)
self.session.target.check_locked_state(None)
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
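        # Worked example (numbers are illustrative): with a relayfee of
        # 0.00001000 per kB, feeTolerance = 2 * 0.00001 / 1000 = 0.00000002,
        # i.e. the fee for the two possible extra signature bytes of a single
        # input (scaled by the input count later in the test).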
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = 2000
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
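        # The splice above lands on the 1-byte script length at hex offset 82
        # (4 version + 1 vin-count + 36 prevout bytes = 41 bytes = 82 hex chars),
        # replacing it with 0x01 and supplying a single 0x00 byte as the
        # scriptSig, which is why the decoded scriptSig hex is "00" below.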
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
errorString = ""
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
except JSONRPCException,e:
errorString = e.error['message']
assert("Insufficient" in errorString)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 DAS to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
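        # encryptwallet shuts the encrypted node down, so drop it from the node
        # list, stop the remaining nodes and restart the whole network below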
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
error = False
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
except:
error = True
assert(error)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #now we need to unlock the wallet (passphrase cached for 100 seconds)
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert_equal("fee" in result.keys(), True)
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
|
|
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
__all__ = [ 'options', 'arguments', 'main' ]
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
brief_copyright=\
"gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
import config
from options import OptionParser
options = OptionParser(usage=usage, version=version,
description=brief_copyright)
option = options.add_option
group = options.set_group
# Help options
option('-B', "--build-info", action="store_true", default=False,
help="Show build information")
option('-C', "--copyright", action="store_true", default=False,
help="Show full copyright information")
option('-R', "--readme", action="store_true", default=False,
help="Show the readme")
# Options for configuring the base simulator
option('-d', "--outdir", metavar="DIR", default="m5out",
help="Set the output directory to DIR [Default: %default]")
option('-r', "--redirect-stdout", action="store_true", default=False,
help="Redirect stdout (& stderr, without -e) to file")
option('-e', "--redirect-stderr", action="store_true", default=False,
help="Redirect stderr to file")
option("--stdout-file", metavar="FILE", default="simout",
help="Filename for -r redirection [Default: %default]")
option("--stderr-file", metavar="FILE", default="simerr",
help="Filename for -e redirection [Default: %default]")
option('-i', "--interactive", action="store_true", default=False,
help="Invoke the interactive interpreter after running the script")
option("--pdb", action="store_true", default=False,
help="Invoke the python debugger before running the script")
option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
help="Prepend PATH to the system path when invoking the script")
option('-q', "--quiet", action="count", default=0,
help="Reduce verbosity")
option('-v', "--verbose", action="count", default=0,
help="Increase verbosity")
# Statistics options
group("Statistics Options")
option("--stats-file", metavar="FILE", default="stats.txt",
help="Sets the output file for statistics [Default: %default]")
# Configuration Options
group("Configuration Options")
option("--dump-config", metavar="FILE", default="config.ini",
help="Dump configuration output file [Default: %default]")
option("--json-config", metavar="FILE", default="config.json",
help="Create JSON output of the configuration [Default: %default]")
option("--dot-config", metavar="FILE", default="config.dot",
help="Create DOT & pdf outputs of the configuration [Default: %default]")
# Debugging options
group("Debugging Options")
option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
help="Tick to create a breakpoint")
option("--debug-help", action='store_true',
help="Print help on debug flags")
option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
help="Sets the flags for debug output (-FLAG disables a flag)")
option("--debug-start", metavar="TIME", type='int',
help="Start debug output at TIME (must be in ticks)")
option("--debug-file", metavar="FILE", default="cout",
help="Sets the output file for debug [Default: %default]")
option("--debug-ignore", metavar="EXPR", action='append', split=':',
help="Ignore EXPR sim objects")
option("--remote-gdb-port", type='int', default=7000,
help="Remote gdb base port (set to 0 to disable listening)")
# Help options
group("Help Options")
option("--list-sim-objects", action='store_true', default=False,
help="List all built-in SimObjects, their params and default values")
# load the options.py config file to allow people to set their own
# default options
options_file = config.get('options.py')
if options_file:
scope = { 'options' : options }
execfile(options_file, scope)
arguments = options.parse_args()
return options,arguments
def interact(scope):
banner = "gem5 Interactive Console"
ipshell = None
prompt_in1 = "gem5 \\#> "
prompt_out = "gem5 \\#: "
# Is IPython version 0.10 or earlier available?
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(argv=["-prompt_in1", prompt_in1,
"-prompt_out", prompt_out],
banner=banner, user_ns=scope)
except ImportError:
pass
# Is IPython version 0.11 or later available?
if not ipshell:
try:
import IPython
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = prompt_in1
cfg.PromptManager.out_template = prompt_out
ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope,
banner1=banner)
except ImportError:
pass
if ipshell:
ipshell()
else:
# Use the Python shell in the standard library if IPython
# isn't available.
code.InteractiveConsole(scope).interact(banner)
def main(*args):
import m5
import core
import debug
import defines
import event
import info
import stats
import trace
from util import fatal
if len(args) == 0:
options, arguments = parse_options()
elif len(args) == 2:
options, arguments = args
else:
        raise TypeError("main() takes 0 or 2 arguments (%d given)" % len(args))
m5.options = options
def check_tracing():
if defines.TRACING_ON:
return
fatal("Tracing is not enabled. Compile with TRACING_ON")
# Set the main event queue for the main thread.
event.mainq = event.getEventQueue(0)
event.setEventQueue(event.mainq)
if not os.path.isdir(options.outdir):
os.makedirs(options.outdir)
# These filenames are used only if the redirect_std* options are set
stdout_file = os.path.join(options.outdir, options.stdout_file)
stderr_file = os.path.join(options.outdir, options.stderr_file)
# Print redirection notices here before doing any redirection
if options.redirect_stdout and not options.redirect_stderr:
print "Redirecting stdout and stderr to", stdout_file
else:
if options.redirect_stdout:
print "Redirecting stdout to", stdout_file
if options.redirect_stderr:
print "Redirecting stderr to", stderr_file
# Now redirect stdout/stderr as desired
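    # os.dup2 repoints the process-level stdout/stderr file descriptors at the
    # opened file, so output written by native code is captured too; without -e,
    # stderr shares the stdout file.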
if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stdout.fileno())
if not options.redirect_stderr:
os.dup2(redir_fd, sys.stderr.fileno())
if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stderr.fileno())
done = False
if options.build_info:
done = True
print 'Build information:'
print
print 'compiled %s' % defines.compileDate;
print 'build options:'
keys = defines.buildEnv.keys()
keys.sort()
for key in keys:
val = defines.buildEnv[key]
print ' %s = %s' % (key, val)
print
if options.copyright:
done = True
print info.COPYING
print
if options.readme:
done = True
print 'Readme:'
print
print info.README
print
if options.debug_help:
done = True
check_tracing()
debug.help()
if options.list_sim_objects:
import SimObject
done = True
print "SimObjects:"
objects = SimObject.allClasses.keys()
objects.sort()
for name in objects:
obj = SimObject.allClasses[name]
print " %s" % obj
params = obj._params.keys()
params.sort()
for pname in params:
param = obj._params[pname]
default = getattr(param, 'default', '')
print " %s" % pname
if default:
print " default: %s" % default
print " desc: %s" % param.desc
print
print
if done:
sys.exit(0)
# setting verbose and quiet at the same time doesn't make sense
if options.verbose > 0 and options.quiet > 0:
options.usage(2)
verbose = options.verbose - options.quiet
if verbose >= 0:
print "gem5 Simulator System. http://gem5.org"
print brief_copyright
print
print "gem5 compiled %s" % defines.compileDate;
print "gem5 started %s" % \
datetime.datetime.now().strftime("%b %e %Y %X")
print "gem5 executing on %s" % socket.gethostname()
# in Python 3 pipes.quote() is moved to shlex.quote()
import pipes
print "command line:", " ".join(map(pipes.quote, sys.argv))
print
# check to make sure we can find the listed script
if not arguments or not os.path.isfile(arguments[0]):
if arguments and not os.path.isfile(arguments[0]):
print "Script %s not found" % arguments[0]
options.usage(2)
# tell C++ about output directory
core.setOutputDir(options.outdir)
# update the system path with elements from the -p option
sys.path[0:0] = options.path
# set stats options
stats.initText(options.stats_file)
# set debugging options
debug.setRemoteGDBPort(options.remote_gdb_port)
for when in options.debug_break:
debug.schedBreak(int(when))
if options.debug_flags:
check_tracing()
on_flags = []
off_flags = []
for flag in options.debug_flags:
off = False
if flag.startswith('-'):
flag = flag[1:]
off = True
if flag not in debug.flags:
print >>sys.stderr, "invalid debug flag '%s'" % flag
sys.exit(1)
if off:
debug.flags[flag].disable()
else:
debug.flags[flag].enable()
if options.debug_start:
check_tracing()
e = event.create(trace.enable, event.Event.Debug_Enable_Pri)
event.mainq.schedule(e, options.debug_start)
else:
trace.enable()
trace.output(options.debug_file)
for ignore in options.debug_ignore:
check_tracing()
trace.ignore(ignore)
sys.argv = arguments
sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path
filename = sys.argv[0]
filedata = file(filename, 'r').read()
filecode = compile(filedata, filename, 'exec')
scope = { '__file__' : filename,
'__name__' : '__m5_main__' }
# we want readline if we're doing anything interactive
if options.interactive or options.pdb:
exec "import readline" in scope
# if pdb was requested, execfile the thing under pdb, otherwise,
# just do the execfile normally
if options.pdb:
import pdb
import traceback
pdb = pdb.Pdb()
try:
pdb.run(filecode, scope)
except SystemExit:
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
t = sys.exc_info()[2]
while t.tb_next is not None:
t = t.tb_next
pdb.interaction(t.tb_frame,t)
else:
exec filecode in scope
# once the script is done
if options.interactive:
interact(scope)
if __name__ == '__main__':
from pprint import pprint
options, arguments = parse_options()
print 'opts:'
pprint(options, indent=4)
print
print 'args:'
pprint(arguments, indent=4)
|
|
#!/usr/bin/env python2
# The MIT License (MIT)
#
# Copyright (c) 2015 Kyle A. Barlow, Shane O'Connor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""\
The script kicks off the ddg step of the benchmark run using the ddg_monomer application from the
Rosetta suite. The command lines used herein are intended to reproduce the protocol from row 16 of the original paper by Kellogg et al.:
Kellogg, EH, Leaver-Fay, A, Baker, D. Role of conformational sampling in computing mutation-induced changes in protein
structure and stability. 2011. Proteins. 79(3):830-8. doi: 10.1002/prot.22921.
Usage:
run_ddg.py [options]...
Options:
-o --output_directory OUTPUT_DIR
The path to a directory previously generated from the run_preminimization script. This defaults to the most recent directory in job_output, if this exists.
-n --num_struct NUM_STRUCT
This specifies the number of wildtype/mutant structures generated. If this is used with --test then the --test value for this option takes priority. [default: 50]
--force
When this option is set, the most recent directory in job_output, if it exists, will be used without prompting the user.
--test
        When this option is set, a shorter version of the benchmark will run with fewer input structures, fewer DDG experiments, and fewer generated structures. This should be used to test the scripts but not for analysis.
--talaris2014
When this option is set, the talaris2014 score function will be used rather than the default score function. Warning: This option may break when talaris2014 becomes the default Rosetta score function.
--beta_july15
When this option is set, the July 2015 beta score function will be used rather than the default score function. Warning: This option may break when this score function is removed.
--beta_nov15
When this option is set, the November 2015 beta score function will be used rather than the default score function. Warning: This option may break when this score function is removed.
Authors:
Kyle Barlow
Shane O'Connor
"""
import sys
import os
import re
import shutil
import time
import datetime
import inspect
import multiprocessing
import glob
import cPickle as pickle
import getpass
import gzip
import rosetta.parse_settings
from rosetta.write_run_file import process as write_run_file
from analysis.libraries import docopt
from analysis.stats import read_file, write_file, prompt_yn
from run_preminimization import task_subfolder as preminimization_task_subfolder, mutfiles_subfolder
try:
import json
except:
import simplejson as json
task_subfolder = 'ddg'
generated_scriptname = 'ddg_step'
def create_constraints_file(preminimization_log, outfile_path):
'''This does the work of the convert_to_cst_file.sh script in the Rosetta repository.'''
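    # Each matching log line is split on whitespace: fields 5 and 7 are taken as
    # the two residue numbers and fields 9 and 12 as the HARMONIC restraint's two
    # parameters (ideal distance and standard deviation), assuming the same
    # "c-alpha" log format that convert_to_cst_file.sh parses.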
constraints = []
contents = read_file(preminimization_log)
for line in contents.split('\n'):
if line.startswith("c-alpha"):
line = line.split()
constraints.append("AtomPair CA %s CA %s HARMONIC %s %s" % (line[5], line[7], line[9], line[12]))
write_file(outfile_path, '\n'.join(constraints))
return outfile_path
def create_constraints_files(preminimized_pdb_data_dir, constraints_data_dir):
constraints_files = {}
preminimized_structures = {}
for pdir in os.listdir(preminimized_pdb_data_dir):
if len(pdir) == 6 and pdir[4] == '_':
pdb_id = pdir.split('_')[0]
chain_id = pdir.split('_')[1]
pcase_path = os.path.join(preminimized_pdb_data_dir, pdir)
output_files = os.listdir(pcase_path)
output_structure = 'min_cst_0.5.%s_0001.pdb.gz' % pdir
if 'rosetta.out.gz' not in output_files:
raise Exception('The expected output file rosetta.out.gz was not found in %s.' % pcase_path)
if (output_structure) not in output_files:
raise Exception('The expected preminimized structure %s was not found in %s.' % (output_structure, pcase_path))
constraints_files[(pdb_id, chain_id)] = create_constraints_file(os.path.join(pcase_path, 'rosetta.out.gz'), os.path.join(constraints_data_dir, '%s_%s.cst' % (pdb_id, chain_id)))
preminimized_structures[(pdb_id, chain_id)] = os.path.join(pcase_path, output_structure)
return constraints_files, preminimized_structures
if __name__ == '__main__':
import pprint
try:
arguments = docopt.docopt(__doc__.format(**locals()))
    except Exception as e:
print('Failed while parsing arguments: %s.' % str(e))
sys.exit(1)
# Determine the output directory
output_dir = None
if arguments.get('--output_directory'):
output_dir = arguments['--output_directory'][0]
if not(os.path.exists(output_dir)):
raise Exception('The directory %s does not exist.' % output_dir)
else:
output_dir = os.path.abspath('job_output')
if os.path.exists(output_dir):
existing_dirs = [os.path.join(output_dir, d) for d in os.listdir(output_dir) if d.find('ddg_monomer_16')!=-1 and os.path.isdir(os.path.join(output_dir, d))]
            most_recent_directory = sorted(existing_dirs)[-1] if existing_dirs else None
if most_recent_directory:
answer = None
if arguments.get('--force'):
answer = True
print('\nRunning the ddg_monomer step in %s.' % most_recent_directory)
else:
answer = prompt_yn('\nNo output path was specified. Use %s (y/n)?' % most_recent_directory)
if not answer:
print('No output path was specified. Exiting.\n')
sys.exit(1)
output_dir = most_recent_directory
else:
print('No preminimization output could be found in the job_output directory. Exiting.\n')
sys.exit(1)
# Read the settings file
settings = rosetta.parse_settings.get_dict()
# Set the job output directories
output_data_dir = os.path.join(output_dir, 'data')
mutfile_data_dir = os.path.join(output_data_dir, mutfiles_subfolder)
preminimized_pdb_data_dir = os.path.join(output_dir, preminimization_task_subfolder)
constraints_data_dir = os.path.join(output_data_dir, 'constraints')
try: os.mkdir(constraints_data_dir)
except: pass
for p in [output_data_dir, mutfile_data_dir, preminimized_pdb_data_dir, constraints_data_dir]:
if not os.path.exists(p):
raise Exception('The folder %s was expected to exist after the preminimization step but could not be found.' % p)
# Read in the dataset file
try:
dataset_filepath = os.path.join(output_dir, 'dataset.json')
dataset = json.loads(read_file(dataset_filepath))
dataset_cases = dataset['data']
    except Exception as e:
        raise Exception('An error occurred parsing the JSON file: %s.' % str(e))
# Run all cases with associated mutfiles
mutfiles = glob.glob(os.path.join(mutfile_data_dir, '*.mutfile'))
existing_case_ids = []
for m in mutfiles:
try:
filename = os.path.split(m)[1]
assert(filename.endswith('.mutfile'))
record_id = int(filename[:-8])
existing_case_ids.append(record_id)
except:
raise Exception('The file %s was expected to have a name like [record_id].mutfile e.g. 1245.mutfile.' % m)
count_by_pdb = {}
job_dict = {}
dataset_cases_by_id = {}
for ddg_case in dataset_cases:
dataset_cases_by_id[ddg_case['RecordID']] = ddg_case
for existing_case_id in existing_case_ids:
if existing_case_id not in dataset_cases_by_id:
raise Exception('The dataset case corresponding to %d.mutfile could not be found in %s.' % (existing_case_id, dataset_filepath))
# Write job dict and setup self-contained data directory
extra_s = ''
if arguments['--talaris2014']:
extra_s = ' (using talaris2014)'
if arguments['--beta_july15']:
assert(not(extra_s))
extra_s = ' (using beta_july15)'
if arguments['--beta_nov15']:
assert(not(extra_s))
extra_s = ' (using beta_nov15)'
print('Creating constraint files...%s' % extra_s)
constraints_files, preminimized_structures = create_constraints_files(preminimized_pdb_data_dir, constraints_data_dir)
number_of_structural_pairs = arguments['--num_struct'][0]
if arguments['--test']:
number_of_structural_pairs = 2 # only create two wildtype/mutant pairs in test mode
for existing_case_id in existing_case_ids:
dataset_case = dataset_cases_by_id[existing_case_id]
pdb_id = dataset_case['PDBFileID']
chains = set([m['Chain'] for m in dataset_case['Mutations']])
if not len(chains) == 1:
raise Exception('This script is only intended for monomeric structures but the set of mutations in case %d of %s uses more than one chain.' % (existing_case_id, dataset_filepath))
chain_id = chains.pop()
sys.stdout.write('.')
sys.stdout.flush()
sub_dict = {}
constraints_file = constraints_files.get((pdb_id, chain_id))
preminimized_structure = preminimized_structures.get((pdb_id, chain_id))
mutfile = os.path.join(mutfile_data_dir, '%d.mutfile' % existing_case_id)
if not constraints_file:
raise Exception('Could not determine the constraints file for %s, chain %s.' % (pdb_id, chain_id))
if not preminimized_structure:
raise Exception('Could not determine the preminimized structure file for %s, chain %s.' % (pdb_id, chain_id))
if not os.path.exists(mutfile):
raise Exception('Could not locate the mutfile %s for dataset record %d.' % (mutfile, existing_case_id))
sub_dict['-constraints::cst_file'] = os.path.relpath(constraints_file, output_dir)
sub_dict['-in:file:s'] = os.path.relpath(preminimized_structure, output_dir)
sub_dict['-ddg::mut_file'] = os.path.relpath(mutfile, output_dir)
job_dict[os.path.join(task_subfolder, str(existing_case_id))] = sub_dict
sys.stdout.write('\n')
# Keep a copy of the preminimization step pickle for debugging
pickle_file = os.path.join(output_data_dir, 'job_dict.pickle')
if os.path.exists(pickle_file):
existing_job_keys = pickle.load(open(pickle_file, 'r')).keys()
for k in existing_job_keys:
if k.startswith(preminimization_task_subfolder):
shutil.copy(pickle_file, os.path.join(output_data_dir, '%s_step_dict.pickle' % preminimization_task_subfolder))
break
with open(pickle_file, 'w') as f:
pickle.dump(job_dict, f)
settings['numjobs'] = '%d' % len(existing_case_ids)
settings['mem_free'] = '5.0G'
settings['scriptname'] = generated_scriptname
settings['appname'] = 'ddg_monomer'
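    # These ddg_monomer flags are intended to reproduce the row 16 protocol of
    # Kellogg et al. (see the module docstring); -ddg::iterations controls how
    # many wildtype/mutant structure pairs are generated per case.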
settings['rosetta_args_list'] = [
'-in:file:fullatom', '-ignore_unrecognized_res', '-fa_max_dis', '9.0',
'-ddg::dump_pdbs' ,'true', '-ddg::suppress_checkpointing' ,'true',
'-ddg:weight_file' ,'soft_rep_design' ,'-ddg::iterations' ,str(number_of_structural_pairs),
'-ddg::local_opt_only' ,'false' ,'-ddg::min_cst' ,'true',
'-ddg::mean' ,'false' ,'-ddg::min', 'true',
'-ddg::sc_min_only' ,'false',
'-ddg::ramp_repulsive', 'true'
]
if arguments['--talaris2014']:
settings['rosetta_args_list'].extend(['-talaris2014', 'true'])
elif arguments['--beta_july15']:
settings['rosetta_args_list'].extend(['-beta_july15'])
elif arguments['--beta_nov15']:
settings['rosetta_args_list'].extend(['-beta_nov15'])
settings['output_dir'] = output_dir
write_run_file(settings)
job_path = os.path.abspath(output_dir)
print('''Job files written to directory: %s.\n\nTo launch this job locally (this will take some time):
cd %s
python %s.py
It is recommended to run this on an SGE cluster in which case use these commands instead:
cd %s
qsub %s.py\n''' % (job_path, job_path, generated_scriptname, job_path, generated_scriptname))
|
|
"""Wrappers and utils for PatchMatch
Originally written by Neeraj Kumar <me@neerajkumar.org>
Licensed under the 3-clause BSD License:
Copyright (c) 2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
from PIL import Image
from subprocess import Popen, PIPE, call
from nkpylib.utils import *
from nkpylib.imageutils import imageiter, imagelociter
def pm(src, tgt, annf, annd):
"""Wrapper on patchmatch"""
for f in [annf, annd]:
try:
os.makedirs(os.path.dirname(f))
except OSError: pass
call(['pm', src, tgt, annf, annd])
def seg(src, tgt, k, min=100, sigma=0.8):
"""Wrapper on segmentation"""
try:
os.makedirs(os.path.dirname(tgt))
except OSError: pass
args = 'segment %(sigma)s %(k)s %(min)s <(convert "%(src)s" ppm:- ) >(convert - "%(tgt)s")' % (locals())
#print args
#call(args, shell=True, executable='/bin/bash') #FIXME doesn't work for some reason
call(['/bin/bash', '-c', args])
time.sleep(0.5)
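    # The command uses bash process substitution to hand the segmenter a PPM
    # conversion of src and to convert its output back into tgt, hence the
    # explicit /bin/bash invocation; the short sleep presumably gives the
    # output-side convert process time to finish writing tgt.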
def annf2data(annf):
"""Converts a raw ann field to a list of (tx, ty) pairs.
This is in same order and size as getdata().
Returns (data, maxval)
"""
im = Image.open(annf) if isinstance(annf, basestring) else annf
data = []
maxval = 0
for val in im.getdata():
r, g, b = val[:3]
n = (b << 16) + (g << 8) + r
ty = (n >> 12)
tx = n % (1 << 12)
data.append((tx,ty))
maxval = max(maxval, tx, ty)
return data, maxval
def saveandretim(im, fname=None):
"""Saves the given image if fname is not None.
Returns it as well"""
if fname:
try:
os.makedirs(os.path.dirname(fname))
except OSError: pass
im.save(fname)
return im
def annd2data(annd):
"""Converts raw ANN distance map to list of distances.
This is in same order and size as getdata().
"""
im = Image.open(annd) if isinstance(annd, basestring) else annd
data = []
for val in im.getdata():
r, g, b = val[:3]
n = (b << 16) + (g << 8) + r
data.append(n)
return data
def drawmatches(annf, outfname, im1fname, im2fname, scale=5, ss=111, orientation='vertical'):
"""Draws the computed nearest neighbor field using a pair of images"""
from PIL import ImageDraw
from imageutils import ImagePair, randcolor
scale = float(scale)
ss = int(ss)
data, maxval = annf2data(annf)
im1 = Image.open(im1fname).convert('RGB')
im2 = Image.open(im2fname).convert('RGB')
dims = im1.size
im1 = im1.resize((int(im1.size[0]*scale), int(im1.size[1]*scale)), Image.ANTIALIAS)
im2 = im2.resize((int(im2.size[0]*scale), int(im2.size[1]*scale)), Image.ANTIALIAS)
ip = ImagePair(im1, im2, orientation=orientation, background=(255,255,255))
draw = ImageDraw.Draw(ip.outim)
pairs = []
i = 0
assert len(data) == dims[0]*dims[1]
cs = lambda x: int((x+0.5)*scale)
for y in range(dims[1]):
for x in range(dims[0]):
tx, ty = data[i]
i += 1
if i % ss != 0: continue
c1, c2 = (0, cs(x), cs(y)), (1, cs(tx), cs(ty))
color = randcolor('RGB')
if (tx,ty) != (0,0):
#print x, y, tx, ty, i, c1, c2
pass
ip.drawline([c1, c2], fill=color)
try:
os.makedirs(os.path.dirname(outfname))
except OSError: pass
ip.outim.save(outfname)
def annf2im(annf, outfname=None):
"""Converts an ANN field image (output from PatchMatch) to a readable image.
If outfname is given, then saves to disk.
Returns the output image regardless."""
im = Image.open(annf) if isinstance(annf, basestring) else annf
data, maxval = annf2data(im)
fac = 255.0/maxval
data = [(int(tx*fac), 0, int(ty*fac)) for tx, ty in data]
outim = Image.new('RGB', im.size)
outim.putdata(data)
return saveandretim(outim, outfname)
def segannf(annfim, segfname):
"""Segments an ANN field image (output of annf2im) to get regions."""
seg(annfim, segfname, k=300)
def scorefuncE(locs, offsets, dists):
"""Given offsets and distances within a region, computes a score.
This version uses entropy on distances"""
from nkpylib.hist import histogram
#print offsets[:5], dists[:5]
dh = histogram(dists, binwidth=10000, incr=1, normalize=1000.0)
vals = [v for k, v in sorted(dh.items())]
if 1:
e = 30.0*entropy(vals)
else:
e = 100.0/entropy(vals) # this is definitely wrong
#spark(vals)
return e
def consistency(locs, trans):
"""Returns the consistency between the given pair of locations and transformations"""
num = l2dist(*trans)
den = l2dist(*locs)
ret = (num+1.0)/(den+1.0)
#print locs, trans, num, den, ret
return ret
def scorefuncC(locs, offsets, dists, thresh=0.8):
"""Given offsets and distances within a region, computes a score.
This version uses the coherence as defined in the NRDC paper."""
from math import sqrt
#print locs[:5], offsets[:5], dists[:5], len(offsets)
# sample sqrt() of the pairs
if len(offsets) < 2: return 0
n = int(sqrt(len(offsets)))
pairs = set()
while len(pairs) < n:
i, j = minsample(xrange(len(offsets)), 2)
pairs.add((i,j))
pairs = sorted(pairs)
#pairs = [(i, j) for i, u in enumerate(offsets) for j, v in enumerate(offsets[i+1:])]
#pairs = minsample(pairs, n)
#print len(pairs), pairs[:5]
cons = [consistency((locs[i], locs[j]), (offsets[i], offsets[j])) for i, j in pairs]
ncons = sum(1.0 for c in cons if c > thresh)
#print cons, ncons, ncons/len(cons)
error = ncons/len(cons) # 0-1, 1=totally incoherent
ret = int(255*(error))
#print len(cons), ret
return ret
scorefunc = scorefuncC
def score(annseg, annf, annd, outfname=None):
"""Scores regions from the given segmented ANN field.
'annf' should be the raw field, not the processed one.
Outputs an 'L' image with scores.
If outfname is given, writes to that fname.
Returns the image regardless."""
from collections import defaultdict
# read annf and annd and convert to lists
offs, maxval = annf2data(annf)
dists = annd2data(annd)
# aggregate into lists for each region
locs = defaultdict(list)
offh = defaultdict(list)
disth = defaultdict(list)
seg = Image.open(annseg) if isinstance(annseg, basestring) else annseg
w, h = seg.size
for col, off, d, (x,y) in zip(seg.getdata(), offs, dists, imagelociter(seg)):
if x>=w-7 or y>=h-7: # bottom-right border, with no vals, so ignore
#print x,w,y,h,col,off,d
continue
locs[col].append((x,y))
offh[col].append(off)
disth[col].append(d)
# compute scores per region
scores = {}
for col in offh:
scores[col] = int(min(scorefunc(locs[col], offh[col], disth[col]), 255))
print 'Got %d regions, with min score %s and max score %s' % (len(scores), min(scores.values()), max(scores.values()))
# create output
outim = Image.new('L', seg.size)
data = [scores.get(col, 0) for col in seg.getdata()]
outim.putdata(data)
return saveandretim(outim, outfname)
def offsetimg(fname, off=(3,3)):
"""Offsets the given image by the given amount"""
from PIL import ImageChops
im = Image.open(fname)
im = ImageChops.offset(im, off[0], off[1])
im.save(fname)
def match(src, dst, dir, *tgts):
"""Matches pixels from src to all tgts and outputs match likelihood to dst.
The output is the same size as the source, and is an 'L' image."""
times = [time.time()]
scoreims = []
Image.open(src).save('%s-orig.png' % (dst))
# operate on each image
for i, tgt in enumerate(tgts):
def makefname(suffix):
tgtbase = os.path.basename(tgt).rsplit('.',1)[0]
fname = os.path.join(dir, '%s-%s.png' % (tgtbase, suffix))
return fname
annffname, anndfname = 'annf.png', 'annd.png'
pm(src, tgt, annffname, anndfname)
times.append(time.time())
annfimfname = makefname('annif')
annfim = annf2im(annffname, annfimfname)
times.append(time.time())
anndimfname = makefname('annid')
dists = annd2data(anndfname)
dim = Image.new('L', annfim.size)
fac = 255.0/1000000
dim.putdata([int(fac*d) for d in dists])
dim.save(anndimfname)
times.append(time.time())
annsegfname = makefname('anns')
segannf(annfimfname, annsegfname)
times.append(time.time())
scoreimfname = makefname('annsc')
scoreim = score(annsegfname, annffname, anndfname, outfname=scoreimfname)
times.append(time.time())
scoreims.append(scoreim)
for fname in [annfimfname, anndimfname, annsegfname, scoreimfname]:
offsetimg(fname)
# add all together
outim = Image.new('L', scoreims[0].size, 0)
data = outim.getdata()
for sim in scoreims:
data = [d+s for d, s in zip(data, sim.getdata())]
data = [int(float(d)/len(scoreims)) for d in data]
outim.putdata(data)
saveandretim(outim, dst)
offsetimg(dst)
print 'Finished matching (%s, %s) -> %s in times %s' % (src, tgts, dst, getTimeDiffs(times, percs=1))
def matchmain(placedir, num=10):
"""Runs matches from the given match directory"""
datadir = '/'.join(placedir.split('/')[:2])
j = lambda d, p: os.path.join(d, p)
tgts = [os.path.join(placedir, 'gt-%03d.jpg' % (i)) for i in range(int(num))]
matchdir = placedir.replace('/google/', '/matches/')
match(j(datadir, 'thumb.jpg'), j(matchdir, 'annscored.png'), matchdir, *tgts)
if __name__ == '__main__':
TASKS = 'matchmain match annf2im drawmatches seg segannf score'.split(' ')
if len(sys.argv) < 3:
print 'Usage: python %s <%s> [args]' % (sys.argv[0], '|'.join(TASKS))
print ' python %s match <src> <dst> <tgt> [<tgt> ...]' % (sys.argv[0])
print ' python %s annf2im <annf> <dst>' % (sys.argv[0])
print ' python %s drawmatches <annf> <dst> <im1> <im2> [<scale=5> <ss=111> <orientation=vertical>]' % (sys.argv[0])
|
|
import unittest
from labella.distributor import Distributor
from labella.node import Node
class DistributorTestCase(unittest.TestCase):
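    # These tests exercise labella's Distributor: computeRequiredWidth and
    # estimateRequiredLayers size the layout, countIdealOverlaps annotates each
    # node's overlapCount, and distribute() splits nodes into layers, leaving
    # stub nodes in earlier layers for labels pushed further down.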
def test_computeRequiredWidth_1(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 50),
Node(2, 50),
Node(3, 50),
Node(3, 50),
Node(3, 50),
Node(304, 50),
Node(454, 50),
Node(454, 50),
Node(454, 50),
Node(804, 50),
Node(804, 70),
Node(804, 50),
Node(804, 50),
Node(854, 50),
Node(854, 50)]
exp_out = 812
dist = Distributor(options)
self.assertEqual(exp_out, dist.computeRequiredWidth(nodes))
def test_computeRequiredWidth_2(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100),
Node(2, 100),
Node(3, 100),
Node(3, 100),
Node(3, 100),
Node(304, 100),
Node(454, 100),
Node(454, 100),
Node(454, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(854, 100),
Node(854, 100)]
exp_out = 1542
dist = Distributor(options)
self.assertEqual(exp_out, dist.computeRequiredWidth(nodes))
    def test_estimateRequiredLayers_1(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 50),
Node(2, 50),
Node(3, 50),
Node(3, 50),
Node(3, 50),
Node(304, 50),
Node(454, 50),
Node(454, 50),
Node(454, 50),
Node(804, 50),
Node(804, 70),
Node(804, 50),
Node(804, 50),
Node(854, 50),
Node(854, 50)]
exp_out = 1
dist = Distributor(options)
self.assertEqual(exp_out, dist.estimateRequiredLayers(nodes))
def test_estimateRequiredLayers_2(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100),
Node(2, 100),
Node(3, 100),
Node(3, 100),
Node(3, 100),
Node(304, 100),
Node(454, 100),
Node(454, 100),
Node(454, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(854, 100),
Node(854, 100)]
exp_out = 2
dist = Distributor(options)
self.assertEqual(exp_out, dist.estimateRequiredLayers(nodes))
def test_estimateRequiredLayers_3(self):
options = {
'algorithm': 'overlap',
'layerWidth': None,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100),
Node(2, 100),
Node(3, 100),
Node(3, 100),
Node(3, 100),
Node(304, 100),
Node(454, 100),
Node(454, 100),
Node(454, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(854, 100),
Node(854, 100)]
exp_out = 1
dist = Distributor(options)
self.assertEqual(exp_out, dist.estimateRequiredLayers(nodes))
def test_countIdealOverlaps(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100),
Node(2, 100),
Node(3, 100),
Node(3, 100),
Node(3, 100),
Node(304, 100),
Node(454, 100),
Node(454, 100),
Node(454, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(804, 100),
Node(854, 100),
Node(854, 100)]
dist = Distributor(options)
exp_out = [5, 5, 5, 5, 5, 1, 3, 3, 3, 6, 6, 6, 6, 6, 6]
dist.countIdealOverlaps(nodes)
for i in range(len(exp_out)):
self.assertEqual(exp_out[i], nodes[i].overlapCount)
def test_algorithm_simple_1(self):
options = {
'algorithm': 'simple',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100), # 0
Node(2, 100), # 1
Node(3, 100), # 2
Node(3, 100), # 3
Node(3, 100), # 4
Node(304, 100), # 5
Node(454, 100), # 6
Node(454, 100), # 7
Node(454, 100), # 8
Node(804, 100), # 9
Node(804, 100), # 10
Node(804, 100), # 11
Node(804, 100), # 12
Node(854, 100), # 13
Node(854, 100)] # 14
nodedict = {i:n for i, n in enumerate(nodes)}
dist = Distributor(options)
layers = dist.distribute(nodes)
self.assertEqual(layers[0][0], nodedict[0])
self.assertTrue(layers[0][1].isStub())
self.assertEqual(layers[0][2], nodedict[2])
self.assertTrue(layers[0][3].isStub())
self.assertEqual(layers[0][4], nodedict[4])
self.assertTrue(layers[0][5].isStub())
self.assertEqual(layers[0][6], nodedict[6])
self.assertTrue(layers[0][7].isStub())
self.assertEqual(layers[0][8], nodedict[8])
self.assertTrue(layers[0][9].isStub())
self.assertEqual(layers[0][10], nodedict[10])
self.assertTrue(layers[0][11].isStub())
self.assertEqual(layers[0][12], nodedict[12])
self.assertTrue(layers[0][13].isStub())
self.assertEqual(layers[0][14], nodedict[14])
self.assertEqual(layers[1][0], nodedict[1])
self.assertEqual(layers[1][1], nodedict[3])
self.assertEqual(layers[1][2], nodedict[5])
self.assertEqual(layers[1][3], nodedict[7])
self.assertEqual(layers[1][4], nodedict[9])
self.assertEqual(layers[1][5], nodedict[11])
self.assertEqual(layers[1][6], nodedict[13])
def test_algorithm_simple_2(self):
options = {
'algorithm': 'simple',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100), # 0
Node(2, 200), # 1
Node(3, 100), # 2
Node(3, 200), # 3
Node(3, 50), # 4
Node(304, 200), # 5
Node(454, 50), # 6
Node(454, 200), # 7
Node(454, 90), # 8
Node(804, 200), # 9
Node(804, 90), # 10
Node(804, 200), # 11
Node(804, 50), # 12
Node(854, 200), # 13
Node(854, 70)] # 14
nodedict = {i:n for i, n in enumerate(nodes)}
dist = Distributor(options)
layers = dist.distribute(nodes)
self.assertEqual(layers[0][0], nodedict[0])
self.assertTrue(layers[0][1].isStub())
self.assertTrue(layers[0][2].isStub())
self.assertEqual(layers[0][3], nodedict[3])
self.assertTrue(layers[0][4].isStub())
self.assertTrue(layers[0][5].isStub())
self.assertEqual(layers[0][6], nodedict[6])
self.assertTrue(layers[0][7].isStub())
self.assertTrue(layers[0][8].isStub())
self.assertEqual(layers[0][9], nodedict[9])
self.assertTrue(layers[0][10].isStub())
self.assertTrue(layers[0][11].isStub())
self.assertEqual(layers[0][12], nodedict[12])
self.assertTrue(layers[0][13].isStub())
self.assertTrue(layers[0][14].isStub())
self.assertEqual(layers[1][0], nodedict[1])
self.assertTrue(layers[1][1].isStub())
self.assertEqual(layers[1][2], nodedict[4])
self.assertTrue(layers[1][3].isStub())
self.assertEqual(layers[1][4], nodedict[7])
self.assertTrue(layers[1][5].isStub())
self.assertEqual(layers[1][6], nodedict[10])
self.assertTrue(layers[1][7].isStub())
self.assertEqual(layers[1][8], nodedict[13])
self.assertEqual(layers[2][0], nodedict[2])
self.assertEqual(layers[2][1], nodedict[5])
self.assertEqual(layers[2][2], nodedict[8])
self.assertEqual(layers[2][3], nodedict[11])
self.assertEqual(layers[2][4], nodedict[14])
def test_algorithm_overlap_1(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100), # 0
Node(2, 100), # 1
Node(3, 99), # 2
Node(3, 100), # 3
Node(3, 101), # 4
Node(304, 100), # 5
Node(454, 99), # 6
Node(454, 100), # 7
Node(454, 101), # 8
Node(804, 98), # 9
Node(804, 99), # 10
Node(804, 100), # 11
Node(804, 101), # 12
Node(854, 99), # 13
Node(854, 100)] # 14
nodedict = {i:n for i, n in enumerate(nodes)}
dist = Distributor(options)
layers = dist.distribute(nodes)
self.assertEqual(layers[0][0], nodedict[7])
self.assertEqual(layers[0][1], nodedict[8])
self.assertEqual(layers[0][2], nodedict[3])
self.assertEqual(layers[0][3], nodedict[4])
self.assertEqual(layers[0][4], nodedict[13])
self.assertEqual(layers[0][5], nodedict[14])
self.assertEqual(layers[0][6], nodedict[5])
self.assertTrue(layers[0][7].isStub())
self.assertTrue(layers[0][8].isStub())
self.assertTrue(layers[0][9].isStub())
self.assertTrue(layers[0][10].isStub())
self.assertTrue(layers[0][11].isStub())
self.assertTrue(layers[0][12].isStub())
self.assertTrue(layers[0][13].isStub())
self.assertTrue(layers[0][14].isStub())
self.assertEqual(layers[1][0], nodedict[10])
self.assertEqual(layers[1][1], nodedict[11])
self.assertEqual(layers[1][2], nodedict[12])
self.assertEqual(layers[1][3], nodedict[0])
self.assertEqual(layers[1][4], nodedict[1])
self.assertEqual(layers[1][5], nodedict[2])
self.assertEqual(layers[1][6], nodedict[6])
self.assertTrue(layers[1][7].isStub())
self.assertEqual(layers[2][0], nodedict[9])
def test_algorithm_overlap_2(self):
options = {
'algorithm': 'overlap',
'layerWidth': 960,
'density': 0.85,
'nodeSpacing': 3,
'stubWidth': 1
}
nodes = [
Node(1, 100), # 0
Node(2, 200), # 1
Node(3, 100), # 2
Node(3, 200), # 3
Node(3, 50), # 4
Node(304, 200), # 5
Node(454, 50), # 6
Node(454, 200), # 7
Node(454, 90), # 8
Node(804, 200), # 9
Node(804, 90), # 10
Node(804, 200), # 11
Node(804, 50), # 12
Node(854, 200), # 13
Node(854, 70)] # 14
nodedict = {i:n for i, n in enumerate(nodes)}
dist = Distributor(options)
layers = dist.distribute(nodes)
self.assertEqual(layers[0][0], nodedict[4])
self.assertEqual(layers[0][1], nodedict[13])
self.assertEqual(layers[0][2], nodedict[14])
self.assertEqual(layers[0][3], nodedict[6])
self.assertEqual(layers[0][4], nodedict[8])
self.assertEqual(layers[0][5], nodedict[5])
self.assertTrue(layers[0][6].isStub())
self.assertTrue(layers[0][7].isStub())
self.assertTrue(layers[0][8].isStub())
self.assertTrue(layers[0][9].isStub())
self.assertTrue(layers[0][10].isStub())
self.assertTrue(layers[0][11].isStub())
self.assertTrue(layers[0][12].isStub())
self.assertTrue(layers[0][13].isStub())
self.assertTrue(layers[0][14].isStub())
self.assertEqual(layers[1][0], nodedict[11])
self.assertEqual(layers[1][1], nodedict[12])
self.assertEqual(layers[1][2], nodedict[2])
self.assertEqual(layers[1][3], nodedict[3])
self.assertEqual(layers[1][4], nodedict[7])
self.assertTrue(layers[1][5].isStub())
self.assertTrue(layers[1][6].isStub())
self.assertTrue(layers[1][7].isStub())
self.assertTrue(layers[1][8].isStub())
self.assertEqual(layers[2][0], nodedict[9])
self.assertEqual(layers[2][1], nodedict[0])
self.assertEqual(layers[2][2], nodedict[1])
self.assertEqual(layers[2][3], nodedict[10])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import exception
from nova.i18n import _, _LI, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.pci import manager as pci_manager
from nova.pci import whitelist as pci_whitelist
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
resource_tracker_opts = [
cfg.IntOpt('reserved_host_disk_mb', default=0,
help='Amount of disk in MB to reserve for the host'),
cfg.IntOpt('reserved_host_memory_mb', default=512,
help='Amount of memory in MB to reserve for the host'),
cfg.StrOpt('compute_stats_class',
default='nova.compute.stats.Stats',
help='Class that will manage stats for the local compute host'),
cfg.ListOpt('compute_resources',
default=['vcpu'],
help='The names of the extra resources to track.'),
]
CONF = cfg.CONF
CONF.register_opts(resource_tracker_opts)
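# Illustrative (hypothetical) overrides for the options registered above; with no
# option group given they are read from the [DEFAULT] section of nova.conf, e.g.:
#   reserved_host_memory_mb = 1024
#   reserved_host_disk_mb = 2048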
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
CONF.import_opt('my_ip', 'nova.netconf')
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
self.pci_tracker = None
self.pci_filter = pci_whitelist.get_pci_devices_filter()
self.nodename = nodename
self.compute_node = None
self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
self.tracked_migrations = {}
self.conductor_api = conductor.API()
monitor_handler = monitors.ResourceMonitorHandler()
self.monitors = monitor_handler.choose_monitors(self)
self.ext_resources_handler = \
ext_resources.ResourceHandler(CONF.compute_resources)
self.old_resources = objects.ComputeNode()
self.scheduler_client = scheduler_client.SchedulerClient()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance_ref, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance_ref: instance to reserve resources for.
:type instance_ref: nova.objects.instance.Instance object
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
# set the 'host' and node fields and continue the build:
self._set_instance_host_and_node(context, instance_ref)
return claims.NopClaim()
# sanity checks:
if instance_ref.host:
LOG.warning(_LW("Host field should not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
if instance_ref.node:
LOG.warning(_LW("Node field should not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance_ref)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance_ref.memory_mb,
'overhead': overhead['memory_mb']})
claim = claims.Claim(context, instance_ref, self, self.compute_node,
overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance_ref to the DB
# so set instance_ref['numa_topology'] first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_ref.numa_topology = claim.claimed_numa_topology
self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance_ref)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type,
image_meta=None, limits=None):
"""Indicate that resources are needed for a resize operation to this
compute host.
:param context: security context
:param instance: instance object to reserve resources for
:param instance_type: new instance_type being resized to
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:returns: A Claim ticket representing the reserved resources. This
should be turned into finalize a resource claim or free
resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if self.disabled:
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
migration = self._create_migration(context, instance,
instance_type)
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance_type['memory_mb'],
'overhead': overhead['memory_mb']})
claim = claims.MoveClaim(context, instance, instance_type,
image_meta, self, self.compute_node,
overhead=overhead, limits=limits)
migration = self._create_migration(context, instance,
instance_type)
claim.migration = migration
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, image_meta,
migration)
elevated = context.elevated()
self._update(elevated)
return claim
def _create_migration(self, context, instance, instance_type):
"""Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = self.nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance_type['id']
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
        migration.migration_type = (
            'resize' if migration.old_instance_type_id !=
            migration.new_instance_type_id else 'migration')
migration.create()
return migration
def _set_instance_host_and_node(self, context, instance):
"""Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = self.nodename
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance):
"""Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
self._update_usage_from_instance(context, instance)
self._update(context.elevated())
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, instance_type=None,
image_meta=None, prefix='new_'):
"""Remove usage for an incoming/outgoing migration."""
if instance['uuid'] in self.tracked_migrations:
migration, itype = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix)
if image_meta is None:
image_meta = utils.get_image_from_system_metadata(
instance['system_metadata'])
if (instance_type is not None and
instance_type['id'] == itype['id']):
numa_topology = hardware.numa_get_constraints(
itype, image_meta)
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(context,
instance,
sign=-1)
self._update_usage(usage, sign=-1)
ctxt = context.elevated()
self._update(ctxt)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled:
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance)
self._update(context.elevated())
@property
def disabled(self):
return self.compute_node is None
def _init_compute_node(self, context, resources):
"""Initialise the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
# if there is already a compute node just use resources
# to initialize
if self.compute_node:
self._copy_resources(resources)
return
# TODO(pmurray): this lookup should be removed when the service_id
# field in the compute node goes away. At the moment it is deprecated
# but still a required field, so it has to be assigned below.
service = self._get_service(context)
if not service:
# no service record, disable resource
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
self.compute_node = self._get_compute_node(context)
if self.compute_node:
self._copy_resources(resources)
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialised with resource values.
self.compute_node = objects.ComputeNode(context)
# TODO(pmurray) service_id is deprecated but is still a required field.
# This should be removed when the field is changed.
self.compute_node.service_id = service.id
self.compute_node.host = self.host
self._copy_resources(resources)
self.compute_node.create()
LOG.info(_LI('Compute_service record created for '
'%(host)s:%(node)s'),
{'host': self.host, 'node': self.nodename})
def _copy_resources(self, resources):
"""Copy resource values to initialise compute_node and related
data structures.
"""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
# now copy rest to compute_node
self.compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.add_metrics_to_list(metrics)
except Exception:
LOG.warning(_LW("Cannot get the metrics from %s."), monitor)
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSON-ified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
LOG.info(_LI("Auditing locally available compute resources for "
"node %(node)s"),
{'node': self.nodename})
resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.info(_LI("Virt driver does not support "
"'get_available_resource'. Compute tracking is disabled."))
self.compute_node = None
return
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if ("cpu_info" not in resources or
resources["cpu_info"] is None):
resources["cpu_info"] = ''
# TODO(berrange): remove this once all virt drivers are updated
# to report topology
if "numa_topology" not in resources:
resources["numa_topology"] = None
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialise the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled:
return
if 'pci_passthrough_devices' in resources:
devs = []
for dev in jsonutils.loads(resources.pop(
'pci_passthrough_devices')):
if dev['dev_type'] == 'type-PF':
continue
if self.pci_filter.device_assignable(dev):
devs.append(dev)
if not self.pci_tracker:
n_id = self.compute_node['id'] if self.compute_node else None
self.pci_tracker = pci_manager.PciDevTracker(context,
node_id=n_id)
self.pci_tracker.set_hvdevs(devs)
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, self.nodename,
expected_attrs=['system_metadata',
'numa_topology'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, self.nodename)
# Only look at resize/migrate migration records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
migrations = [migration for migration in migrations
if migration.migration_type in ('resize', 'migrate')]
self._update_usage_from_migrations(context, migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans)
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
# from deleted instances.
if self.pci_tracker:
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = dev_pools_obj
else:
self.compute_node.pci_device_pools = objects.PciDevicePoolList()
self._report_final_resource_view()
metrics = self._get_host_metrics(context, self.nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
self.compute_node.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context)
LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'),
{'host': self.host, 'node': self.nodename})
def _get_compute_node(self, context):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, self.nodename)
except exception.NotFound:
LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
{'host': self.host, 'node': self.nodename})
def _write_ext_resources(self, resources):
resources.stats = copy.deepcopy(self.stats)
self.ext_resources_handler.write_resources(resources)
def _get_service(self, context):
try:
return objects.Service.get_by_compute_host(context, self.host)
except exception.NotFound:
LOG.warning(_LW("No service record for host %s"), self.host)
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.debug("Hypervisor: free VCPUs: %s" % free_vcpus)
else:
free_vcpus = 'unknown'
LOG.debug("Hypervisor: VCPU information unavailable")
if ('pci_passthrough_devices' in resources and
resources['pci_passthrough_devices']):
LOG.debug("Hypervisor: assignable PCI devices: %s" %
resources['pci_passthrough_devices'])
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': self.nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
vcpus = self.compute_node.vcpus
if vcpus:
tcpu = vcpus
ucpu = self.compute_node.vcpus_used
LOG.info(_LI("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s"),
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = self.compute_node.pci_device_pools
LOG.info(_LI("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s"),
{'node': self.nodename,
'phys_ram': self.compute_node.memory_mb,
'used_ram': self.compute_node.memory_mb_used,
'phys_disk': self.compute_node.local_gb,
'used_disk': self.compute_node.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self):
"""Check to see if any resources have changed."""
if not obj_base.obj_equal_prims(self.compute_node, self.old_resources):
self.old_resources = copy.deepcopy(self.compute_node)
return True
return False
def _update(self, context):
"""Update partial stats locally and populate them to Scheduler."""
self._write_ext_resources(self.compute_node)
if not self._resource_change():
return
# Persist the stats to the Scheduler
self.scheduler_client.update_resource_stats(self.compute_node)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, sign=1):
mem_usage = usage['memory_mb']
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
self.compute_node.memory_mb_used += sign * mem_usage
self.compute_node.local_gb_used += sign * usage.get('root_gb', 0)
self.compute_node.local_gb_used += sign * usage.get('ephemeral_gb', 0)
# free ram and disk may be negative, depending on policy:
self.compute_node.free_ram_mb = (self.compute_node.memory_mb -
self.compute_node.memory_mb_used)
self.compute_node.free_disk_gb = (self.compute_node.local_gb -
self.compute_node.local_gb_used)
self.compute_node.running_vms = self.stats.num_instances
self.ext_resources_handler.update_from_instance(usage, sign)
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
self.compute_node, usage, free)
self.compute_node.numa_topology = updated_numa_topology
def _update_usage_from_migration(self, context, instance, image_meta,
migration):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
uuid = migration.instance_uuid
LOG.info(_LI("Updating from migration %s") % uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == self.nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == self.nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
if same_node:
# same node resize. record usage for whichever instance type the
# instance is *not* in:
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration.new_instance_type_id)
else:
# instance record already has new flavor, hold space for a
# possible revert to the old instance type:
itype = self._get_instance_type(context, instance, 'old_',
migration.old_instance_type_id)
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration.new_instance_type_id)
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration.old_instance_type_id)
if image_meta is None:
image_meta = utils.get_image_from_system_metadata(
instance['system_metadata'])
if itype:
host_topology = self.compute_node.get('numa_topology')
if host_topology:
host_topology = objects.NUMATopology.obj_from_db_obj(
host_topology)
numa_topology = hardware.numa_get_constraints(itype, image_meta)
numa_topology = (
hardware.numa_fit_instance_to_host(
host_topology, numa_topology))
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(context, instance)
self._update_usage(usage)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
self.compute_node.pci_device_pools = obj
self.tracked_migrations[uuid] = (migration, itype)
def _update_usage_from_migrations(self, context, migrations):
self.tracked_migrations.clear()
filtered = {}
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
try:
instance = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
uuid = instance.uuid
# skip migration if instance isn't in a resize state:
if not self._instance_in_resize_state(instance):
LOG.warning(_LW("Instance not resizing, skipping migration."),
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
m = filtered.get(uuid, None)
if not m or migration.updated_at >= m.updated_at:
filtered[uuid] = migration
for migration in filtered.values():
instance = migration.instance
try:
self._update_usage_from_migration(context, instance, None,
migration)
except exception.FlavorNotFound:
LOG.warning(_LW("Flavor could not be found, skipping "
"migration."), instance_uuid=uuid)
continue
def _update_usage_from_instance(self, context, instance):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
is_deleted_instance = instance['vm_state'] == vm_states.DELETED
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_deleted_instance:
self.tracked_instances.pop(uuid)
sign = -1
self.stats.update_stats_for_instance(instance)
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context, instance)
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
# new instance, update compute node resource usage:
self._update_usage(instance, sign=sign)
self.compute_node.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = obj
else:
self.compute_node.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
# set some initial values, reserve room for host/hypervisor:
self.compute_node.local_gb_used = CONF.reserved_host_disk_mb / 1024
self.compute_node.memory_mb_used = CONF.reserved_host_memory_mb
self.compute_node.free_ram_mb = (self.compute_node.memory_mb -
self.compute_node.memory_mb_used)
self.compute_node.free_disk_gb = (self.compute_node.local_gb -
self.compute_node.local_gb_used)
self.compute_node.current_workload = 0
self.compute_node.running_vms = 0
# Reset values for extended resources
self.ext_resources_handler.reset_resources(self.compute_node,
self.driver)
for instance in instances:
if instance.vm_state != vm_states.DELETED:
self._update_usage_from_instance(context, instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning(_LW("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)"),
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _instance_in_resize_state(self, instance):
vm = instance['vm_state']
task = instance['task_state']
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]):
return True
return False
def _get_instance_type(self, context, instance, prefix,
instance_type_id=None):
"""Get the instance type from instance."""
return getattr(instance, '%sflavor' % prefix)
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = obj_base.obj_to_primitive(object_or_dict)
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
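    # A minimal sketch of how _get_usage_dict() behaves, using made-up flavor
    # values and placeholder names ('tracker', 'claimed_topo'); only the
    # 'numa_topology' key from the keyword arguments is honoured:
    #
    #     flavor = {'memory_mb': 512, 'vcpus': 1, 'root_gb': 1,
    #               'ephemeral_gb': 0}
    #     usage = tracker._get_usage_dict(flavor, numa_topology=claimed_topo)
    #     # usage is a copy of flavor with 'numa_topology' set to claimed_topo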
|
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rendering-related test routines."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import json
import os
import subprocess
import font_caching
from fontTools.pens.boundsPen import BoundsPen
def min_with_none(first, second):
"""Returns the minimum of the two inputs, ignoring Nones."""
if first is None:
return second
elif second is None:
return first
else:
return min(first, second)
def max_with_none(first, second):
"""Returns the maximum of the two inputs, ignoring Nones."""
if first is None:
return second
elif second is None:
return first
else:
return max(first, second)
def transform_y(transform, y_value):
"""Applies a transform matrix to a y coordinate."""
return int(round(y_value * transform[1][1]))
def get_glyph_cleaned_extents(ttglyph, glyf_set):
pen = BoundsPen(glyf_set, ignoreSinglePoints=True)
ttglyph.draw(pen)
if not pen.bounds:
return None, None
return pen.bounds[1], pen.bounds[3]
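# BoundsPen.bounds is the tuple (xMin, yMin, xMax, yMax), so pen.bounds[1] and
# pen.bounds[3] above are the cleaned yMin and yMax of the glyph. A minimal
# sketch of the same pattern, assuming 'A' is a glyph name present in the font:
#
#     pen = BoundsPen(glyf_set, ignoreSinglePoints=True)
#     glyf_set['A'].draw(pen)
#     ymin, ymax = pen.bounds[1], pen.bounds[3]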
def get_glyph_cleaned_extents_OLD(glyph, glyf_table):
"""Get the vertical extent of glyphs, ignoring single-point contours.
    This takes care of weirdness in the various fonts, which may need the
single-point contours for hinting or glyph positioning, or may have
forgotten to clean them up."""
try:
return glyph.cleanedYMin, glyph.cleanedYMax
except AttributeError:
glyph.expand(glyf_table)
if glyph.numberOfContours == 0: # is empty
glyph.cleanedYMin = None
glyph.cleanedYMax = None
return None, None
elif glyph.numberOfContours == -1: # has components
max_height = None
min_height = None
for component in glyph.components:
component_ymin, component_ymax = get_glyph_cleaned_extents(
glyf_table.glyphs[component.glyphName],
glyf_table)
if hasattr(component, 'transform'):
transform = component.transform
assert transform[1][0] == transform[0][1] == 0, (
"Can't handle complex transforms")
else:
transform = [[1, 0], [0, 1]]
max_height = max_with_none(
max_height,
transform_y(transform, component_ymax) + component.y)
min_height = min_with_none(
min_height,
transform_y(transform, component_ymin) + component.y)
else:
        # Set points_to_ignore to the set of all single-point contours
points_to_ignore = set()
previous_end_point = -1
for end_point in glyph.endPtsOfContours:
if end_point == previous_end_point + 1:
points_to_ignore.add(end_point)
previous_end_point = end_point
max_height = None
min_height = None
for index, point in enumerate(glyph.coordinates):
if index in points_to_ignore:
continue
y_value = point[1]
max_height = max_with_none(max_height, y_value)
min_height = min_with_none(min_height, y_value)
glyph.cleanedYMin = min_height
glyph.cleanedYMax = max_height
return min_height, max_height
def get_glyph_vertical_extents(glyph_id, font_file_name):
"""Returns visible vertical extents given a glyph ID and font name."""
font = font_caching.open_font(font_file_name)
glyf_set = font.getGlyphSet()
glyph_name = font.getGlyphName(glyph_id)
ttglyph = glyf_set[glyph_name]
return get_glyph_cleaned_extents(ttglyph, glyf_set)
# FIXME: figure out how to make this configurable
HARFBUZZ_DIR = os.getenv('HOME') + os.sep + 'harfbuzz'
HB_SHAPE_PATH = HARFBUZZ_DIR + os.sep + 'util' + os.sep + 'hb-shape'
def run_harfbuzz_on_text(text, font_file_name, language, extra_parameters=None):
"""Runs HarfBuzz on input text and return JSON shaping information."""
hb_parameters = [
HB_SHAPE_PATH,
'--output-format=json',
'--no-glyph-names', # Some fonts have empty glyph names
'--font-file=%s' % font_file_name]
if language:
hb_parameters.append('--language=%s' % language)
if extra_parameters is not None:
hb_parameters += extra_parameters
hb_process = subprocess.Popen(
hb_parameters,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return hb_process.communicate(input=text.encode('UTF-8'))[0]
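# get_line_extents_from_json() below only reads the 'g' (glyph id) and 'dy'
# (vertical offset) fields of each glyph record; the line below is a
# hand-written illustration of that shape, not captured hb-shape output:
#
#     sample_line = '[{"g": 3, "dy": 0}, {"g": 7, "dy": -120}]'
#     for glyph_position in json.loads(sample_line):
#         print(glyph_position['g'], glyph_position['dy'])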
def get_line_extents_from_json(json_data, font_file_name):
"""Find the vertical extents of a line based on HarfBuzz JSON output."""
max_height = None
min_height = None
for glyph_position in json.loads(json_data):
glyph_id = glyph_position['g']
glyph_ymin, glyph_ymax = get_glyph_vertical_extents(
glyph_id, font_file_name)
if glyph_ymax is not None:
glyph_vertical_offset = glyph_position['dy']
max_height = max_with_none(
max_height, glyph_ymax + glyph_vertical_offset)
min_height = min_with_none(
min_height, glyph_ymin + glyph_vertical_offset)
return min_height, max_height
def test_text_vertical_extents(
text, font_file_name, min_allowed, max_allowed, language=None):
"""Runs given text through HarfBuzz to find cases that go out of bounds."""
hb_output = run_harfbuzz_on_text(text, font_file_name, language)
split_text = text.split('\n')
exceeding_lines = []
for line_no, output_line in enumerate(hb_output.split('\n')):
if not output_line:
continue
min_height, max_height = get_line_extents_from_json(
output_line, font_file_name)
if min_height is None:
continue
if min_height < min_allowed or max_height > max_allowed:
exceeding_lines.append(((min_height, max_height),
split_text[line_no]))
return exceeding_lines
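# A minimal usage sketch for the routines above; the text, font path, and
# vertical bounds are hypothetical placeholders rather than values from any
# real test configuration:
#
#     bad_lines = test_text_vertical_extents(
#         u'first example line\nsecond example line',
#         '/path/to/SomeFont-Regular.ttf',
#         min_allowed=-500, max_allowed=2000,
#         language='en')
#     for (ymin, ymax), line in bad_lines:
#         print('line exceeds [%d, %d]: %s' % (ymin, ymax, line))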
|
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for long attention classes."""
import dataclasses
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.longt5 import long_attention
from flaxformer.architectures.longt5 import relative_position_biases_general
from flaxformer.components.attention import dense_attention
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
RelativePositionBiasesGeneral = (
relative_position_biases_general.RelativePositionBiasesGeneral)
@dataclasses.dataclass(frozen=True)
class EncoderLocalSelfAttArgs:
num_heads: int = 1
local_radius: int = 2
batch_size: int = 2
qkv_features: int = 3
out_features: int = 4
q_len: int = 5
features: int = 6
broadcast_dropout: bool = True
dropout_rate: float = 0.1
enable_dropout: bool = True
use_bias: bool = True
rescale_logits: bool = True
float32_logits: bool = False
split_head_kernel: bool = False
kernels_to_fuse: Optional[str] = None # Only 'qkv' is supported.
relpos_bias: Optional[RelativePositionBiasesGeneral] = None
def init_args(self):
return dict(
num_heads=self.num_heads,
local_radius=self.local_radius,
qkv_features=self.qkv_features,
out_features=self.out_features,
broadcast_dropout=self.broadcast_dropout,
dropout_rate=self.dropout_rate,
use_bias=self.use_bias,
rescale_logits=self.rescale_logits,
float32_logits=self.float32_logits,
split_head_kernel=self.split_head_kernel,
kernels_to_fuse=self.kernels_to_fuse,
relpos_bias=self.relpos_bias)
def apply_args(self):
inputs = jnp.ones((self.batch_size, self.q_len, self.features))
inputs_mask = jnp.ones((self.batch_size, self.q_len))
return {
'inputs': inputs,
'inputs_mask': inputs_mask,
'enable_dropout': self.enable_dropout
}
class LongAttentionTest(parameterized.TestCase):
def test_local_self_attention_shape(self):
# This test only checks for shape but tries to make sure all code paths are
# reached.
dropout_rng = random.PRNGKey(0)
batch_size, num_heads, seq_len, qk_depth, v_depth = 1, 2, 8, 3, 5
local_radius = 1
block_len = local_radius + 1 # 2
num_blocks = seq_len // block_len + bool(seq_len % block_len) # 4
query = jnp.ones((batch_size, seq_len, num_heads, qk_depth))
key = jnp.ones((batch_size, seq_len, num_heads, qk_depth))
value = jnp.ones((batch_size, seq_len, num_heads, v_depth))
bias = jnp.ones(
(batch_size, num_blocks, num_heads, block_len, 3 * block_len))
args = dict(
query=query,
key=key,
value=value,
local_radius=local_radius,
bias=bias,
rescale_logits=True,
dropout_rng=dropout_rng,
dropout_rate=0.5,
enable_dropout=True,
)
output = long_attention._local_self_attention(
**args, broadcast_dropout=True)
self.assertEqual(output.shape, (batch_size, seq_len, num_heads, v_depth))
# Make sure we also reach the code path where we don't broadcast dropout.
output = long_attention._local_self_attention(
**args, broadcast_dropout=False)
self.assertEqual(output.shape, (batch_size, seq_len, num_heads, v_depth))
def test_encoder_local_self_attention(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs()
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_cast_logits_float32(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(float32_logits=True)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_no_rescale_logits(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(rescale_logits=False)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_no_out_features(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(out_features=None)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.features))
def test_encoder_local_self_attention_with_kernel_fusion(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(
split_head_kernel=True, kernels_to_fuse='qkv')
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
@parameterized.named_parameters(
('even_blocking', 15),
('uneven_blocking', 16),
('degenerate_blocking', 35),
)
def test_encoder_local_self_attention_logic(self, local_radius):
# This test checks the logic of the local attention calculations by
# comparing with the output of `MultiHeadDotProductAttention`
# (full attention) after manually applying the local sparsity pattern.
# The outputs should be identical for non-padding tokens.
keys = random.split(random.PRNGKey(0), 4)
batch_size = 3
seq_len = 64
in_features = 11
out_features = 12
num_heads = 5
dtype = jnp.float32
inputs = random.normal(keys[0], (batch_size, seq_len, in_features))
inputs_mask = random.bernoulli(keys[1], 0.9, (batch_size, seq_len))
inputs_mask = inputs_mask.astype(jnp.bool_)
segment_ids = jnp.cumsum(
random.bernoulli(keys[2], 0.1, (batch_size, seq_len)), axis=-1)
# `positions` is unused in `EncoderLocalSelfAttention`, so we set to zeros.
positions = jnp.zeros_like(segment_ids)
att_config = dict(
num_heads=num_heads,
dtype=dtype,
qkv_features=15,
out_features=out_features)
relpos_bias = RelativePositionBiasesGeneral(
num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
local_att = long_attention.EncoderLocalSelfAttention(
local_radius=local_radius, relpos_bias=relpos_bias, **att_config)
full_att = dense_attention.MultiHeadDotProductAttention(
use_bias=True, **att_config)
local_att_output, local_att_vars = local_att.init_with_output(
keys[3],
inputs,
inputs_mask,
segment_ids=segment_ids,
positions=positions,
enable_dropout=False)
relpos_bias_vars = dict(params=local_att_vars['params']['relpos_bias'])
# Full attention uses the same variables as local attention (ignoring
# `relpos_bias`).
full_att_vars = local_att_vars
rp_bucket = relpos_bias.full_att_rp_bucket(
qlen=seq_len, klen=seq_len, bidirectional=True)
bias = relpos_bias.apply(relpos_bias_vars, rp_bucket)
mask = dense_attention.make_attention_mask(
inputs_mask, inputs_mask, dtype=dtype)
mask = dense_attention.combine_masks(
mask,
dense_attention.make_attention_mask(
segment_ids, segment_ids, jnp.equal, dtype=dtype))
# Overlay local sparsity attention mask for full attention case.
range_array = np.arange(seq_len)
locality_mask = np.abs(range_array[np.newaxis, :] -
range_array[:, np.newaxis]) <= local_radius
# [1, 1, seq_len, seq_len] shape
locality_mask = locality_mask[np.newaxis, np.newaxis, :, :]
mask = dense_attention.combine_masks(mask, locality_mask)
full_att_output = full_att.apply(
full_att_vars, inputs, inputs, mask, bias, enable_dropout=False)
np.testing.assert_array_equal(local_att_output.shape,
(batch_size, seq_len, out_features))
np.testing.assert_array_equal(local_att_output.shape, full_att_output.shape)
# Padding tokens may have different embeddings which we'll want to ignore
# in our comparison, so we "clear" them to zero.
def clear_padding(array):
return array * inputs_mask[..., jnp.newaxis].astype(dtype)
np.testing.assert_allclose(
clear_padding(local_att_output),
clear_padding(full_att_output),
atol=1e-5)
@parameterized.named_parameters(
('even_blocking', 15),
('uneven_blocking', 16),
('degenerate_blocking', 35),
('uneven_blocking_use_kernel_fusion', 16, True),
('even_blocking_causal', 15, False, True),
('uneven_blocking_causal', 16, False, True),
('degenerate_blocking_causal', 35, False, True),
('uneven_blocking_use_kernel_fusion_causal', 16, True, True),
)
def test_etc_transient_global_self_attention(self,
local_radius,
use_kernel_fusion=False,
causal=False):
# This test just makes sure the layer successfully runs with different
# input sizes.
    keys = random.split(random.PRNGKey(0), 4)
batch_size = 3
seq_len = 64
tokens_per_block = 4
in_features = 11
out_features = 12
num_heads = 5
dtype = jnp.float32
inputs = random.normal(keys[0], (batch_size, seq_len, in_features))
# Construct realistic packed inputs.
new_segment_marker = random.bernoulli(keys[2], 0.1, (batch_size, seq_len))
segment_ids = jnp.cumsum(new_segment_marker, axis=-1)
# We make the last segment padding.
is_padding = segment_ids == jnp.max(segment_ids, axis=-1, keepdims=True)
inputs_mask = jnp.logical_not(is_padding)
# Create positions based on segments.
arange = np.broadcast_to(np.arange(seq_len), segment_ids.shape)
positions = arange - np.maximum.accumulate(
new_segment_marker * arange, axis=-1)
positions *= inputs_mask
relpos_bias = RelativePositionBiasesGeneral(
num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
side_relpos_bias = RelativePositionBiasesGeneral(
num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
att_layer = long_attention.EtcTransientGlobalSelfAttention(
num_heads=num_heads,
tokens_per_block=tokens_per_block,
local_radius=local_radius,
dtype=dtype,
causal=causal,
qkv_features=15,
out_features=out_features,
rescale_logits=use_kernel_fusion,
split_head_kernel=use_kernel_fusion,
kernels_to_fuse='kv' if use_kernel_fusion else None,
relpos_bias=relpos_bias,
side_relpos_bias=side_relpos_bias,
)
output, _ = att_layer.init_with_output(
keys[3],
inputs,
inputs_mask,
segment_ids=segment_ids,
positions=positions,
enable_dropout=False)
np.testing.assert_array_equal(output.shape,
(batch_size, seq_len, out_features))
def test_make_etc_fixed_block_ids(self):
# See this documentation for an example of what packed inputs look like:
# https://github.com/google/seqio/blob/main/seqio/utils.py#L292
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
],
dtype=np.bool_)
segment_ids = [
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3], #
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0], #
[1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
]
positions = [
[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
[0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
list(range(16)), #
]
block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
tokens_per_block=3,
inputs_mask=inputs_mask,
segment_ids=segment_ids,
positions=positions)
np.testing.assert_array_equal(
block_ids,
[
[0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], #
[0, 0, 0, 1, 1, 1, 1, 1, -1, -1, 2, 2, 2, 2, -1, -1], #
[-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1], #
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4], #
])
np.testing.assert_array_equal(
global_segment_ids,
[
[1, 1, 2, 2, 3], #
[1, 1, 3, 0, 0], #
[6, 6, 0, 0, 0], #
[1, 1, 1, 1, 1], #
])
def test_make_etc_fixed_block_ids_without_packing(self):
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 0, 0, 0], #
],
dtype=np.bool_)
block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
tokens_per_block=3, inputs_mask=inputs_mask)
np.testing.assert_array_equal(
block_ids,
[
[0, 0, 0, 1, 1, 1, 1, 1], #
[0, 0, 0, 1, 1, 1, -1, -1], #
[0, 0, 0, 0, 0, -1, -1, -1], #
])
np.testing.assert_array_equal(
global_segment_ids,
[
[1, 1], #
[1, 1], #
[1, 0], #
])
def test_make_etc_fixed_block_ids_without_orphan_adoption(self):
# See this documentation for an example of what packed inputs look like:
# https://github.com/google/seqio/blob/main/seqio/utils.py#L292
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
],
dtype=np.bool_)
segment_ids = [
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3], #
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0], #
[1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
]
positions = [
[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
[0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
list(range(16)), #
]
block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
tokens_per_block=3,
inputs_mask=inputs_mask,
segment_ids=segment_ids,
positions=positions,
adopt_orphan_tokens=False)
np.testing.assert_array_equal(
block_ids,
[
[0, 0, 0, 1, 1, 1, -1, 2, 2, 2, 3, 3, 3, 4, 4, 4], #
[0, 0, 0, 1, 1, 1, -1, -1, -1, -1, 2, 2, 2, -1, -1, -1], #
[-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1], #
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, -1], #
])
np.testing.assert_array_equal(
global_segment_ids,
[
[1, 1, 2, 2, 3], #
[1, 1, 3, 0, 0], #
[6, 6, 0, 0, 0], #
[1, 1, 1, 1, 1], #
])
def test_make_etc_fixed_block_ids_without_packing_nor_adoption(self):
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 0, 0, 0], #
],
dtype=np.bool_)
block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
tokens_per_block=3, inputs_mask=inputs_mask, adopt_orphan_tokens=False)
np.testing.assert_array_equal(
block_ids,
[
[0, 0, 0, 1, 1, 1, -1, -1], #
[0, 0, 0, 1, 1, 1, -1, -1], #
[0, 0, 0, -1, -1, -1, -1, -1], #
])
np.testing.assert_array_equal(
global_segment_ids,
[
[1, 1], #
[1, 1], #
[1, 0], #
])
def test_orphan_token_identification(self):
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 0, 0, 0], #
],
dtype=np.bool_)
orphan_tokens = long_attention.identify_orphan_tokens(
tokens_per_block=3, inputs_mask=inputs_mask)
np.testing.assert_array_equal(
orphan_tokens,
[
[0, 0, 0, 0, 0, 0, 1, 1], #
[0, 0, 0, 0, 0, 0, 0, 0], #
[0, 0, 0, 1, 1, 0, 0, 0], #
])
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
],
dtype=np.bool_)
positions = [
[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
[0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
list(range(16)), #
]
orphan_tokens = long_attention.identify_orphan_tokens(
tokens_per_block=3, inputs_mask=inputs_mask, positions=positions)
np.testing.assert_array_equal(
orphan_tokens,
[
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], #
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], #
])
def test_mask_to_bias(self):
mask1 = np.array([1, 0], dtype=bool)
bias1 = long_attention.mask_to_bias(mask1, dtype=np.float32)
np.testing.assert_array_equal(bias1, np.array([0, -1e10], dtype=np.float32))
assert bias1.dtype == np.float32
mask2 = np.array([[1, 0], [0, 0]], dtype=bool)
bias2 = long_attention.mask_to_bias(mask2, dtype=np.float32)
np.testing.assert_array_equal(
bias2, np.array([[0, -1e10], [-1e10, -1e10]], dtype=np.float32))
assert bias2.dtype == np.float32
def test_make_side_relpos(self):
tokens_per_block = 3
inputs_mask = np.array([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]) #
positions = np.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], #
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0], #
[0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3]
]) #
segment_ids = np.array([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2], #
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
]) #
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=True)
# certain biases are not important b/c they will be masked out; we represent
# these with NaNs and ignore their positions in testing.
x = np.nan
expected_relative_positions = np.array([
[
[0, 1, 2, 3], #
[0, 1, 2, 3], #
[0, 1, 2, 3], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, x, x], #
[x, x, x, x]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, x, 0], #
[x, x, x, 0], #
[x, x, x, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, 0, x]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=False)
expected_relative_positions = np.array([
[
[0, 1, 2, 3], #
[0, 1, 2, 3], #
[0, 1, 2, 3], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-3, -2, -1, x], #
[x, x, x, x], #
[x, x, x, x]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, x, 0], #
[x, x, x, 0], #
[x, x, x, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, -1, x]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
],
dtype=np.bool_)
segment_ids = [
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3], #
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0], #
[1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
]
positions = [
[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
[0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] #
]
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=True)
expected_relative_positions = np.array([
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
], #
[
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x]
], #
[
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3]
], #
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=False)
expected_relative_positions = np.array([
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
], #
[
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-2, -1, x, x, x], #
[-2, -1, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, -1, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x]
], #
[
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2]
], #
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-5, -4, -3, -2, -1]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
if __name__ == '__main__':
absltest.main()
|
|
from __future__ import absolute_import
import unittest
from six.moves import xrange
import copy
from bokeh.core.properties import List, String, Instance, Dict, Any, Int
from bokeh.model import Model
from bokeh.embed import _ModelInDocument
from bokeh.document import Document
from bokeh.core.property.containers import PropertyValueList, PropertyValueDict
from bokeh.util.future import with_metaclass
def large_plot(n):
from bokeh.models import (
Plot, LinearAxis, Grid, GlyphRenderer,
ColumnDataSource, DataRange1d, PanTool, ZoomInTool, ZoomOutTool, WheelZoomTool, BoxZoomTool,
BoxSelectTool, ResizeTool, SaveTool, ResetTool
)
from bokeh.models.layouts import Column
from bokeh.models.glyphs import Line
col = Column()
objects = set([col])
for i in xrange(n):
source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
xaxis = LinearAxis(plot=plot)
yaxis = LinearAxis(plot=plot)
xgrid = Grid(plot=plot, dimension=0)
ygrid = Grid(plot=plot, dimension=1)
tickers = [xaxis.ticker, xaxis.formatter, yaxis.ticker, yaxis.formatter]
glyph = Line(x='x', y='y')
renderer = GlyphRenderer(data_source=source, glyph=glyph)
plot.renderers.append(renderer)
pan = PanTool()
zoom_in = ZoomInTool()
zoom_out = ZoomOutTool()
wheel_zoom = WheelZoomTool()
box_zoom = BoxZoomTool()
box_select = BoxSelectTool()
resize = ResizeTool()
save = SaveTool()
reset = ResetTool()
tools = [pan, zoom_in, zoom_out, wheel_zoom, box_zoom, box_select, resize, save, reset]
plot.add_tools(*tools)
col.children.append(plot)
objects |= set([
source, xdr, ydr, plot, xaxis, yaxis, xgrid, ygrid, renderer, glyph,
plot.toolbar, plot.tool_events, plot.title, box_zoom.overlay, box_select.overlay] +
tickers + tools)
return col, objects
class TestMetaModel(unittest.TestCase):
def setUp(self):
from bokeh.model import MetaModel
self.metamodel = MetaModel
self.old_map = copy.copy(self.metamodel.model_class_reverse_map)
def tearDown(self):
self.metamodel.model_class_reverse_map = self.old_map
def mkclass(self):
class Test_Class(with_metaclass(self.metamodel)):
foo = 1
return Test_Class
def test_metaclassing(self):
tclass = self.mkclass()
self.assertTrue(hasattr(tclass, '__view_model__'))
self.assertRaises(Warning, self.mkclass)
def test_get_class(self):
from bokeh.model import get_class
self.mkclass()
tclass = get_class('Test_Class')
self.assertTrue(hasattr(tclass, 'foo'))
self.assertRaises(KeyError, get_class, 'Imaginary_Class')
class DeepModel(Model):
child = Instance(Model)
class TestCollectModels(unittest.TestCase):
def test_references_large(self):
root, objects = large_plot(10)
self.assertEqual(set(root.references()), objects)
def test_references_deep(self):
root = DeepModel()
objects = set([root])
parent = root
        # In a previous implementation, about 400 would blow the max
        # recursion depth, so we double that and add a little bit here.
for i in xrange(900):
model = DeepModel()
objects.add(model)
parent.child = model
parent = model
self.assertEqual(set(root.references()), objects)
class SomeModelToJson(Model):
child = Instance(Model)
foo = Int()
bar = String()
class TestModel(unittest.TestCase):
def setUp(self):
from bokeh.models import Model
self.pObjectClass = Model
self.maxDiff = None
def test_init(self):
testObject = self.pObjectClass(id='test_id')
self.assertEqual(testObject._id, 'test_id')
testObject2 = self.pObjectClass()
self.assertIsNot(testObject2._id, None)
self.assertEqual(set(["name", "tags", "js_property_callbacks",
"subscribed_events", "js_event_callbacks"]),
testObject.properties())
self.assertDictEqual(dict(name=None, tags=[], js_property_callbacks={},
js_event_callbacks={}, subscribed_events=[]),
testObject.properties_with_values(include_defaults=True))
self.assertDictEqual(dict(), testObject.properties_with_values(include_defaults=False))
def test_ref(self):
testObject = self.pObjectClass(id='test_id')
self.assertEqual({'type': 'Model', 'id': 'test_id'}, testObject.ref)
def test_references_by_ref_by_value(self):
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Instance, Int
class T(self.pObjectClass):
t = Int(0)
class Y(self.pObjectClass):
t1 = Instance(T)
class Z1(HasProps):
t2 = Instance(T)
class Z2(self.pObjectClass):
t2 = Instance(T)
class X1(self.pObjectClass):
y = Instance(Y)
z1 = Instance(Z1)
class X2(self.pObjectClass):
y = Instance(Y)
z2 = Instance(Z2)
t1, t2 = T(t=1), T(t=2)
y = Y(t1=t1)
z1, z2 = Z1(t2=t2), Z2(t2=t2)
x1 = X1(y=y, z1=z1)
x2 = X2(y=y, z2=z2)
self.assertEqual(x1.references(), {t1, y, t2, x1})
self.assertEqual(x2.references(), {t1, y, t2, z2, x2})
def test_references_in_containers(self):
from bokeh.core.properties import Int, String, Instance, List, Tuple, Dict
# XXX: can't use Y, because of:
#
# Warning: Duplicate __view_model__ declaration of 'Y' for class Y.
# Previous definition: <class 'bokeh.tests.test_objects.Y'>
class U(self.pObjectClass):
a = Int
class V(self.pObjectClass):
u1 = Instance(U)
u2 = List(Instance(U))
u3 = Tuple(Int, Instance(U))
u4 = Dict(String, Instance(U))
u5 = Dict(String, List(Instance(U)))
u1, u2, u3, u4, u5 = U(a=1), U(a=2), U(a=3), U(a=4), U(a=5)
v = V(u1=u1, u2=[u2], u3=(3, u3), u4={"4": u4}, u5={"5": [u5]})
self.assertEqual(v.references(), set([v, u1, u2, u3, u4, u5]))
def test_to_json(self):
child_obj = SomeModelToJson(foo=57, bar="hello")
obj = SomeModelToJson(child=child_obj,
foo=42, bar="world")
json = obj.to_json(include_defaults=True)
json_string = obj.to_json_string(include_defaults=True)
self.assertEqual({ "child" : { "id" : child_obj._id, "type" : "SomeModelToJson" },
"id" : obj._id,
"name" : None,
"tags" : [],
'js_property_callbacks': {},
"js_event_callbacks" : {},
"subscribed_events" : [],
"foo" : 42,
"bar" : "world" },
json)
self.assertEqual(('{"bar":"world",' +
'"child":{"id":"%s","type":"SomeModelToJson"},' +
'"foo":42,"id":"%s","js_event_callbacks":{},"js_property_callbacks":{},' +
'"name":null,"subscribed_events":[],"tags":[]}') %
(child_obj._id, obj._id),
json_string)
def test_no_units_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
json = obj.to_json(include_defaults=True)
self.assertTrue('start_angle' in json)
self.assertTrue('start_angle_units' not in json)
self.assertTrue('outer_radius' in json)
self.assertTrue('outer_radius_units' not in json)
def test_dataspec_field_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = "fieldname"
json = obj.to_json(include_defaults=True)
self.assertTrue('start_angle' in json)
self.assertTrue('start_angle_units' not in json)
self.assertDictEqual(dict(units='rad', field='fieldname'), json['start_angle'])
def test_dataspec_value_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = 60
json = obj.to_json(include_defaults=True)
self.assertTrue('start_angle' in json)
self.assertTrue('start_angle_units' not in json)
self.assertDictEqual(dict(units='rad', value=60), json['start_angle'])
def test_list_default(self):
class HasListDefault(Model):
value = List(String, default=["hello"])
obj = HasListDefault()
        self.assertEqual(obj.value, obj.value)
        self.assertEqual(["hello"], obj.value)
# 'value' should not be included because we haven't modified it
self.assertFalse('value' in obj.properties_with_values(include_defaults=False))
# (but should be in include_defaults=True)
self.assertTrue('value' in obj.properties_with_values(include_defaults=True))
obj.value.append("world")
# 'value' should now be included
self.assertTrue('value' in obj.properties_with_values(include_defaults=False))
def test_dict_default(self):
class HasDictDefault(Model):
value = Dict(String, Int, default=dict(hello=42))
obj = HasDictDefault()
self.assertDictEqual(obj.value, obj.value)
self.assertDictEqual(dict(hello=42), obj.value)
# 'value' should not be included because we haven't modified it
self.assertFalse('value' in obj.properties_with_values(include_defaults=False))
# (but should be in include_defaults=True)
self.assertTrue('value' in obj.properties_with_values(include_defaults=True))
obj.value['world'] = 57
# 'value' should now be included
self.assertTrue('value' in obj.properties_with_values(include_defaults=False))
self.assertDictEqual(dict(hello=42, world=57), obj.value)
def test_func_default_with_counter(self):
counter = dict(value=0)
def next_value():
counter['value'] += 1
return counter['value']
class HasFuncDefaultInt(Model):
value = Int(default=next_value)
obj1 = HasFuncDefaultInt()
obj2 = HasFuncDefaultInt()
self.assertEqual(obj1.value+1, obj2.value)
# 'value' is a default, but it gets included as a
# non-default because it's unstable.
self.assertTrue('value' in obj1.properties_with_values(include_defaults=False))
def test_func_default_with_model(self):
class HasFuncDefaultModel(Model):
child = Instance(Model, lambda: Model())
obj1 = HasFuncDefaultModel()
obj2 = HasFuncDefaultModel()
self.assertNotEqual(obj1.child._id, obj2.child._id)
# 'child' is a default, but it gets included as a
# non-default because it's unstable.
self.assertTrue('child' in obj1.properties_with_values(include_defaults=False))
class SomeModelInTestObjects(Model):
child = Instance(Model)
class TestModelInDocument(unittest.TestCase):
def test_single_model(self):
p = Model()
self.assertIs(p.document, None)
with _ModelInDocument([p]):
self.assertIsNot(p.document, None)
self.assertIs(p.document, None)
def test_list_of_model(self):
p1 = Model()
p2 = Model()
self.assertIs(p1.document, None)
self.assertIs(p2.document, None)
with _ModelInDocument([p1, p2]):
self.assertIsNot(p1.document, None)
self.assertIsNot(p2.document, None)
self.assertIs(p1.document, None)
self.assertIs(p2.document, None)
def test_uses_precedent(self):
# it's deliberate that the doc is on p2, so _ModelInDocument
# has to be smart about looking for a doc anywhere in the list
# before it starts inventing new documents
doc = Document()
p1 = Model()
p2 = Model()
doc.add_root(p2)
self.assertIs(p1.document, None)
self.assertIsNot(p2.document, None)
with _ModelInDocument([p1, p2]):
self.assertIsNot(p1.document, None)
self.assertIsNot(p2.document, None)
self.assertIs(p1.document, doc)
self.assertIs(p2.document, doc)
self.assertIs(p1.document, None)
self.assertIsNot(p2.document, None)
def test_uses_doc_precedent(self):
doc = Document()
p1 = Model()
p2 = Model()
self.assertIs(p1.document, None)
self.assertIs(p2.document, None)
with _ModelInDocument([p1, p2, doc]):
self.assertIsNot(p1.document, None)
self.assertIsNot(p2.document, None)
self.assertIs(p1.document, doc)
self.assertIs(p2.document, doc)
self.assertIs(p1.document, None)
self.assertIs(p2.document, None)
def test_with_doc_in_child_raises_error(self):
doc = Document()
p1 = Model()
p2 = SomeModelInTestObjects(child=Model())
doc.add_root(p2.child)
self.assertIs(p1.document, None)
self.assertIs(p2.document, None)
self.assertIs(p2.child.document, doc)
with self.assertRaisesRegexp(RuntimeError, p2._id):
with _ModelInDocument([p1, p2]):
self.assertIsNot(p1.document, None)
self.assertIsNot(p2.document, None)
self.assertIs(p1.document, doc)
self.assertIs(p2.document, doc)
class TestContainerMutation(unittest.TestCase):
def _check_mutation(self, obj, attr, mutator, expected_event_old, expected_event_new):
result = dict(calls=[])
def record_trigger(attr, old, new_):
result['calls'].append((attr, old, new_))
obj.on_change(attr, record_trigger)
try:
actual_old = getattr(obj, attr)
self.assertEqual(expected_event_old, actual_old)
mutator(actual_old)
self.assertEqual(expected_event_new, getattr(obj, attr))
finally:
obj.remove_on_change(attr, record_trigger)
self.assertEqual(1, len(result['calls']))
call = result['calls'][0]
self.assertEqual(attr, call[0])
self.assertEqual(expected_event_old, call[1])
self.assertEqual(expected_event_new, call[2])
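# Illustrative sketch (follows _check_mutation above): a typical use is
#   self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
# which asserts that mutating the wrapped container fires exactly one change
# event carrying the old value [] and the new value ["bar"].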
class HasListProp(Model):
foo = List(String)
def __init__(self, **kwargs):
super(HasListProp, self).__init__(**kwargs)
class TestListMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasListProp()
self.assertFalse('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
self.assertEqual(foo, foo) # this is to calm down flake's unused var warning
self.assertFalse('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
# but changing the list should count as replacing the default
obj.foo.append("hello")
self.assertTrue('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
def test_assignment_maintains_owners(self):
obj = HasListProp()
old_list = obj.foo
self.assertTrue(isinstance(old_list, PropertyValueList))
self.assertEqual(1, len(old_list._owners))
obj.foo = ["a"]
new_list = obj.foo
self.assertTrue(isinstance(new_list, PropertyValueList))
self.assertIsNot(old_list, new_list)
self.assertEqual(0, len(old_list._owners))
self.assertEqual(1, len(new_list._owners))
def test_list_delitem(self):
obj = HasListProp(foo=["a", "b", "c"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c"],
["a", "c"])
def test_list_delslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
del x[1:3]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "d"])
def test_list_iadd(self):
obj = HasListProp(foo=["a"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
x += ["b"]
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "b"])
def test_list_imul(self):
obj = HasListProp(foo=["a"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
x *= 3
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "a", "a"])
def test_list_setitem(self):
obj = HasListProp(foo=["a"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
x[0] = "b"
self._check_mutation(obj, 'foo', mutate,
["a"],
["b"])
def test_list_setslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
def mutate(x):
x[1:3] = ["x"]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "x", "d"])
def test_list_append(self):
obj = HasListProp()
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
def test_list_extend(self):
obj = HasListProp()
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.extend(["x", "y"]), [], ["x", "y"])
def test_list_insert(self):
obj = HasListProp(foo=["a", "b"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.insert(1, "x"),
["a", "b"],
["a", "x", "b"])
def test_list_pop(self):
obj = HasListProp(foo=["a", "b"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.pop(),
["a", "b"],
["a"])
def test_list_remove(self):
obj = HasListProp(foo=["a", "b"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.remove("b"),
["a", "b"],
["a"])
def test_list_reverse(self):
obj = HasListProp(foo=["a", "b"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.reverse(),
["a", "b"],
["b", "a"])
def test_list_sort(self):
obj = HasListProp(foo=["b", "a"])
self.assertTrue(isinstance(obj.foo, PropertyValueList))
self._check_mutation(obj, 'foo', lambda x: x.sort(),
["b", "a"],
["a", "b"])
class HasStringDictProp(Model):
foo = Dict(String, Any)
def __init__(self, **kwargs):
super(HasStringDictProp, self).__init__(**kwargs)
class HasIntDictProp(Model):
foo = Dict(Int, Any)
def __init__(self, **kwargs):
super(HasIntDictProp, self).__init__(**kwargs)
class TestDictMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasStringDictProp()
self.assertFalse('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
self.assertEqual(foo, foo) # this is to calm down flake's unused var warning
self.assertFalse('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
# but changing the dict should count as replacing the default
obj.foo['bar'] = 42
self.assertTrue('foo' in obj.properties_with_values(include_defaults=False))
self.assertTrue('foo' in obj.properties_with_values(include_defaults=True))
def test_assignment_maintains_owners(self):
obj = HasStringDictProp()
old_dict = obj.foo
self.assertTrue(isinstance(old_dict, PropertyValueDict))
self.assertEqual(1, len(old_dict._owners))
obj.foo = dict(a=1)
new_dict = obj.foo
self.assertTrue(isinstance(new_dict, PropertyValueDict))
self.assertIsNot(old_dict, new_dict)
self.assertEqual(0, len(old_dict._owners))
self.assertEqual(1, len(new_dict._owners))
def test_dict_delitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
del x['b']
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_delitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 2 : "b", 3 : "c" })
def test_dict_setitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
x['b'] = 42
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=42, c=3))
def test_dict_setitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
x[2] = "bar"
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 1 : "a", 2 : "bar", 3 : "c" })
def test_dict_clear(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
x.clear()
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict())
def test_dict_pop(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
x.pop('b')
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_pop_default_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
self.assertEqual(42, obj.foo.pop('z', 42))
def test_dict_popitem_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
i = obj.foo.popitem()
self.assertTrue(i == ('a', 1) or i == ('b', 2) or i == ('c', 3))
# we don't _check_mutation since the end value is nondeterministic
def test_dict_setdefault(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
b = x.setdefault('b', 43)
self.assertEqual(2, b)
z = x.setdefault('z', 44)
self.assertEqual(44, z)
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=2, c=3, z=44))
def test_dict_update(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
self.assertTrue(isinstance(obj.foo, PropertyValueDict))
def mutate(x):
x.update(dict(b=7, c=8))
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=7, c=8))
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#
# Generated by generateDS.py.
#
import sys
from string import lower as str_lower
from xml.dom import minidom
import supers as supermod
#
# Globals
#
ExternalEncoding = 'utf-8'
#
# Data representation classes
#
class stageTypeSub(supermod.stageType):
def __init__(self, labelSuffix=None, name=None, valueOf_=''):
supermod.stageType.__init__(self, labelSuffix, name, valueOf_)
supermod.stageType.subclass = stageTypeSub
# end class stageTypeSub
class stageListTypeSub(supermod.stageListType):
def __init__(self, stage=None):
supermod.stageListType.__init__(self, stage)
supermod.stageListType.subclass = stageListTypeSub
# end class stageListTypeSub
class upstreamSourceTypeSub(supermod.upstreamSourceType):
def __init__(self, troveName=None, label=None, valueOf_=''):
supermod.upstreamSourceType.__init__(self, troveName, label, valueOf_)
supermod.upstreamSourceType.subclass = upstreamSourceTypeSub
# end class upstreamSourceTypeSub
class upstreamSourceListTypeSub(supermod.upstreamSourceListType):
def __init__(self, upstreamSource=None):
supermod.upstreamSourceListType.__init__(self, upstreamSource)
supermod.upstreamSourceListType.subclass = upstreamSourceListTypeSub
# end class upstreamSourceListTypeSub
class factorySourceListTypeSub(supermod.factorySourceListType):
def __init__(self, factorySource=None):
supermod.factorySourceListType.__init__(self, factorySource)
supermod.factorySourceListType.subclass = factorySourceListTypeSub
# end class factorySourceListTypeSub
class amiImageTypeSub(supermod.amiImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, baseFileName=None, installLabelPath=None, amiHugeDiskMountpoint=None, valueOf_=''):
supermod.amiImageType.__init__(self, autoResolve, freespace, name, baseFileName, installLabelPath, amiHugeDiskMountpoint, valueOf_)
supermod.amiImageType.subclass = amiImageTypeSub
# end class amiImageTypeSub
class applianceIsoImageTypeSub(supermod.applianceIsoImageType):
def __init__(self, maxIsoSize=None, autoResolve=None, bugsUrl=None, name=None, anacondaCustomTrove=None, betaNag=None, mediaTemplateTrove=None, installLabelPath=None, anacondaTemplatesTrove=None, baseFileName=None, showMediaCheck=None, valueOf_=''):
supermod.applianceIsoImageType.__init__(self, maxIsoSize, autoResolve, bugsUrl, name, anacondaCustomTrove, betaNag, mediaTemplateTrove, installLabelPath, anacondaTemplatesTrove, baseFileName, showMediaCheck, valueOf_)
supermod.applianceIsoImageType.subclass = applianceIsoImageTypeSub
# end class applianceIsoImageTypeSub
class installableIsoImageTypeSub(supermod.installableIsoImageType):
def __init__(self, maxIsoSize=None, autoResolve=None, bugsUrl=None, name=None, anacondaCustomTrove=None, betaNag=None, mediaTemplateTrove=None, installLabelPath=None, anacondaTemplatesTrove=None, baseFileName=None, showMediaCheck=None, valueOf_=''):
supermod.installableIsoImageType.__init__(self, maxIsoSize, autoResolve, bugsUrl, name, anacondaCustomTrove, betaNag, mediaTemplateTrove, installLabelPath, anacondaTemplatesTrove, baseFileName, showMediaCheck, valueOf_)
supermod.installableIsoImageType.subclass = installableIsoImageTypeSub
# end class installableIsoImageTypeSub
class liveIsoImageTypeSub(supermod.liveIsoImageType):
def __init__(self, autoResolve=None, name=None, zisofs=None, baseFileName=None, unionfs=None, installLabelPath=None, valueOf_=''):
supermod.liveIsoImageType.__init__(self, autoResolve, name, zisofs, baseFileName, unionfs, installLabelPath, valueOf_)
supermod.liveIsoImageType.subclass = liveIsoImageTypeSub
# end class liveIsoImageTypeSub
class netbootImageTypeSub(supermod.netbootImageType):
def __init__(self, autoResolve=None, baseFileName=None, installLabelPath=None, name=None, valueOf_=''):
supermod.netbootImageType.__init__(self, autoResolve, baseFileName, installLabelPath, name, valueOf_)
supermod.netbootImageType.subclass = netbootImageTypeSub
# end class netbootImageTypeSub
class rawFsImageTypeSub(supermod.rawFsImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, swapSize=None, baseFileName=None, installLabelPath=None, valueOf_=''):
supermod.rawFsImageType.__init__(self, autoResolve, freespace, name, swapSize, baseFileName, installLabelPath, valueOf_)
supermod.rawFsImageType.subclass = rawFsImageTypeSub
# end class rawFsImageTypeSub
class rawHdImageTypeSub(supermod.rawHdImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, swapSize=None, baseFileName=None, installLabelPath=None, valueOf_=''):
supermod.rawHdImageType.__init__(self, autoResolve, freespace, name, swapSize, baseFileName, installLabelPath, valueOf_)
supermod.rawHdImageType.subclass = rawHdImageTypeSub
# end class rawHdImageTypeSub
class tarballImageTypeSub(supermod.tarballImageType):
def __init__(self, autoResolve=None, baseFileName=None, installLabelPath=None, name=None, swapSize=None, valueOf_=''):
supermod.tarballImageType.__init__(self, autoResolve, baseFileName, installLabelPath, name, swapSize, valueOf_)
supermod.tarballImageType.subclass = tarballImageTypeSub
# end class tarballImageTypeSub
class updateIsoImageTypeSub(supermod.updateIsoImageType):
def __init__(self, mediaTemplateTrove=None, baseFileName=None, valueOf_=''):
supermod.updateIsoImageType.__init__(self, mediaTemplateTrove, baseFileName, valueOf_)
supermod.updateIsoImageType.subclass = updateIsoImageTypeSub
# end class updateIsoImageTypeSub
class vhdImageTypeSub(supermod.vhdImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, vhdDiskType=None, swapSize=None, baseFileName=None, installLabelPath=None, valueOf_=''):
supermod.vhdImageType.__init__(self, autoResolve, freespace, name, vhdDiskType, swapSize, baseFileName, installLabelPath, valueOf_)
supermod.vhdImageType.subclass = vhdImageTypeSub
# end class vhdImageTypeSub
class virtualIronImageTypeSub(supermod.virtualIronImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, vhdDiskType=None, swapSize=None, baseFileName=None, installLabelPath=None, valueOf_=''):
supermod.virtualIronImageType.__init__(self, autoResolve, freespace, name, vhdDiskType, swapSize, baseFileName, installLabelPath, valueOf_)
supermod.virtualIronImageType.subclass = virtualIronImageTypeSub
# end class virtualIronImageTypeSub
class vmwareEsxImageTypeSub(supermod.vmwareEsxImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, natNetworking=None, vmMemory=None, swapSize=None, installLabelPath=None, baseFileName=None, valueOf_=''):
supermod.vmwareEsxImageType.__init__(self, autoResolve, freespace, name, natNetworking, vmMemory, swapSize, installLabelPath, baseFileName, valueOf_)
supermod.vmwareEsxImageType.subclass = vmwareEsxImageTypeSub
# end class vmwareEsxImageTypeSub
class vmwareImageTypeSub(supermod.vmwareImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, natNetworking=None, vmMemory=None, swapSize=None, diskAdapter=None, installLabelPath=None, baseFileName=None, vmSnapshots=None, valueOf_=''):
supermod.vmwareImageType.__init__(self, autoResolve, freespace, name, natNetworking, vmMemory, swapSize, diskAdapter, installLabelPath, baseFileName, vmSnapshots, valueOf_)
supermod.vmwareImageType.subclass = vmwareImageTypeSub
# end class vmwareImageTypeSub
class xenOvaImageTypeSub(supermod.xenOvaImageType):
def __init__(self, autoResolve=None, freespace=None, name=None, vmMemory=None, swapSize=None, baseFileName=None, installLabelPath=None, valueOf_=''):
supermod.xenOvaImageType.__init__(self, autoResolve, freespace, name, vmMemory, swapSize, baseFileName, installLabelPath, valueOf_)
supermod.xenOvaImageType.subclass = xenOvaImageTypeSub
# end class xenOvaImageTypeSub
class buildDefinitionTypeSub(supermod.buildDefinitionType):
def __init__(self, build_=None):
supermod.buildDefinitionType.__init__(self, build_)
supermod.buildDefinitionType.subclass = buildDefinitionTypeSub
# end class buildDefinitionTypeSub
class buildTypeSub(supermod.buildType):
def __init__(self, baseFlavor=None, name=None, amiImage=None, applianceIsoImage=None, installableIsoImage=None, liveIsoImage=None, netbootImage=None, rawFsImage=None, rawHdImage=None, tarballImage=None, updateIsoImage=None, vhdImage=None, virtualIronImage=None, vmwareImage=None, vmwareEsxImage=None, xenOvaImage=None, stage=None, imageGroup=None):
supermod.buildType.__init__(self, baseFlavor, name, amiImage, applianceIsoImage, installableIsoImage, liveIsoImage, netbootImage, rawFsImage, rawHdImage, tarballImage, updateIsoImage, vhdImage, virtualIronImage, vmwareImage, vmwareEsxImage, xenOvaImage, stage, imageGroup)
supermod.buildType.subclass = buildTypeSub
# end class buildTypeSub
class stageSub(supermod.stage):
def __init__(self, ref=None, valueOf_=''):
supermod.stage.__init__(self, ref, valueOf_)
supermod.stage.subclass = stageSub
# end class stageSub
class productDefinitionSub(supermod.productDefinition):
def __init__(self, version=None, productName=None, productShortname=None, productDescription=None, productVersion=None, productVersionDescription=None, conaryRepositoryHostname=None, conaryNamespace=None, imageGroup=None, baseFlavor=None, stages=None, upstreamSources=None, factorySources=None, buildDefinition=None):
supermod.productDefinition.__init__(self, version, productName, productShortname, productDescription, productVersion, productVersionDescription, conaryRepositoryHostname, conaryNamespace, imageGroup, baseFlavor, stages, upstreamSources, factorySources, buildDefinition)
supermod.productDefinition.subclass = productDefinitionSub
# end class productDefinitionSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.stageType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_="stageType",
## namespacedef_='')
doc = None
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.documentElement
rootObj = supermod.stageType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_="stageType",
## namespacedef_='')
return rootObj
def parseLiteral(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.stageType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from supers import *\n\n')
## sys.stdout.write('import supers as model_\n\n')
## sys.stdout.write('rootObj = model_.stageType(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_="stageType")
## sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
# pyflakes=ignore-file
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project Compiler
#
import os, sys, re, shutil, time, run, sgmllib, codecs, tempfile, subprocess
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..')))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..', 'common')))
from tiapp import *
import jspacker
from csspacker import CSSPacker
import traceback
try:
	import json
except ImportError:
	import simplejson as json
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store', '_svn']
ignoreDirs = ['iphone', 'android', 'mobileweb', '.git', '.svn', 'CVS']
HEADER = """/**
* Appcelerator Titanium Mobile
* This is generated code. Do not modify. Your changes *will* be lost.
* Generated code is Copyright (c) 2009-2012 by Appcelerator, Inc.
* All Rights Reserved.
*/
#import <Foundation/Foundation.h>
"""
INTERFACE_HEADER= """
@interface ApplicationRouting : NSObject {
}
+ (NSData*) resolveAppAsset:(NSString*)path;
"""
IMPL_HEADER= """#import "ApplicationRouting.h"
extern NSData* filterDataInRange(NSData* thedata, NSRange range);
@implementation ApplicationRouting
"""
FOOTER ="""
@end
"""
MODULE_IMPL_HEADER = """#import "ApplicationMods.h"
@implementation ApplicationMods
+ (NSArray*) compiledMods
{
NSMutableArray *modules = [NSMutableArray array];
"""
class HTMLParser(sgmllib.SGMLParser):
def parse(self, s):
self.feed(s)
self.close()
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self.scripts = []
def start_script(self, attributes):
for name, value in attributes:
if name == "src":
self.scripts.append(value)
def get_scripts(self):
return self.scripts
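# Illustrative sketch (hypothetical markup, not part of any build): feeding
#   <html><head><script src="app.js"></script></head></html>
# through HTMLParser().parse(...) records 'app.js' via start_script, so
# get_scripts() returns ['app.js']; such local scripts are later excluded from
# normal JS compilation in Compiler.process_html_files.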
def read_module_properties(dir):
file = os.path.join(dir,'manifest')
dict = {}
if os.path.exists(file):
contents = open(file).read()
for line in contents.splitlines(True):
if line[0:1]=='#': continue
idx = line.find(':')
if idx==-1: continue
k=line[0:idx]
v=line[idx+1:].strip()
dict[k]=v
return dict
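# Illustrative sketch (hypothetical manifest contents): a module 'manifest' file
# containing
#   # iOS module manifest
#   name: calendar
#   version: 1.0
# yields {'name': 'calendar', 'version': '1.0'}; '#' lines and lines without a
# ':' are skipped, and only the value side of each pair is stripped.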
# Convert a non-unicode string to unicode, decoding with the given encoding
# (utf-8 by default); non-string objects are returned unchanged.
def to_unicode_or_not(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
# Need to pre-parse xcconfig files to mangle variable names, and then
# dump them into a map so that we can re-assemble them later
def parse_xcconfig(xcconfig, moduleId, variables):
module_xcconfig = open(xcconfig)
new_xcconfig = ''
local_variables = {}
prefix = moduleId.upper().replace('.','_')
for line in module_xcconfig:
# Strip comments
comment = line.find('//')
if comment != -1:
line = line[0:comment]
# Generate new varname / value pairings
# The regular expression parses a valid line into components
# <var>=<value>
# <var>[<key>=<keyvalue>]=<value>
# e.g.
# OTHER_LDFLAGS=-framework EventKit
# OTHER_LDFLAGS[sdk=iphoneos4*]=-liconv
splitline = re.split('(([^\[=]+)(\[[^\]]+\])?) *=? *(.+)', line)
if len(splitline) >= 5:
varname = splitline[1]
value = splitline[4]
name = prefix + '_' + varname.strip()
name = re.sub(r'[^\w]', '_', name)
local_variables[varname] = name
new_xcconfig += name + '=' + value + '\n'
module_xcconfig.close()
# Update any local variable references with new varname
# and add variables to the global variables map
for (varname, name) in local_variables.iteritems():
source = '$(%s)' % varname
target = '$(%s)' % name
new_xcconfig = new_xcconfig.replace(source,target)
# Add new varname to the list
if not varname in variables:
variables[varname] = [name]
else:
variables[varname].append(name)
new_xcconfig += '\n'
return new_xcconfig
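# Illustrative sketch (hypothetical module id and xcconfig line): calling
#   parse_xcconfig('module.xcconfig', 'com.example.foo', variables)
# rewrites a line such as
#   OTHER_LDFLAGS=-framework EventKit
# into
#   COM_EXAMPLE_FOO_OTHER_LDFLAGS=-framework EventKit
# and records variables['OTHER_LDFLAGS'] == ['COM_EXAMPLE_FOO_OTHER_LDFLAGS'],
# which compileProject later re-assembles in module.xcconfig as
#   OTHER_LDFLAGS=$(inherited) $(COM_EXAMPLE_FOO_OTHER_LDFLAGS)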
def softlink_resources(source,target,use_ignoreDirs=True):
if not os.path.exists(target):
os.makedirs(target)
for file in os.listdir(source):
if (use_ignoreDirs and (file in ignoreDirs)) or (file in ignoreFiles):
continue
from_ = to_unicode_or_not(os.path.join(source, file))
to_ = to_unicode_or_not(os.path.join(target, file))
if os.path.isdir(from_):
print "[DEBUG] creating: %s" % (to_)
softlink_resources(from_,to_,use_ignoreDirs)
else:
print "[DEBUG] linking: %s to %s" % (from_,to_)
if os.path.exists(to_):
if os.path.islink(to_):
os.remove(to_)
os.symlink(from_, to_)
else:
os.symlink(from_, to_)
def clear_application_routing(classes_dir):
impf = open(os.path.join(classes_dir,'ApplicationRouting.m'),'w+')
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
impf.write(" return nil;\n")
impf.write('}\n')
impf.write(FOOTER)
impf.close()
def softlink_for_simulator(project_dir,app_dir):
resources_dir = os.path.join(project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(project_dir,'platform','iphone')
softlink_resources(resources_dir,app_dir)
if(os.path.exists(iphone_resources_dir)):
softlink_resources(iphone_resources_dir,app_dir,False)
dest_mod_dir = os.path.join(app_dir,'modules')
src_mod_dir = os.path.join(project_dir,'modules')
if(os.path.exists(src_mod_dir)):
softlink_resources(src_mod_dir,dest_mod_dir)
src_mod_iphone_dir = os.path.join(src_mod_dir,'iphone')
if(os.path.exists(src_mod_iphone_dir)):
softlink_resources(os.path.join(project_dir,'modules','iphone'),dest_mod_dir,False)
iphone_classes_dir = os.path.join(project_dir,'build','iphone','Classes')
clear_application_routing(iphone_classes_dir)
#
# TODO/FIXME
#
# - encryptor
#
class Compiler(object):
def __init__(self, project_dir, appid, name, deploytype):
self.deploytype = deploytype
self.project_dir = project_dir
self.project_name = name
self.appid = appid
if deploytype != 'export-build' and deploytype != 'commonjs':
self.iphone_dir = os.path.join(project_dir,'build','iphone')
else:
self.iphone_dir = project_dir
self.classes_dir = os.path.join(self.iphone_dir,'Classes')
self.assets_dir = os.path.join(self.iphone_dir,'assets')
self.modules = []
self.modules_metadata = []
self.exports = []
# for now, these are required
self.defines = ['USE_TI_ANALYTICS','USE_TI_NETWORK','USE_TI_PLATFORM','USE_TI_UI', 'USE_TI_API']
def compileProject(self,xcode=False,devicefamily='ios',iphone_version='iphoneos',silent=False,sdk=None):
tiapp_xml = os.path.join(self.project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
if sdk is None:
sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
else:
sdk_version = sdk
if xcode:
app_name = os.environ['FULL_PRODUCT_NAME']
app_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
else:
target = 'Debug'
if self.deploytype == 'production':
target = 'Release'
app_name = self.project_name+'.app'
app_folder_name = '%s-iphoneos' % target
app_dir = os.path.abspath(os.path.join(self.iphone_dir,'build',app_folder_name,app_name))
if not silent:
print "[INFO] Titanium SDK version: %s" % sdk_version
print "[INFO] iPhone Device family: %s" % devicefamily
print "[INFO] iPhone SDK version: %s" % iphone_version
if self.deploytype != 'export-build':
main_template_file = os.path.join(template_dir,'main.m')
main_template = codecs.open(main_template_file, encoding='utf-8').read()
main_template = main_template.replace('__PROJECT_NAME__',self.project_name)
main_template = main_template.replace('__PROJECT_ID__',self.appid)
main_template = main_template.replace('__DEPLOYTYPE__',self.deploytype)
main_template = main_template.replace('__APP_ID__',self.appid)
main_template = main_template.replace('__APP_ANALYTICS__',ti.properties['analytics'])
main_template = main_template.replace('__APP_PUBLISHER__',ti.properties['publisher'])
main_template = main_template.replace('__APP_URL__',ti.properties['url'])
main_template = main_template.replace('__APP_NAME__',ti.properties['name'])
main_template = main_template.replace('__APP_VERSION__',ti.properties['version'])
main_template = main_template.replace('__APP_DESCRIPTION__',ti.properties['description'])
main_template = main_template.replace('__APP_COPYRIGHT__',ti.properties['copyright'])
main_template = main_template.replace('__APP_GUID__',ti.properties['guid'])
main_template = main_template.replace('__APP_RESOURCE_DIR__','')
			main_template_out = os.path.join(self.iphone_dir,'main.m')
			# only rewrite main.m when its contents have changed, so we don't
			# trigger an unnecessary Xcode rebuild
			main_file_contents = None
			if os.path.exists(main_template_out):
				main_file_contents = codecs.open(main_template_out,'r',encoding='utf-8').read()
			if main_file_contents!=main_template:
				main_file = codecs.open(main_template_out,'w+',encoding='utf-8')
				main_file.write(main_template)
				main_file.close()
resources_dir = os.path.join(self.project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(self.project_dir,'platform','iphone')
# copy in any resources in our module like icons
# NOTE: This means that any JS-only modules in the local project
# are hashed up and dumped into the export.
has_modules = False
missing_modules, modules, module_js = ([], [], [])
module_js_dir = os.path.join(self.project_dir,'modules')
if os.path.exists(module_js_dir):
for file in os.listdir(module_js_dir):
if file.endswith('.js'):
module_js.append({'from':os.path.join(module_js_dir,file),'to':os.path.join(app_dir,file),'path':'modules/'+file})
if self.deploytype != 'export-build':
# Have to load the module detection here, in order to
# prevent distributing even MORE stuff in export/transport
sys.path.append(os.path.join(template_dir,'../module'))
from module import ModuleDetector
detector = ModuleDetector(self.project_dir)
missing_modules, modules = detector.find_app_modules(ti, 'iphone', self.deploytype)
# we have to copy these even in simulator given the path difference
if os.path.exists(app_dir):
self.copy_resources([iphone_resources_dir],app_dir,False)
if os.path.exists(app_dir):
self.copy_resources([iphone_platform_dir],app_dir,False)
# generate the includes for all compiled modules
xcconfig_c = "// this is a generated file - DO NOT EDIT\n\n"
if len(modules) > 0:
mods = open(os.path.join(self.classes_dir,'ApplicationMods.m'),'w+')
variables = {}
mods.write(MODULE_IMPL_HEADER)
for module in modules:
if module.js:
# CommonJS module
module_js.append({'from': module.js, 'path': 'modules/' + os.path.basename(module.js)})
module_id = module.manifest.moduleid.lower()
module_name = module.manifest.name.lower()
module_version = module.manifest.version
module_guid = ''
module_licensekey = ''
if module.manifest.has_property('guid'):
module_guid = module.manifest.guid
if module.manifest.has_property('licensekey'):
module_licensekey = module.manifest.licensekey
self.modules_metadata.append({'guid':module_guid,'name':module_name,'id':module_id,'dir':module.path,'version':module_version,'licensekey':module_licensekey})
xcfile = module.get_resource('module.xcconfig')
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
xcfile = os.path.join(self.project_dir,'modules','iphone',"%s.xcconfig" % module_name)
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
mods.write(" [modules addObject:[NSDictionary dictionaryWithObjectsAndKeys:@\"%s\",@\"name\",@\"%s\",@\"moduleid\",@\"%s\",@\"version\",@\"%s\",@\"guid\",@\"%s\",@\"licensekey\",nil]];\n" % (module_name,module_id,module_version,module_guid,module_licensekey));
# Load export symbols from modules...
metadata_path = os.path.join(module.path, 'metadata.json')
if os.path.exists(metadata_path):
self.load_metadata(metadata_path)
mods.write(" return modules;\n")
mods.write("}\n")
mods.write(FOOTER)
mods.close()
for (name, values) in variables.iteritems():
xcconfig_c += name + '=$(inherited) '
for value in values:
xcconfig_c += '$(%s) ' % value
xcconfig_c += '\n'
has_modules = True
xcconfig = os.path.join(self.iphone_dir,"module.xcconfig")
make_xcc = True
if os.path.exists(xcconfig):
existing_xcc = open(xcconfig).read()
# only copy if different so we don't trigger re-compile in xcode
make_xcc = existing_xcc!=xcconfig_c
if make_xcc:
xcconfig = open(xcconfig,'w')
xcconfig.write(xcconfig_c)
xcconfig.close()
#endif deploytype != 'export-build'
else:
# ... And for exported projects, load export symbols from
# the 'metadata' dir.
metadata_dir = os.path.join(self.iphone_dir, 'metadata')
if os.path.isdir(metadata_dir):
for file in os.listdir(metadata_dir):
self.load_metadata(os.path.join(metadata_dir,file))
if self.deploytype=='simulator' or self.deploytype=='export':
shutil.copy(os.path.join(template_dir,'Classes','defines.h'),os.path.join(self.classes_dir,'defines.h'))
if self.deploytype!='development' or has_modules:
if os.path.exists(app_dir) and self.deploytype != 'development':
self.copy_resources([resources_dir],app_dir,self.deploytype != 'test',module_js)
if self.deploytype == 'production':
debugger_plist = os.path.join(app_dir,'debugger.plist')
if os.path.exists(debugger_plist):
os.remove(debugger_plist)
if self.deploytype!='development' and self.deploytype!='export':
			defines_file = os.path.join(self.classes_dir, 'defines.h')
			defines_content = "// Warning: this is a generated file. Do not modify!\n\n"
			defines_content+= "#define TI_VERSION %s\n"%sdk_version
			for sym in self.defines:
				defines_content+="#define %s\n" % sym
			# only rewrite defines.h when the generated content has changed, so we
			# don't trigger an unnecessary Xcode rebuild
			existing_defines = open(defines_file).read() if os.path.exists(defines_file) else None
			if defines_content!=existing_defines:
				defines_header = open(defines_file,'w+')
				defines_header.write(defines_content)
				defines_header.close()
# deploy any module image files
for module in self.modules:
img_dir = os.path.join(template_dir,'modules',module.lower(),'images')
print "[DEBUG] module image = %s" % img_dir
if not os.path.exists(img_dir): continue
dest_img_dir = os.path.join(app_dir,'modules',module.lower(),'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
self.copy_resources([img_dir],dest_img_dir,False)
if self.deploytype!='development' and os.path.exists(app_dir):
# optimize PNGs - since we don't include them in the Resources of the xcodeproj
# the ones we copy in won't get optimized so we need to run it manually
# we can skip this on the simulator but should do it on device
dev_path = "/Developer"
# we need to ask xcode where the root path is
path = run.run(["/usr/bin/xcode-select","-print-path"],True,False)
if path:
dev_path = path.strip()
run.run(["%s/Platforms/iPhoneOS.platform/Developer/usr/bin/iphoneos-optimize"%dev_path,app_dir],False)
# remove empty directories
os.chdir(app_dir)
os.system("find . -type d -empty -delete")
else:
print "[INFO] Skipping JS compile, running from simulator"
if self.deploytype=='development':
softlink_for_simulator(self.project_dir,app_dir)
def compile_module(self):
appid_js_file = os.path.join(self.assets_dir, self.appid+'.js')
if not os.path.exists(appid_js_file):
appid_js_file = os.path.join(self.project_dir, '..', 'assets', self.appid+'.js')
root_asset = self.compile_commonjs_file(self.appid+'.js', appid_js_file)
js_files = []
for root, dirs, files in os.walk(self.assets_dir, True, None, True):
for file in [f for f in files if os.path.splitext(f)[1] == '.js']:
full_path = os.path.join(root, file)
self.compile_js_file(os.path.relpath(full_path, self.assets_dir), full_path, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
module_assets = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
# Clean up the generated assets
for file in js_files:
os.remove(os.path.join(self.assets_dir, file))
return (root_asset, module_assets)
def load_metadata(self, file):
module_metadata = open(file,'r')
metadata = json.load(module_metadata)
module_metadata.close()
for symbol in metadata['exports']:
self.add_symbol(symbol)
def add_symbol(self,api):
print "[DEBUG] detected symbol: %s" % api
curtoken = ''
tokens = api.split(".")
		if tokens[0] not in self.modules:
			self.modules.append(tokens[0])
for token in tokens:
curtoken+=token+"."
symbol = 'USE_TI_%s' % (curtoken.replace('.create','').replace('.','').replace('-','_').upper())
			if symbol not in self.defines:
				self.defines.append(symbol)
def extract_tokens(self,sym,line):
		# sloppy joe parsing code
		# could be prettier and faster, but it works and is rather reliable
c = 0
tokens = []
search = sym + "."
size = len(search)
while True:
i = line.find(search,c)
if i < 0:
break
found = False
buf = ''
x = 0
for n in line[i+size:]:
# look for a terminal - this could probably be easier
if n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\n','\t','\r']:
found = True
break
buf+=n
x+=1
tokens.append(buf)
if found:
c = i + x + 1
continue
break
return sorted(set(tokens))
def compile_js(self,file_contents):
for line in file_contents.split(';'):
for symbol in ('Titanium','Ti'):
for sym in self.extract_tokens(symbol,line):
self.add_symbol(sym)
self.exports.append(sym)
def process_html_files(self,data,source_root):
compile = []
if data.has_key('.js'):
for entry in data['.html']:
html_file = entry['from']
file_contents = open(os.path.expanduser(html_file)).read()
parser = HTMLParser()
parser.parse(file_contents)
# extract all our scripts that are dependencies and we
# don't compile these
scripts = parser.get_scripts()
if len(scripts) > 0:
js_files = data['.js']
for script in scripts:
# if a remote script, ignore
if script.startswith('http:') or script.startswith('https:'):
continue
if script.startswith('app://'):
script = script[6:]
# build a file relative to the html file
fullpath = os.path.abspath(os.path.join(os.path.dirname(html_file),script))
# remove this script from being compiled
for f in js_files:
if f['from']==fullpath:
# target it to be compiled
compile.append(f)
js_files.remove(f)
break
return compile
def compile_js_asset_file(self,path,file):
file_contents = open(os.path.expanduser(file)).read()
if self.deploytype == 'production' or self.deploytype == 'commonjs':
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
self.compile_js(file_contents)
path = os.path.join(self.assets_dir,path)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
tfile = open(path,'w+')
tfile.write(file_contents)
tfile.close()
# TODO: We should remove this when we can "safely" say we no longer support
# versions prior to 2.1, and also change the module loader code in iOS to
# no longer check for moduleAsset.
def compile_commonjs_file(self,path,from_):
js_files = []
self.compile_js_file(path, from_, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
return so
def compile_js_file(self, path, from_, js_files):
print "[DEBUG] compiling: %s" % from_
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
		js_files.append(path)
def copy_resources(self,sources,target,write_routing=True,module_js=[]):
js_files = []
if write_routing:
intf = open(os.path.join(self.classes_dir,'ApplicationRouting.h'),'w+')
impf = open(os.path.join(self.classes_dir,'ApplicationRouting.m'),'w+')
intf.write(HEADER)
intf.write(INTERFACE_HEADER)
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
def compile_js_file(path,from_):
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) compiling: %s" % (hour, minute, second, from_)
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
			js_files.append(path)
def compile_js_files():
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging javascript" % (hour, minute, second)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
impf.write(so)
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging finished" % (hour, minute, second)
def add_compiled_resources(source,target):
print "[DEBUG] copy resources from %s to %s" % (source,target)
compiled_targets = {}
for root, dirs, files in os.walk(source, True, None, True):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
prefix = root[len(source):]
from_ = to_unicode_or_not(os.path.join(root, file))
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(os.path.split(to_)[0])
if not os.path.exists(to_directory):
os.makedirs(to_directory)
fp = os.path.splitext(file)
ext = fp[1]
if ext == '.jss': continue
if len(fp)>1 and ext in ['.html','.js','.css']:
path = prefix + os.sep + file
path = path[1:]
entry = {'path':path,'from':from_,'to':to_}
if compiled_targets.has_key(ext):
compiled_targets[ext].append(entry)
else:
compiled_targets[ext]=[entry]
if not (write_routing and len(fp)>1 and ext in ['.html','.js','.css']):
# only copy if different filesize or doesn't exist
if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_):
print "[DEBUG] copying: %s to %s" % (from_,to_)
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.html'):
compiled = self.process_html_files(compiled_targets,source)
if len(compiled) > 0:
for c in compiled:
from_ = c['from']
to_ = c['to']
path = c['path']
print "[DEBUG] copying: %s to %s" % (from_,to_)
file_contents = open(from_).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
to = open(to_,'w')
to.write(file_contents)
to.close()
for ext in ('.css','.html'):
if compiled_targets.has_key(ext):
for css_file in compiled_targets[ext]:
from_ = css_file['from']
to_ = css_file['to']
print "[DEBUG] copying: %s to %s" % (from_,to_)
						if from_.endswith('.css'):
file_contents = open(from_).read()
packer = CSSPacker(file_contents)
file_contents = packer.pack()
to = open(to_,'w')
to.write(file_contents)
to.close()
else:
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.js'):
for js_file in compiled_targets['.js']:
path = js_file['path']
from_ = js_file['from']
compile_js_file(path, from_)
# copy in any module assets
for metadata in self.modules_metadata:
tp_dir = os.path.join(metadata['dir'],'assets')
if not os.path.exists(tp_dir): continue
tp_id = metadata['id']
t = '%s/modules/%s' %(target,tp_id)
add_compiled_resources(tp_dir,t)
for source in sources:
add_compiled_resources(source,target)
for js_file in module_js:
compile_js_file(js_file['path'], js_file['from'])
if write_routing:
			compile_js_files()
impf.write("\tNSNumber *index = [map objectForKey:path];\n")
impf.write("\tif (index == nil) { return nil; }\n")
impf.write("\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);\n")
impf.write('}\n')
intf.write(FOOTER)
impf.write(FOOTER)
intf.close()
impf.close()
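# Illustrative sketch (hypothetical JS source, following the logic of compile_js,
# extract_tokens and add_symbol above): for a line like
#   Ti.UI.createWindow({title: "hi"});
# extract_tokens('Ti', ...) yields ['UI.createWindow']; add_symbol then records
# the module 'UI' and the defines USE_TI_UI and USE_TI_UIWINDOW ('.create' is
# stripped and the remaining dots removed before upper-casing), which end up in
# the generated defines.h for non-development, non-export builds.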
if __name__ == "__main__":
argv = sys.argv
if len(argv) < 3:
print "[USAGE] %s <dir> <deploytype> [devicetype] [ios_version] [sdk_version]" % argv[0]
exit(1)
project_dir = argv[1]
deploytype = argv[2]
if deploytype == 'export-build':
xcode = True
else:
xcode = False
if len(argv) >= 4:
devicefamily = argv[3]
else:
devicefamily = 'unknown'
if len(argv) >= 5:
ios = argv[4]
else:
ios = 'unknown'
if len(argv) >= 6:
sdk = argv[5]
else:
sdk = None
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
appid = ti.properties['id']
name = ti.properties['name']
c = Compiler(project_dir,appid,name,deploytype)
c.compileProject(xcode,devicefamily,ios,sdk=sdk)
|
|
#!/usr/bin/env python
# BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import sys
import os
import argparse
import logging
logging.basicConfig(level=logging.INFO)
import pydoop
import pydoop.hadut as hadut
import pydoop.hdfs as hdfs
import pydoop.test_support as pts
from timer import Timer
DEFAULT_SCRIPT = "../../examples/wordcount/bin/wordcount-full.py"
CONF = {
"mapred.map.tasks": "2",
"mapred.reduce.tasks": "2",
"mapred.job.name": "wordcount",
"hadoop.pipes.java.recordreader": "false",
"hadoop.pipes.java.recordwriter": "false"
}
DATASET_DIR = "dataset"
HADOOP_CONF_DIR = pydoop.hadoop_conf()
PREFIX = os.getenv("PREFIX", pts.get_wd_prefix())
LOCAL_FILE_PREFIX = "file:/"
HDFS_FILE_PREFIX = "hdfs:///"
def update_conf(args):
if args.D:
for kv_pair in args.D:
k, v = [_.strip() for _ in kv_pair.split("=")]
CONF[k] = v
if args.mappers:
CONF["mapred.map.tasks"] = args.mappers
if args.reducers:
CONF["mapred.reduce.tasks"] = args.reducers
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--script", metavar="script",
help="the script to launch")
parser.add_argument("--mappers", metavar="# mappers", type=int,
help="the number of mappers for your mapred job")
parser.add_argument("--reducers", metavar="# reducers", type=int,
help="number of reducers of your mapred job")
parser.add_argument("--dataset", metavar="max file size for the dataset",
help="set to generate the dataset", type=int)
parser.add_argument("-D", metavar="NAME=VALUE", action="append",
help="additional Hadoop configuration parameters")
return parser
def create_dataset(logger, max_file_size_in_mb=200):
logger.info("Creating the dataset")
INPUT_FILE = "../../examples/input/alice.txt"
if not os.path.exists(INPUT_FILE):
raise IOError("input file not found")
with open(INPUT_FILE) as f:
text = f.read()
base_text_file_length = len(text)
if not os.path.exists("dataset"):
os.mkdir("dataset")
step_factor = 2
step_file_length = 0
step_file_length_mb = 0
while step_file_length_mb < max_file_size_in_mb:
step_file_length = (step_file_length
if step_file_length > 0
else base_text_file_length) * step_factor
step_file_length_mb = int(step_file_length/1048576)
if step_file_length_mb == 0:
continue
filename = "dataset/{0}MB".format(step_file_length_mb)
logger.info(" ->generating: %s", filename)
with open(filename, "w") as f:
file_length = 0
while file_length < step_file_length:
f.write(text)
file_length += base_text_file_length
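# Illustrative sketch (hypothetical base size): if alice.txt were about 1 MB,
# create_dataset would write dataset/2MB, dataset/4MB, ... doubling each step;
# the size check happens at the top of the loop, so the last file written is the
# first one whose size reaches max_file_size_in_mb (and may therefore exceed it).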
def main(argv):
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
with Timer() as total_time:
parser = make_parser()
args = parser.parse_args(argv)
if args.dataset:
print args.dataset
create_dataset(logger, args.dataset)
if args.script:
piped_code_file = args.script
else:
piped_code_file = DEFAULT_SCRIPT
if not os.path.exists(piped_code_file):
raise IOError("script {0} not found !!!".format(piped_code_file))
with open(piped_code_file) as f:
pipes_code = pts.add_sys_path(f.read())
dataset = [d for d in os.listdir("dataset") if d.endswith("MB")]
        dataset.sort(key=lambda x: int(x.replace("MB", "")))
logger.info(" Uploading dataset: { %s }", ', '.join(dataset))
        if not hadut.path_exists(DATASET_DIR):
            logger.info(" creating dataset folder")
            hdfs.mkdir(DATASET_DIR)
for data_filename in dataset:
source_path = os.path.join(DATASET_DIR, data_filename)
dest_path = os.path.join(DATASET_DIR, data_filename)
if not hadut.path_exists(os.path.join(DATASET_DIR, data_filename)):
logger.info(" -> uploading %s...", source_path)
hdfs.put(source_path, dest_path)
update_conf(args)
results = dict()
for data_input in dataset:
with Timer() as t:
runner = hadut.PipesRunner(prefix=PREFIX, logger=logger)
logger.info("Running the script %s with data input %s..",
piped_code_file, data_input)
data_input_path = os.path.join(DATASET_DIR, data_input)
runner.set_input(data_input_path, put=False)
runner.set_exe(pipes_code)
runner.run(properties=CONF, hadoop_conf_dir=HADOOP_CONF_DIR,
logger=logger)
res = runner.collect_output()
print data_input_path
local_wc = pts.LocalWordCount(data_input_path)
logging.info(local_wc.check(res))
#print res
#runner.clean()
results[data_input] = (t.secs, t.msecs)
print "\n\n RESULTs"
print "=" * (len(piped_code_file) + 15)
print " * script: {0}".format(piped_code_file)
print " * mappers: {0}".format(CONF["mapred.map.tasks"])
print " * reducers: {0}".format(CONF["mapred.reduce.tasks"])
print " * dataset: [{0}]".format(",".join(dataset))
print " * times (input -> secs):"
for data_input in dataset:
print " - {0} -> {1} secs.".format(
data_input, results[data_input][0]
)
print "\n => Total execution time: {0}".format(total_time.secs)
print "=" * (len(piped_code_file) + 15)
print "\n"
if __name__ == "__main__":
main(sys.argv[1:])
|
|
import struct
import unittest
import json
from manticore.platforms import evm
from manticore.core import state
from manticore.core.smtlib import Operators, ConstraintSet
import os
class EVMTest_SLOAD(unittest.TestCase):
_multiprocess_can_split_ = True
maxDiff = None
def _execute(self, new_vm):
last_returned = None
last_exception = None
try:
new_vm.execute()
except evm.Stop as e:
last_exception = "STOP"
except evm.NotEnoughGas:
last_exception = "OOG"
except evm.StackUnderflow:
last_exception = "INSUFFICIENT STACK"
except evm.InvalidOpcode:
last_exception = "INVALID"
except evm.SelfDestruct:
last_exception = "SUICIDED"
except evm.Return as e:
last_exception = "RETURN"
last_returned = e.data
except evm.Revert:
last_exception = "REVERT"
return last_exception, last_returned
def test_SLOAD_1(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {
115792089237316195423570985008687907853269984665640564039457584007913129639935: 0
}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_2(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {0: 15589350798196297794172638215640352209663280458410}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [15589350798196297794172638215640352209663280458410])
def test_SLOAD_3(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {1: 17148285878015927573589902037204387430629608504251}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [17148285878015927573589902037204387430629608504251])
def test_SLOAD_4(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_5(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_6(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {16: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_7(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {32: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_8(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {48: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_SLOAD_9(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = b"T"
storage = {6089590155545428825848686802984512581899718912: 0}
world.create_account(address=address, balance=balance, code=code, storage=storage)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"T"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/frames/tree_reduce.py
# Helper module to "Reduce" tree using frames
# @see Ostap::DataFrame
# @see ROOT::RDataFrame
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2018-06-16
# =============================================================================
"""Helper module to ``reduce'' tree using frames
- see Ostap.DataFrame
- see ROOT.ROOT.RDataFrame
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2011-06-07"
__all__ = (
'ReduceTree' ,
'reduce' ,
)
# =============================================================================
import ROOT, os
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.frames.tree_reduce' )
else : logger = getLogger( __name__ )
# =============================================================================
logger.debug ( "``Reduce'' TTree using ROOT::RDataFrame object")
# =============================================================================
import ostap.trees.trees
from ostap.core.core import cpp, Ostap
from ostap.utils.cleanup import CleanUp
# =============================================================================
## @class ReduceTree
#  Reduce TTree object using an intermediate (temporary) file
# @code
# tree = ...
# r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] , 'new_file.root' )
# reduced = r.tree
# @endcode
class ReduceTree(CleanUp):
"""Reduce ROOT.TTree object
>>> tree = ...
    >>> r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] )
>>> reduced = r.tree
"""
def __init__ ( self ,
chain , ## input TChain/TTree
selection = {} , ## selection/cuts
save_vars = () , ## list of variables to save
new_vars = {} , ## new variables
no_vars = () , ## exclude these variables
##
output = '' , ## output file name
name = '' , ## the name
                   addselvars = False , ## add variables from selections?
tmp_keep = False , ## keep the temporary file
silent = False ): ## silent processing
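        ## Pipeline: wrap the chain into a DataFrame, define the new columns,
        ## apply the selection(s) as named filters, snapshot the requested
        ## branches into `output`, and finally re-open the result as a TChain.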
from ostap.frames.frames import DataFrame
frame = DataFrame ( chain )
report = None
self.__frame_main = frame
if not silent :
pbar = frame.ProgressBar ( len ( chain ) )
nvars = []
## new variables
for nv in new_vars :
frame = frame.Define ( nv , new_vars [ nv] )
nvars.append ( nv )
from ostap.core.ostap_types import ( string_types ,
listlike_types ,
dictlike_types )
cut_types = string_types + ( ROOT.TCut , )
Lmax = 30
selections = []
if selection and isinstance ( selection , cut_types ) :
ss = str ( selection ).strip()
if len ( ss ) < Lmax : filter_name = ss
else : filter_name = 'SELECTION'
frame = frame.Filter ( ss , filter_name )
selections.append ( ss )
elif selection and isinstance ( selection , dictlike_types ) :
for filter_name in selection :
s = selection [ filter_name ]
assert isinstance ( s , cut_types ),\
'Invalid selection type %s/%s' % ( s , type ( s ) )
ss = str ( s ).strip()
frame = frame.Filter ( ss , str ( filter_name ) )
selections.append ( ss )
elif selection and isinstance ( selection , listlike_types ) :
for i , s in enumerate ( selection ) :
assert isinstance ( s , cut_types ),\
'Invalid selection type %s/%s' % ( s , type ( s ) )
ss = str( s ).strip()
##
if len ( ss ) < Lmax : filter_name = ss
else : filter_name = 'SELECTION%d' % i
#
frame = frame.Filter ( ss , filter_name )
selections.append ( ss )
elif selection :
raise TypeError('Invalid selection type %s/%s' % ( selection , type ( selection ) ) )
if not output :
output = self.tempfile ( prefix = 'ostap-frame-' , suffix = '.root' )
## logger.debug ( 'ReduceTree: output file is %s' % output )
if not tmp_keep : self.trash.add ( output )
## if selections : report = frame.Report()
if selections and addselvars :
bvars = chain.the_variables ( selections )
save_vars = list ( bvars ) + [ v for v in save_vars if not v in bvars ]
save_vars = tuple ( save_vars )
## exclude some variables
if no_vars and not save_vars :
bvars = list ( chain.branches () )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )
elif no_vars :
bvars = chain.the_variables ( *save_vars )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )
nb_ = len ( chain.branches () )
ne_ = len ( chain )
## chain name:
## FIXME!
# cname = chain.GetName() ## produces ROOT error
if not name :
_ , _ , cname = chain.GetName().rpartition ( '/' )
name = '%s_reduced' % cname
self.__name = name
if not save_vars :
snapshot = frame.Snapshot ( name , output )
else :
bvars = chain.the_variables ( *save_vars )
all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
from ostap.core.core import strings as _strings
all_vars = _strings ( all_vars )
snapshot = frame.Snapshot ( name , output , all_vars )
assert os.path.exists ( output ) and\
               os.path.isfile ( output ) , 'Invalid file %s' % output
self.__chain = ROOT.TChain ( name )
self.__chain.Add ( output )
self.__output = output
self.__report = 'Tree -> Frame -> Tree filter/transformation'
self.__table = []
if report :
from ostap.frames.frames import report_print, report_as_table
title = self.__report
self.__report += '\n%s' % report_print ( report , title , '# ')
self.__table = report_as_table ( report )
fs = os.path.getsize ( self.__output )
gb , r = divmod ( fs , 1024 * 1024 * 1024 )
mb , r = divmod ( r , 1024 * 1024 )
kb , r = divmod ( r , 1024 )
if gb : fs = '%.1fGB' % ( float ( fs ) / 1024 / 1024 / 1024 )
elif mb : fs = '%.1fMB' % ( float ( fs ) / 1024 / 1024 )
elif kb : fs = '%.1fkB' % ( float ( fs ) / 1024 )
else : fs = '%sB' % fs
nb = len ( self.__chain.branches () )
ne = len ( self.__chain )
self.__report += '\n# Reduce %d -> %d branches, %d -> %d entries' % ( nb_ , nb , ne_ , ne )
self.__report += '\n# Output:%s size:%s' % ( self.__output , fs )
self.__report += '\n# %s' % str ( self.__chain )
del self.__frame_main
def __str__ ( self ) : return self.__report
def __repr__ ( self ) : return self.__report
@property
def output ( self ) :
"""``output'' : the output file name"""
return self.__output
@property
def chain ( self ) :
"""``chain'': the reduced chain/tree (same as tree)"""
return self.__chain
@property
def name ( self ) :
"""``name'' : the output chain name"""
return self.__name
@property
def tree ( self ) :
"""``tree'': the reduced chain/tree (same as chain)"""
return self.__chain
@property
def table ( self ) :
"""``table'' : get the statitics as table"""
return self.__table
@property
def report ( self ) :
"""``report'' : get the statitics report"""
return self.__report
# ===============================================================================
## Powerful method to reduce/transform the tree/chain.
#  It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows
#  - filter entries from TTree/TChain
#  - add new columns
#  - remove unnecessary columns
# @code
# tree = ....
# reduced1 = tree.reduce ( 'pt>1' )
# reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] )
# reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
# reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
# reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
# @endcode
# @see Ostap::DataFrame
# @see ROOT::RDataFrame
def reduce ( tree ,
selection ,
save_vars = () ,
new_vars = {} ,
no_vars = () ,
output = '' ,
name = '' ,
addselvars = False ,
silent = False ) :
""" Powerful method to reduce/tranform the tree/chain.
It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows
- filter entries from TTree/TChain
- add new colums
- remove unnesessary columns
>>> tree = ....
>>> reduced1 = tree.reduce ( 'pt>1' )
>>> reduced2 = tree.reduce ( 'pt>1' , vars = [ 'p', 'pt' ,'q' ] )
>>> reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
>>> reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
>>> reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
"""
nb0 = len ( tree.branches() )
ne0 = len ( tree )
reduced = ReduceTree ( tree ,
selection = selection ,
save_vars = save_vars ,
new_vars = new_vars ,
no_vars = no_vars ,
output = output ,
name = name ,
addselvars = addselvars ,
tmp_keep = True ,
silent = silent )
from ostap.trees.trees import Chain
result = Chain ( reduced.chain )
if not output : result.trash.add ( reduced.output )
if silent :
nb = len ( result.chain.branches() )
ne = len ( result.chain )
f = float ( nb0 * ne0 ) / ( nb * ne )
logger.info ( 'reduce: (%dx%d) -> (%dx%d) %.1f (branches x entries) ' % ( nb0 , ne0 , nb , ne , f ) )
return result
ROOT.TTree. reduce = reduce
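# =============================================================================
## Minimal usage sketch for the decorated method (the tuple and file names
#  below are assumptions, not taken from this module):
#  @code
#  import ROOT
#  chain = ROOT.TChain ( 'MyTuple' )
#  chain.Add ( 'input.root' )
#  slim  = chain.reduce ( 'pt>1' ,
#                         save_vars = ( 'pt' , 'eta' )    ,
#                         new_vars  = { 'pt2' : 'pt*pt' } ,
#                         output    = 'slim.root'         )
#  @endcode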
# =============================================================================
_decorated_classes_ = (
ROOT.TTree ,
)
_new_methods_ = (
ROOT.TTree.reduce ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
|
|
#!/usr/bin/env python
#
# Copyright (C) 2015 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
import string
import random
import itertools
import math
from struct import *
import pdb
class TemplateGenerator(Actions):
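    '''Poll generator: each public method below writes one protocol request to
    the challenge binary and reads back the expected reply, carrying the
    certificate and token fields between steps via self.state.'''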
def _getToken(self):
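        # Derive the expected 8-digit token from the shared magic page: each of
        # the next ten magic bytes contributes its small prime factor when the
        # byte is divisible by it, and the product is then scaled into the
        # range [10000000, 99999999].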
requestBits = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
token = 1
for c in range(10):
if(ord(self.magic_page[self.magic_idx+c]) % requestBits[c] == 0):
token = token * requestBits[c]
self.magic_idx += 10
while (token < 10000000):
token = token * 10
while (token > 99999999):
            token = token // 10
return str(token)
def start(self):
#self.delay(100)
self.magic_idx = 0
reg_ex = '(.*)\?'
certificate = Variable('certificate')
certificate.set_re(reg_ex, group=1)
self.state['certificate'] = certificate
self.state['token'] = ''
revoked_token = Variable('revokedToken')
revoked_token.set_re(reg_ex, group=1)
self.state['revoked_token'] = revoked_token
revoked_cert = Variable('revokedCert')
revoked_cert.set_re(reg_ex, group=1)
self.state['revoked_cert'] = revoked_cert
issuer = Variable('issuer')
reg_ex = "Issuer=(.*);"
issuer.set_re(reg_ex, group=1)
self.state['issuer'] = issuer
signature = Variable('signature')
self.state['signature'] = signature
expiration = Variable('expiration')
self.state['expiration'] = expiration
self.id = random.randint(1,10000)
pass
def enrollCert(self):
command = "enroll"
auth_type = "PeerCert"
self.id += 1
id_str = str(self.id)
subject = ''.join(random.choice(string.ascii_letters + " ") for _ in range(20))
key = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9))
use = "enroll:reenroll:crls"
certificate = "Subject=" + subject + ";Key=" + key + ";Use=" + use
body = certificate
message1 = command + "," + auth_type + "," + id_str + ","
message2 = "," + body + "!"
issuer_hdr = "Issuer="
subject_hdr = ";Subject="
key_hdr = ";Key="
signature_hdr = ";Signature="
expiration_hdr = ";Expiration="
use_hdr = ";Use="
self.write(message1)
self.write(issuer_hdr)
self.write(self.state['issuer'])
self.write(subject_hdr)
self.write(self.state['subject'])
self.write(key_hdr)
self.write(self.state['key'])
self.write(signature_hdr)
self.write(self.state['signature'])
self.write(expiration_hdr)
self.write(self.state['expiration'])
self.write(use_hdr)
self.write(self.state['certUse'])
self.write(message2)
self.read(delim=";", assign=self.state['issuer'])
self.state['subject'] = subject
match = "Subject=" + subject + ";"
self.read(delim=";", expect=match)
self.state['key'] = key
match = "Key=" + key + ";"
self.read(delim=";", expect=match)
reg_ex = "Signature=(.*);"
self.state['signature'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['signature'])
reg_ex = "Expiration=(.*);"
self.state['expiration'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['expiration'])
self.state['certUse'] = use
match = "Use=" + use + "?"
self.read(delim="?", expect=match)
def requestTokenUP(self):
command = "newTokens"
auth_type = "UserPass"
self.id += 1
id_str = str(self.id)
user = "user"
password = "pass"
credential = user + "/" + password
use = "enroll:newTokens:refreshToken:revokeToken"
body = "Use=" + use
message = command + "," + auth_type + "," + id_str + "," + credential + "," + body + "!"
self.write(message)
self.state['token'] = self._getToken()
token_re = "Token=" + self.state['token'] + ";Expiration=(.*);Use=enroll:newTokens:refreshToken:revokeToken?"
self.read(delim="?", expect=token_re, expect_format='pcre')
def requestTokenToken(self):
command = "newTokens"
auth_type = "Token"
self.id += 1
id_str = str(self.id)
use = "enroll:newTokens:refreshToken:revokeToken"
body = "Use=" + use
message1 = command + "," + auth_type + "," + id_str + ",Token=" + self.state['token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message2 = "," + body + "!"
self.write(message1)
self.write(message2)
self.state['token'] = self._getToken()
token_re = "Token=" + self.state['token'] + ";Expiration=(.*);Use=enroll:newTokens:refreshToken:revokeToken?"
self.read(delim="?", expect=token_re, expect_format='pcre')
def refreshTokenToken(self):
command = "refreshToken"
auth_type = "Token"
self.id += 1
id_str = str(self.id)
use = "enroll:newTokens:refreshToken:revokeToken"
message1 = command + "," + auth_type + "," + id_str + ",Token=" + self.state['token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message2 = ","
body = "Token=" + self.state['token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
self.write(message1)
self.write(message2)
self.write(body)
self.write("!")
token_re = "Token=" + self.state['token'] + ";Expiration=(.*);Use=enroll:newTokens:refreshToken:revokeToken?"
self.read(delim="?", expect=token_re, expect_format='pcre')
def revokeTokenToken(self):
command = "newTokens"
auth_type = "Token"
self.id += 1
id_str = str(self.id)
use = "enroll:newTokens:refreshToken:revokeToken"
body = "Use=" + use
message1 = command + "," + auth_type + "," + id_str + ",Token=" + self.state['token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message2 = "," + body + "!"
self.write(message1)
self.write(message2)
self.state['revoked_token'] = self._getToken()
token_re = "Token=" + self.state['revoked_token'] + ";Expiration=(.*);Use=enroll:newTokens:refreshToken:revokeToken?"
self.read(delim="?", expect=token_re, expect_format='pcre')
command = "revokeT"
auth_type = "Token"
self.id += 1
id_str = str(self.id)
body = "Token=" + self.state['revoked_token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message1 = command + "," + auth_type + "," + id_str + ",Token=" + self.state['revoked_token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message2 = ","
self.write(message1)
self.write(message2)
self.write(body)
self.write("!")
response = "Response=Token Revoked?"
self.read(delim="?", expect=response)
def enrollToken(self):
command = "enroll"
auth_type = "Token"
self.id += 1
id_str = str(self.id)
subject = ''.join(random.choice(string.ascii_letters + " ") for _ in range(20))
key = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9))
use = "enroll:reenroll:crls"
certificate = "Subject=" + subject + ";Key=" + key + ";Use=" + use
body = certificate
message1 = command + "," + auth_type + "," + id_str + ",Token=" + self.state['token'] + ";Expiration=100;Use=enroll:newTokens:refreshToken:revokeToken"
message2 = "," + body + "!"
self.write(message1)
self.write(message2)
self.read(delim=";", assign=self.state['issuer'])
self.state['subject'] = subject
match = "Subject=" + subject + ";"
self.read(delim=";", expect=match)
self.state['key'] = key
match = "Key=" + key + ";"
self.read(delim=";", expect=match)
reg_ex = "Signature=(.*);"
self.state['signature'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['signature'])
reg_ex = "Expiration=(.*);"
self.state['expiration'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['expiration'])
self.state['certUse'] = use
match = "Use=" + use + "?"
self.read(delim="?", expect=match)
def reenrollCert(self):
command = "reenroll"
auth_type = "PeerCert"
self.id += 1
id_str = str(self.id)
message1 = command + "," + auth_type + "," + id_str + ","
message2 = ","
self.write(message1)
issuer_hdr = "Issuer="
subject_hdr = ";Subject="
key_hdr = ";Key="
signature_hdr = ";Signature="
expiration_hdr = ";Expiration="
use_hdr = ";Use="
self.write(issuer_hdr)
self.write(self.state['issuer'])
self.write(subject_hdr)
self.write(self.state['subject'])
self.write(key_hdr)
self.write(self.state['key'])
self.write(signature_hdr)
self.write(self.state['signature'])
self.write(expiration_hdr)
self.write(self.state['expiration'])
self.write(use_hdr)
self.write(self.state['certUse'])
self.write(message2)
self.write(issuer_hdr)
self.write(self.state['issuer'])
self.write(subject_hdr)
self.write(self.state['subject'])
self.write(key_hdr)
self.write(self.state['key'])
self.write(signature_hdr)
self.write(self.state['signature'])
self.write(expiration_hdr)
self.write(self.state['expiration'])
self.write(use_hdr)
self.write(self.state['certUse'])
self.write("!")
self.read(delim=";", assign=self.state['issuer'])
subject = self.state['subject']
match = "Subject=" + subject + ";"
self.read(delim=";", expect=match)
key = self.state['key']
match = "Key=" + key + ";"
self.read(delim=";", expect=match)
reg_ex = "Signature=(.*);"
self.state['signature'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['signature'])
reg_ex = "Expiration=(.*);"
self.state['expiration'].set_re(reg_ex, group=1)
self.read(delim=";", assign=self.state['expiration'])
use = self.state['certUse']
match = "Use=" + use + "?"
self.read(delim="?", expect=match)
def revokeCert(self):
command = "enroll"
auth_type = "PeerCert"
self.id += 1
id_str = str(self.id)
subject = ''.join(random.choice(string.ascii_letters + " ") for _ in range(20))
key = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9))
use = "enroll:reenroll:crls"
certificate = "Subject=" + subject + ";Key=" + key + ";Use=" + use
body = certificate
message1 = command + "," + auth_type + "," + id_str + ","
message2 = "," + body + "!"
self.write(message1)
issuer_hdr = "Issuer="
subject_hdr = ";Subject="
key_hdr = ";Key="
signature_hdr = ";Signature="
expiration_hdr = ";Expiration="
use_hdr = ";Use="
self.write(issuer_hdr)
self.write(self.state['issuer'])
self.write(subject_hdr)
self.write(self.state['subject'])
self.write(key_hdr)
self.write(self.state['key'])
self.write(signature_hdr)
self.write(self.state['signature'])
self.write(expiration_hdr)
self.write(self.state['expiration'])
self.write(use_hdr)
self.write(self.state['certUse'])
self.write(message2)
self.read(delim="?", assign=self.state['revoked_cert'])
command = "revoke"
auth_type = "PeerCert"
self.id += 1
id_str = str(self.id)
body = self.state['revoked_cert']
message1 = command + "," + auth_type + "," + id_str + ","
message2 = ","
self.write(message1)
self.write(self.state['revoked_cert'])
self.write(message2)
self.write(body)
self.write("!")
self.read(delim="?", assign=self.state['revoked_cert'])
def pov2(self):
command = "newTokens"
auth_type = "UserPass"
id_str = "4175" #str(self.id)
user = "user"
password = "pass"
credential = user + "/" + password
payload = "AAAABBBBCCCCDDDDAAAABBBBCCCCD"
body = "Use=" + payload
message = command + "," + auth_type + "," + id_str + "," + credential + "," + body + "!"
self.write(message)
flag_re = r'Token=[0-9]{8};Expiration=[0-9]*;Use=AAAABBBBCCCCDDDDAAAABBBBCCCCD\?(.*)'
flag = Variable('flag')
flag.set_re(flag_re, group=1)
self.read(length=104, assign=flag)
def quit(self):
self.write("quit, , , !")
return -1
|
|
#!/usr/bin/env python
""" generated source for module EWrapperMsgGenerator """
#
# Original file copyright original author(s).
# This file copyright Troy Melhase, troy@gci.net.
#
# WARNING: all changes to this file will be lost.
from ib.ext.AnyWrapperMsgGenerator import AnyWrapperMsgGenerator
from ib.ext.EClientSocket import EClientSocket
from ib.ext.MarketDataType import MarketDataType
from ib.ext.TickType import TickType
from ib.ext.Util import Util
from ib.lib import Double
# package: com.ib.client
class EWrapperMsgGenerator(AnyWrapperMsgGenerator):
""" generated source for class EWrapperMsgGenerator """
SCANNER_PARAMETERS = "SCANNER PARAMETERS:"
FINANCIAL_ADVISOR = "FA:"
@classmethod
def tickPrice(cls, tickerId, field, price, canAutoExecute):
""" generated source for method tickPrice """
return "id=" + str(tickerId) + " " + TickType.getField(field) + "=" + str(price) + " " + (" canAutoExecute" if (canAutoExecute != 0) else " noAutoExecute")
@classmethod
def tickSize(cls, tickerId, field, size):
""" generated source for method tickSize """
return "id=" + str(tickerId) + " " + TickType.getField(field) + "=" + str(size)
@classmethod
def tickOptionComputation(cls, tickerId, field, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
""" generated source for method tickOptionComputation """
toAdd = "id=" + str(tickerId) + " " + TickType.getField(field) \
+ ": vol = " + (str(impliedVol) if (impliedVol >= 0 and impliedVol != Double.MAX_VALUE) else "N/A") \
+ " delta = " + (str(delta) if (abs(delta) <= 1) else "N/A") \
+ " gamma = " + (str(gamma) if (abs(gamma) <= 1) else "N/A") \
+ " vega = " + (str(vega) if (abs(vega) <= 1) else "N/A") \
+ " theta = " + (str(theta) if (abs(theta) <= 1) else "N/A") \
+ " optPrice = " + (str(optPrice) if (optPrice >= 0 and optPrice != Double.MAX_VALUE) else "N/A") \
+ " pvDividend = " + (str(pvDividend) if (pvDividend >= 0 and pvDividend != Double.MAX_VALUE) else "N/A") \
+ " undPrice = " + (str(undPrice) if (undPrice >= 0 and undPrice != Double.MAX_VALUE) else "N/A")
return toAdd
@classmethod
def tickGeneric(cls, tickerId, tickType, value):
""" generated source for method tickGeneric """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) + "=" + str(value)
@classmethod
def tickString(cls, tickerId, tickType, value):
""" generated source for method tickString """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) + "=" + str(value)
@classmethod
def tickEFP(cls, tickerId, tickType, basisPoints, formattedBasisPoints, impliedFuture, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
""" generated source for method tickEFP """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) \
+ ": basisPoints = " + str(basisPoints) + "/" + formattedBasisPoints \
+ " impliedFuture = " + str(impliedFuture) + " holdDays = " + str(holdDays) \
+ " futureExpiry = " + futureExpiry + " dividendImpact = " + str(dividendImpact) \
+ " dividends to expiry = " + str(dividendsToExpiry)
@classmethod
def orderStatus(cls, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
""" generated source for method orderStatus """
return "order status: orderId=" + str(orderId) + " clientId=" + str(clientId) \
+ " permId=" + str(permId) + " status=" + status + " filled=" + str(filled) \
+ " remaining=" + str(remaining) + " avgFillPrice=" + str(avgFillPrice) \
+ " lastFillPrice=" + str(lastFillPrice) + " parent Id=" + str(parentId) \
+ " whyHeld=" + whyHeld
@classmethod
def openOrder(cls, orderId, contract, order, orderState):
""" generated source for method openOrder """
msg = "open order: orderId=" + str(orderId) \
+ " action=" + str(order.m_action) \
+ " quantity=" + str(order.m_totalQuantity) \
+ " conid=" + str(contract.m_conId) \
+ " symbol=" + str(contract.m_symbol) \
+ " secType=" + str(contract.m_secType) \
+ " expiry=" + str(contract.m_expiry) \
+ " strike=" + str(contract.m_strike) \
+ " right=" + str(contract.m_right) \
+ " multiplier=" + str(contract.m_multiplier) \
+ " exchange=" + str(contract.m_exchange) \
+ " primaryExch=" + str(contract.m_primaryExch) \
+ " currency=" + str(contract.m_currency) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " tradingClass=" + str(contract.m_tradingClass) \
+ " type=" + str(order.m_orderType) \
+ " lmtPrice=" + Util.DoubleMaxString(order.m_lmtPrice) \
+ " auxPrice=" + Util.DoubleMaxString(order.m_auxPrice) \
+ " TIF=" + str(order.m_tif) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " client Id=" + str(order.m_clientId) \
+ " parent Id=" + str(order.m_parentId) \
+ " permId=" + str(order.m_permId) \
+ " outsideRth=" + str(order.m_outsideRth) \
+ " hidden=" + str(order.m_hidden) \
+ " discretionaryAmt=" + str(order.m_discretionaryAmt) \
+ " displaySize=" + str(order.m_displaySize) \
+ " triggerMethod=" + str(order.m_triggerMethod) \
+ " goodAfterTime=" + str(order.m_goodAfterTime) \
+ " goodTillDate=" + str(order.m_goodTillDate) \
+ " faGroup=" + str(order.m_faGroup) \
+ " faMethod=" + str(order.m_faMethod) \
+ " faPercentage=" + str(order.m_faPercentage) \
+ " faProfile=" + str(order.m_faProfile) \
+ " shortSaleSlot=" + str(order.m_shortSaleSlot) \
+ " designatedLocation=" + str(order.m_designatedLocation) \
+ " exemptCode=" + str(order.m_exemptCode) \
+ " ocaGroup=" + str(order.m_ocaGroup) \
+ " ocaType=" + str(order.m_ocaType) \
+ " rule80A=" + str(order.m_rule80A) \
+ " allOrNone=" + str(order.m_allOrNone) \
+ " minQty=" + Util.IntMaxString(order.m_minQty) \
+ " percentOffset=" + Util.DoubleMaxString(order.m_percentOffset) \
+ " eTradeOnly=" + order.m_eTradeOnly \
+ " firmQuoteOnly=" + str(order.m_firmQuoteOnly) \
+ " nbboPriceCap=" + Util.DoubleMaxString(order.m_nbboPriceCap) \
+ " optOutSmartRouting=" + str(order.m_optOutSmartRouting) \
+ " auctionStrategy=" + str(order.m_auctionStrategy) \
+ " startingPrice=" + Util.DoubleMaxString(order.m_startingPrice) \
+ " stockRefPrice=" + Util.DoubleMaxString(order.m_stockRefPrice) \
+ " delta=" + Util.DoubleMaxString(order.m_delta) \
+ " stockRangeLower=" + Util.DoubleMaxString(order.m_stockRangeLower) \
+ " stockRangeUpper=" + Util.DoubleMaxString(order.m_stockRangeUpper) \
+ " volatility=" + Util.DoubleMaxString(order.m_volatility) \
+ " volatilityType=" + str(order.m_volatilityType) \
+ " deltaNeutralOrderType=" + str(order.m_deltaNeutralOrderType) \
+ " deltaNeutralAuxPrice=" + Util.DoubleMaxString(order.m_deltaNeutralAuxPrice) \
+ " deltaNeutralConId=" + str(order.m_deltaNeutralConId) \
+ " deltaNeutralSettlingFirm=" + str(order.m_deltaNeutralSettlingFirm) \
+ " deltaNeutralClearingAccount=" + str(order.m_deltaNeutralClearingAccount) \
+ " deltaNeutralClearingIntent=" + str(order.m_deltaNeutralClearingIntent) \
+ " deltaNeutralOpenClose=" + str(order.m_deltaNeutralOpenClose) \
+ " deltaNeutralShortSale=" + str(order.m_deltaNeutralShortSale) \
+ " deltaNeutralShortSaleSlot=" + str(order.m_deltaNeutralShortSaleSlot) \
+ " deltaNeutralDesignatedLocation=" + str(order.m_deltaNeutralDesignatedLocation) \
+ " continuousUpdate=" + str(order.m_continuousUpdate) \
+ " referencePriceType=" + str(order.m_referencePriceType) \
+ " trailStopPrice=" + Util.DoubleMaxString(order.m_trailStopPrice) \
+ " trailingPercent=" + Util.DoubleMaxString(order.m_trailingPercent) \
+ " scaleInitLevelSize=" + Util.IntMaxString(order.m_scaleInitLevelSize) \
+ " scaleSubsLevelSize=" + Util.IntMaxString(order.m_scaleSubsLevelSize) \
+ " scalePriceIncrement=" + Util.DoubleMaxString(order.m_scalePriceIncrement) \
+ " scalePriceAdjustValue=" + Util.DoubleMaxString(order.m_scalePriceAdjustValue) \
+ " scalePriceAdjustInterval=" + Util.IntMaxString(order.m_scalePriceAdjustInterval) \
+ " scaleProfitOffset=" + Util.DoubleMaxString(order.m_scaleProfitOffset) \
+ " scaleAutoReset=" + str(order.m_scaleAutoReset) \
+ " scaleInitPosition=" + Util.IntMaxString(order.m_scaleInitPosition) \
+ " scaleInitFillQty=" + Util.IntMaxString(order.m_scaleInitFillQty) \
+ " scaleRandomPercent=" + str(order.m_scaleRandomPercent) \
+ " hedgeType=" + str(order.m_hedgeType) \
+ " hedgeParam=" + str(order.m_hedgeParam) \
+ " account=" + str(order.m_account) \
+ " settlingFirm=" + str(order.m_settlingFirm) \
+ " clearingAccount=" + str(order.m_clearingAccount) \
+ " clearingIntent=" + str(order.m_clearingIntent) \
+ " notHeld=" + str(order.m_notHeld) \
+ " whatIf=" + str(order.m_whatIf)
if "BAG" == contract.m_secType:
if contract.m_comboLegsDescrip is not None:
msg += " comboLegsDescrip=" + str(contract.m_comboLegsDescrip)
msg += " comboLegs={"
if contract.m_comboLegs is not None:
i = 0
while i < len(contract.m_comboLegs):
comboLeg = contract.m_comboLegs[i]
msg += " leg " + str(i + 1) + ": "
msg += "conId=" + str(comboLeg.m_conId)
msg += " ratio=" + str(comboLeg.m_ratio)
msg += " action=" + str(comboLeg.m_action)
msg += " exchange=" + str(comboLeg.m_exchange)
msg += " openClose=" + str(comboLeg.m_openClose)
msg += " shortSaleSlot=" + str(comboLeg.m_shortSaleSlot)
msg += " designatedLocation=" + str(comboLeg.m_designatedLocation)
msg += " exemptCode=" + str(comboLeg.m_exemptCode)
if order.m_orderComboLegs is not None and len(contract.m_comboLegs) == len(order.m_orderComboLegs):
orderComboLeg = order.m_orderComboLegs[i]
msg += " price=" + Util.DoubleMaxString(orderComboLeg.m_price)
msg += ";"
i += 1
msg += "}"
if order.m_basisPoints != Double.MAX_VALUE:
msg += " basisPoints=" + Util.DoubleMaxString(order.m_basisPoints)
msg += " basisPointsType=" + Util.IntMaxString(order.m_basisPointsType)
if contract.m_underComp is not None:
underComp = contract.m_underComp
msg += " underComp.conId =" + str(underComp.m_conId) + " underComp.delta =" + str(underComp.m_delta) + " underComp.price =" + str(underComp.m_price)
if not Util.StringIsEmpty(order.m_algoStrategy):
msg += " algoStrategy=" + str(order.m_algoStrategy)
msg += " algoParams={"
if order.m_algoParams is not None:
algoParams = order.m_algoParams
i = 0
while i < len(algoParams):
param = algoParams[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}"
if "BAG" == contract.m_secType:
msg += " smartComboRoutingParams={"
if order.m_smartComboRoutingParams is not None:
smartComboRoutingParams = order.m_smartComboRoutingParams
i = 0
while i < len(smartComboRoutingParams):
param = smartComboRoutingParams[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}"
orderStateMsg = " status=" + str(orderState.m_status) \
+ " initMargin=" + str(orderState.m_initMargin) \
+ " maintMargin=" + str(orderState.m_maintMargin) \
+ " equityWithLoan=" + str(orderState.m_equityWithLoan) \
+ " commission=" + Util.DoubleMaxString(orderState.m_commission) \
+ " minCommission=" + Util.DoubleMaxString(orderState.m_minCommission) \
+ " maxCommission=" + Util.DoubleMaxString(orderState.m_maxCommission) \
+ " commissionCurrency=" + str(orderState.m_commissionCurrency) \
+ " warningText=" + str(orderState.m_warningText)
return msg + orderStateMsg
@classmethod
def openOrderEnd(cls):
""" generated source for method openOrderEnd """
return " =============== end ==============="
@classmethod
def updateAccountValue(cls, key, value, currency, accountName):
""" generated source for method updateAccountValue """
return "updateAccountValue: " + key + " " + value + " " + currency + " " + accountName
@classmethod
def updatePortfolio(cls, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
""" generated source for method updatePortfolio """
msg = "updatePortfolio: " + cls.contractMsg(contract) + \
str(position) + " " + str(marketPrice) + " " + str(marketValue) + \
" " + str(averageCost) + " " + str(unrealizedPNL) + " " + \
str(realizedPNL) + " " + accountName
return msg
@classmethod
def updateAccountTime(cls, timeStamp):
""" generated source for method updateAccountTime """
return "updateAccountTime: " + timeStamp
@classmethod
def accountDownloadEnd(cls, accountName):
""" generated source for method accountDownloadEnd """
return "accountDownloadEnd: " + accountName
@classmethod
def nextValidId(cls, orderId):
""" generated source for method nextValidId """
return "Next Valid Order ID: " + orderId
@classmethod
def contractDetails(cls, reqId, contractDetails):
""" generated source for method contractDetails """
contract = contractDetails.m_summary
msg = "reqId = " + reqId + " ===================================\n" + \
" ---- Contract Details begin ----\n" + \
cls.contractMsg(contract) + cls.contractDetailsMsg(contractDetails) + \
" ---- Contract Details End ----\n"
return msg
@classmethod
def contractDetailsMsg(cls, contractDetails):
""" generated source for method contractDetailsMsg """
msg = "marketName = " + str(contractDetails.m_marketName) + "\n" \
+ "minTick = " + str(contractDetails.m_minTick) + "\n" \
+ "price magnifier = " + str(contractDetails.m_priceMagnifier) + "\n" \
+ "orderTypes = " + str(contractDetails.m_orderTypes) + "\n" \
+ "validExchanges = " + str(contractDetails.m_validExchanges) + "\n" \
+ "underConId = " + str(contractDetails.m_underConId) + "\n" \
+ "longName = " + str(contractDetails.m_longName) + "\n" \
+ "contractMonth = " + str(contractDetails.m_contractMonth) + "\n" \
+ "industry = " + str(contractDetails.m_industry) + "\n" \
+ "category = " + str(contractDetails.m_category) + "\n" \
+ "subcategory = " + str(contractDetails.m_subcategory) + "\n" \
+ "timeZoneId = " + str(contractDetails.m_timeZoneId) + "\n" \
+ "tradingHours = " + str(contractDetails.m_tradingHours) + "\n" \
+ "liquidHours = " + str(contractDetails.m_liquidHours) + "\n" \
+ "evRule = " + str(contractDetails.m_evRule) + "\n" \
+ "evMultiplier = " + str(contractDetails.m_evMultiplier) + "\n" \
+ cls.contractDetailsSecIdList(contractDetails)
return msg
@classmethod
def contractMsg(cls, contract):
""" generated source for method contractMsg """
msg = "conid = " + str(contract.m_conId) + "\n" \
+ "symbol = " + str(contract.m_symbol) + "\n" \
+ "secType = " + str(contract.m_secType) + "\n" \
+ "expiry = " + str(contract.m_expiry) + "\n" \
+ "strike = " + str(contract.m_strike) + "\n" \
+ "right = " + str(contract.m_right) + "\n" \
+ "multiplier = " + str(contract.m_multiplier) + "\n" \
+ "exchange = " + str(contract.m_exchange) + "\n" \
+ "primaryExch = " + str(contract.m_primaryExch) + "\n" \
+ "currency = " + str(contract.m_currency) + "\n" \
+ "localSymbol = " + str(contract.m_localSymbol) + "\n" \
+ "tradingClass = " + str(contract.m_tradingClass) + "\n"
return msg
@classmethod
def bondContractDetails(cls, reqId, contractDetails):
""" generated source for method bondContractDetails """
contract = contractDetails.m_summary
msg = "reqId = " + str(reqId) + " ===================================\n" \
+ " ---- Bond Contract Details begin ----\n" \
+ "symbol = " + str(contract.m_symbol) + "\n" \
+ "secType = " + str(contract.m_secType) + "\n" \
+ "cusip = " + str(contractDetails.m_cusip) + "\n" \
+ "coupon = " + str(contractDetails.m_coupon) + "\n" \
+ "maturity = " + str(contractDetails.m_maturity) + "\n" \
+ "issueDate = " + str(contractDetails.m_issueDate) + "\n" \
+ "ratings = " + str(contractDetails.m_ratings) + "\n" \
+ "bondType = " + str(contractDetails.m_bondType) + "\n" \
+ "couponType = " + str(contractDetails.m_couponType) + "\n" \
+ "convertible = " + str(contractDetails.m_convertible) + "\n" \
+ "callable = " + str(contractDetails.m_callable) + "\n" \
+ "putable = " + str(contractDetails.m_putable) + "\n" \
+ "descAppend = " + str(contractDetails.m_descAppend) + "\n" \
+ "exchange = " + str(contract.m_exchange) + "\n" \
+ "currency = " + str(contract.m_currency) + "\n" \
+ "marketName = " + str(contractDetails.m_marketName) + "\n" \
+ "tradingClass = " + str(contract.m_tradingClass) + "\n" \
+ "conid = " + str(contract.m_conId) + "\n" \
+ "minTick = " + str(contractDetails.m_minTick) + "\n" \
+ "orderTypes = " + str(contractDetails.m_orderTypes) + "\n" \
+ "validExchanges = " + str(contractDetails.m_validExchanges) + "\n" \
+ "nextOptionDate = " + str(contractDetails.m_nextOptionDate) + "\n" \
+ "nextOptionType = " + str(contractDetails.m_nextOptionType) + "\n" \
+ "nextOptionPartial = " + str(contractDetails.m_nextOptionPartial) + "\n" \
+ "notes = " + str(contractDetails.m_notes) + "\n" \
+ "longName = " + str(contractDetails.m_longName) + "\n" \
+ "evRule = " + str(contractDetails.m_evRule) + "\n" \
+ "evMultiplier = " + str(contractDetails.m_evMultiplier) + "\n" \
+ cls.contractDetailsSecIdList(contractDetails) \
+ " ---- Bond Contract Details End ----\n"
return msg
@classmethod
def contractDetailsSecIdList(cls, contractDetails):
""" generated source for method contractDetailsSecIdList """
msg = "secIdList={"
if contractDetails.m_secIdList is not None:
secIdList = contractDetails.m_secIdList
i = 0
while i < len(secIdList):
param = secIdList[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}\n"
return msg
@classmethod
def contractDetailsEnd(cls, reqId):
""" generated source for method contractDetailsEnd """
return "reqId = " + str(reqId) + " =============== end ==============="
@classmethod
def execDetails(cls, reqId, contract, execution):
""" generated source for method execDetails """
msg = " ---- Execution Details begin ----\n" \
+ "reqId = " + str(reqId) + "\n" \
+ "orderId = " + str(execution.m_orderId) + "\n" \
+ "clientId = " + str(execution.m_clientId) + "\n" \
+ cls.contractMsg(contract) \
+ "execId = " + str(execution.m_execId) + "\n" \
+ "time = " + str(execution.m_time) + "\n" \
+ "acctNumber = " + str(execution.m_acctNumber) + "\n" \
+ "executionExchange = " + str(execution.m_exchange) + "\n" \
+ "side = " + str(execution.m_side) + "\n" \
+ "shares = " + str(execution.m_shares) + "\n" \
+ "price = " + str(execution.m_price) + "\n" \
+ "permId = " + str(execution.m_permId) + "\n" \
+ "liquidation = " + str(execution.m_liquidation) + "\n" \
+ "cumQty = " + str(execution.m_cumQty) + "\n" \
+ "avgPrice = " + str(execution.m_avgPrice) + "\n" \
+ "orderRef = " + str(execution.m_orderRef) + "\n" \
+ "evRule = " + str(execution.m_evRule) + "\n" \
+ "evMultiplier = " + str(execution.m_evMultiplier) + "\n" \
" ---- Execution Details end ----\n"
return msg
@classmethod
def execDetailsEnd(cls, reqId):
""" generated source for method execDetailsEnd """
return "reqId = " + str(reqId) + " =============== end ==============="
@classmethod
def updateMktDepth(cls, tickerId, position, operation, side, price, size):
""" generated source for method updateMktDepth """
return "updateMktDepth: " + str(tickerId) + " " + str(position) + " " + str(operation) + " " + str(side) + " " + str(price) + " " + str(size)
@classmethod
def updateMktDepthL2(cls, tickerId, position, marketMaker, operation, side, price, size):
""" generated source for method updateMktDepthL2 """
return "updateMktDepth: " + str(tickerId) + " " + str(position) + " " + marketMaker + " " + str(operation) + " " + str(side) + " " + str(price) + " " + str(size)
@classmethod
def updateNewsBulletin(cls, msgId, msgType, message, origExchange):
""" generated source for method updateNewsBulletin """
return "MsgId=" + str(msgId) + " :: MsgType=" + str(msgType) + " :: Origin=" + origExchange + " :: Message=" + message
@classmethod
def managedAccounts(cls, accountsList):
""" generated source for method managedAccounts """
return "Connected : The list of managed accounts are : [" + accountsList + "]"
@classmethod
def receiveFA(cls, faDataType, xml):
""" generated source for method receiveFA """
return cls.FINANCIAL_ADVISOR + " " + EClientSocket.faMsgTypeName(faDataType) + " " + xml
@classmethod
def historicalData(cls, reqId, date, open, high, low, close, volume, count, WAP, hasGaps):
""" generated source for method historicalData """
return "id=" + str(reqId) \
+ " date = " + date \
+ " open=" + str(open) \
+ " high=" + str(high) \
+ " low=" + str(low) \
+ " close=" + str(close) \
+ " volume=" + str(volume) \
+ " count=" + str(count) \
+ " WAP=" + str(WAP) \
+ " hasGaps=" + str(hasGaps)
@classmethod
def realtimeBar(cls, reqId, time, open, high, low, close, volume, wap, count):
""" generated source for method realtimeBar """
return "id=" + str(reqId) \
+ " time = " + str(time) \
+ " open=" + str(open) \
+ " high=" + str(high) \
+ " low=" + str(low) \
+ " close=" + str(close) \
+ " volume=" + str(volume) \
+ " count=" + str(count) \
+ " WAP=" + str(wap)
@classmethod
def scannerParameters(cls, xml):
""" generated source for method scannerParameters """
return cls.SCANNER_PARAMETERS + "\n" + xml
@classmethod
def scannerData(cls, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
""" generated source for method scannerData """
contract = contractDetails.m_summary
return "id = " + str(reqId) \
+ " rank=" + str(rank) \
+ " symbol=" + str(contract.m_symbol) \
+ " secType=" + str(contract.m_secType) \
+ " expiry=" + str(contract.m_expiry) \
+ " strike=" + str(contract.m_strike) \
+ " right=" + str(contract.m_right) \
+ " exchange=" + str(contract.m_exchange) \
+ " currency=" + str(contract.m_currency) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " marketName=" + str(contractDetails.m_marketName) \
+ " tradingClass=" + str(contract.m_tradingClass) \
+ " distance=" + distance \
+ " benchmark=" + benchmark \
+ " projection=" + projection \
+ " legsStr=" + legsStr
@classmethod
def scannerDataEnd(cls, reqId):
""" generated source for method scannerDataEnd """
return "id = " + str(reqId) + " =============== end ==============="
@classmethod
def currentTime(cls, time):
""" generated source for method currentTime """
return "current time = " + str(time)
@classmethod
def fundamentalData(cls, reqId, data):
""" generated source for method fundamentalData """
return "id = " + str(reqId) + " len = " + str(len(data)) + '\n' + data
@classmethod
def deltaNeutralValidation(cls, reqId, underComp):
""" generated source for method deltaNeutralValidation """
return "id = " + str(reqId) + " underComp.conId =" + str(underComp.m_conId) + " underComp.delta =" + str(underComp.m_delta) + " underComp.price =" + str(underComp.m_price)
@classmethod
def tickSnapshotEnd(cls, tickerId):
""" generated source for method tickSnapshotEnd """
return "id=" + str(tickerId) + " =============== end ==============="
@classmethod
def marketDataType(cls, reqId, marketDataType):
""" generated source for method marketDataType """
return "id=" + str(reqId) + " marketDataType = " + MarketDataType.getField(marketDataType)
@classmethod
def commissionReport(cls, commissionReport):
""" generated source for method commissionReport """
msg = "commission report:" \
+ " execId=" + str(commissionReport.m_execId) \
+ " commission=" + Util.DoubleMaxString(commissionReport.m_commission) \
+ " currency=" + str(commissionReport.m_currency) \
+ " realizedPNL=" + Util.DoubleMaxString(commissionReport.m_realizedPNL) \
+ " yield=" + Util.DoubleMaxString(commissionReport.m_yield) \
+ " yieldRedemptionDate=" \
+ Util.IntMaxString(commissionReport.m_yieldRedemptionDate)
return msg
@classmethod
def position(cls, account, contract, position, avgCost):
""" generated source for method position """
msg = " ---- Position begin ----\n" \
+ "account = " + str(account) + "\n" \
+ cls.contractMsg(contract) \
+ "position = " + Util.IntMaxString(position) + "\n" \
+ "avgCost = " + Util.DoubleMaxString(avgCost) + "\n" + \
" ---- Position end ----\n"
return msg
@classmethod
def positionEnd(cls):
""" generated source for method positionEnd """
return " =============== end ==============="
@classmethod
def accountSummary(cls, reqId, account, tag, value, currency):
""" generated source for method accountSummary """
msg = " ---- Account Summary begin ----\n" \
+ "reqId = " + str(reqId) + "\n" \
+ "account = " + str(account) + "\n" \
+ "tag = " + str(tag) + "\n" \
+ "value = " + str(value) + "\n" \
+ "currency = " + str(currency) + "\n" \
+ " ---- Account Summary end ----\n"
return msg
@classmethod
def accountSummaryEnd(cls, reqId):
""" generated source for method accountSummaryEnd """
return "id=" + str(reqId) + " =============== end ==============="
|
|
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
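        # Fetch the live object, apply the requested key/value edits via Yedit,
        # and only call `oc replace` when at least one key actually changed.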
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
        '''replace the named resource using the definition in fname '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
        '''create a resource from the given content '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
        '''create a resource from a file '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
        '''delete the named resource '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template, optionally creating the resulting objects '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node --list-pods '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn a list of dicts with 'path' and 'data' keys into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the resource file contents, parsed as yaml or json '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
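# Illustrative example (hypothetical definitions): 'metadata' and 'status'
# are always skipped, so these two definitions compare as equal:
#   user_def   = {'kind': 'User', 'metadata': {'name': 'alice'}}
#   result_def = {'kind': 'User', 'metadata': {'name': 'bob'}, 'status': {}}
#   Utils.check_def_equal(user_def, result_def)  # -> True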
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
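# Illustrative example (hypothetical options dict): given
#   options = {'full_name': {'value': 'Jane Doe', 'include': True},
#              'groups':    {'value': None,       'include': True}}
# stringify() returns ['--full-name=Jane Doe']; entries whose value is falsy
# and not an int are skipped, and underscores in keys become dashes.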
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
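# Illustrative examples with the default separator '.':
#   Yedit.parse_key('a.b')      -> [('', 'a'), ('', 'b')]
#   Yedit.parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
# Each tuple is (array_index, dict_key); exactly one of the two is non-empty.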
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b, item = 'd'
results in d = {'a': {'b': 'd'}}
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
return None
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
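# Minimal usage sketch for Yedit (the file path is hypothetical):
#   yed = Yedit(filename='/tmp/example.yml', content={'a': {'b': 'c'}})
#   yed.get('a.b')        # -> 'c'
#   yed.put('a.b', 'd')   # -> (True, {'a': {'b': 'd'}})
#   yed.write()           # persists the modified dict to /tmp/example.yml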
# pylint: disable=too-many-instance-attributes
class UserConfig(object):
''' Handle user options '''
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
kubeconfig,
username,
full_name,
):
''' constructor for handling user options '''
self.kubeconfig = kubeconfig
self.namespace = namespace
self.username = username
self.full_name = full_name
self.data = {}
self.create_dict()
def create_dict(self):
''' return a user as a dict '''
self.data['apiVersion'] = 'v1'
self.data['fullName'] = self.full_name
self.data['groups'] = None
self.data['identities'] = None
self.data['kind'] = 'User'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.username
# pylint: disable=too-many-instance-attributes
class User(Yedit):
''' Class to model an openshift user definition '''
kind = 'user'
def __init__(self, content):
'''User constructor'''
super(User, self).__init__(content=content)
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCUser(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'users'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
groups=None,
verbose=False):
''' Constructor for OCUser '''
super(OCUser, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.groups = groups
self._user = None
@property
def user(self):
''' property function for user '''
if not self._user:
self.get()
return self._user
@user.setter
def user(self, data):
''' setter function for yedit var '''
self._user = data
def exists(self):
''' return whether a user exists '''
if self.user:
return True
return False
def get(self):
'''return user information '''
result = self._get(self.kind, self.config.username)
if result['returncode'] == 0:
self.user = User(content=result['results'][0])
elif 'users \"%s\" not found' % self.config.username in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.username)
def create_group_entries(self):
''' make entries for user to the provided group list '''
if self.groups:
for group in self.groups:
cmd = ['groups', 'add-users', group, self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
return rval
return {'returncode': 0}
def create(self):
'''create the object'''
rval = self.create_group_entries()
if rval['returncode'] != 0:
return rval
return self._create_from_content(self.config.username, self.config.data)
def group_update(self):
''' update group membership '''
rval = {'returncode': 0}
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
and ( group['users'] == None or self.config.username not in group['users']):
cmd = ['groups', 'add-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
# else if we're in the group, but aren't supposed to be
elif group['users'] != None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
cmd = ['groups', 'remove-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
return rval
def update(self):
'''update the object'''
rval = self.group_update()
if rval['returncode'] != 0:
return rval
# need to update the user's info
return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
def needs_group_update(self):
''' check if there are group membership changes '''
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
and ( group['users'] == None or self.config.username not in group['users']):
return True
# else if we're in the group, but aren't supposed to be
elif group['users'] != None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
return True
return False
def needs_update(self):
''' verify an update is needed '''
skip = []
if self.needs_group_update() == True:
return True
return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True)
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
#pylint: disable=too-many-branches
def main():
'''
ansible oc module for user
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
namespace=dict(default='default', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
username=dict(default=None, type='str'),
full_name=dict(default=None, type='str'),
# setting groups here will not populate the 'groups' field
# in the user object; instead, the module calls out to the
# group objects and adds the user entry there
groups=dict(default=[], type='list'),
),
supports_check_mode=True,
)
uconfig = UserConfig(module.params['namespace'],
module.params['kubeconfig'],
module.params['username'],
module.params['full_name'],
)
oc_user = OCUser(uconfig, module.params['groups'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = oc_user.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if oc_user.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = oc_user.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not oc_user.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = oc_user.create()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_user.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
if oc_user.needs_update():
api_rval = oc_user.update()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
orig_cmd = api_rval['cmd']
# return the created object
api_rval = oc_user.get()
# overwrite the get/list cmd
api_rval['cmd'] = orig_cmd
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
|
"""Support for RFXtrx sensors."""
from __future__ import annotations
from dataclasses import dataclass
import logging
from typing import Callable
from RFXtrx import ControlEvent, SensorEvent
from homeassistant.components.sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_DEVICES,
DEGREE,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
LENGTH_MILLIMETERS,
PERCENTAGE,
POWER_WATT,
PRECIPITATION_MILLIMETERS_PER_HOUR,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
UV_INDEX,
)
from homeassistant.core import callback
from . import (
CONF_DATA_BITS,
RfxtrxEntity,
connect_auto_add,
get_device_id,
get_rfx_object,
)
from .const import ATTR_EVENT
_LOGGER = logging.getLogger(__name__)
def _battery_convert(value):
"""Battery is given as a value between 0 and 9."""
if value is None:
return None
return (value + 1) * 10
def _rssi_convert(value):
"""Rssi is given as dBm value."""
if value is None:
return None
return f"{value*8-120}"
@dataclass
class RfxtrxSensorEntityDescription(SensorEntityDescription):
"""Description of sensor entities."""
convert: Callable = lambda x: x
SENSOR_TYPES = (
RfxtrxSensorEntityDescription(
key="Barometer",
device_class=DEVICE_CLASS_PRESSURE,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=PRESSURE_HPA,
),
RfxtrxSensorEntityDescription(
key="Battery numeric",
device_class=DEVICE_CLASS_BATTERY,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=PERCENTAGE,
convert=_battery_convert,
),
RfxtrxSensorEntityDescription(
key="Current",
device_class=DEVICE_CLASS_CURRENT,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
),
RfxtrxSensorEntityDescription(
key="Current Ch. 1",
device_class=DEVICE_CLASS_CURRENT,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
),
RfxtrxSensorEntityDescription(
key="Current Ch. 2",
device_class=DEVICE_CLASS_CURRENT,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
),
RfxtrxSensorEntityDescription(
key="Current Ch. 3",
device_class=DEVICE_CLASS_CURRENT,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
),
RfxtrxSensorEntityDescription(
key="Energy usage",
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=POWER_WATT,
),
RfxtrxSensorEntityDescription(
key="Humidity",
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=PERCENTAGE,
),
RfxtrxSensorEntityDescription(
key="Rssi numeric",
device_class=DEVICE_CLASS_SIGNAL_STRENGTH,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
convert=_rssi_convert,
),
RfxtrxSensorEntityDescription(
key="Temperature",
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=TEMP_CELSIUS,
),
RfxtrxSensorEntityDescription(
key="Temperature2",
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=TEMP_CELSIUS,
),
RfxtrxSensorEntityDescription(
key="Total usage",
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
RfxtrxSensorEntityDescription(
key="Voltage",
device_class=DEVICE_CLASS_VOLTAGE,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
),
RfxtrxSensorEntityDescription(
key="Wind direction",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=DEGREE,
),
RfxtrxSensorEntityDescription(
key="Rain rate",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=PRECIPITATION_MILLIMETERS_PER_HOUR,
),
RfxtrxSensorEntityDescription(
key="Sound",
),
RfxtrxSensorEntityDescription(
key="Sensor Status",
),
RfxtrxSensorEntityDescription(
key="Count",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement="count",
),
RfxtrxSensorEntityDescription(
key="Counter value",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement="count",
),
RfxtrxSensorEntityDescription(
key="Chill",
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=TEMP_CELSIUS,
),
RfxtrxSensorEntityDescription(
key="Wind average speed",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=SPEED_METERS_PER_SECOND,
),
RfxtrxSensorEntityDescription(
key="Wind gust",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=SPEED_METERS_PER_SECOND,
),
RfxtrxSensorEntityDescription(
key="Rain total",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=LENGTH_MILLIMETERS,
),
RfxtrxSensorEntityDescription(
key="Forecast",
),
RfxtrxSensorEntityDescription(
key="Forecast numeric",
),
RfxtrxSensorEntityDescription(
key="Humidity status",
),
RfxtrxSensorEntityDescription(
key="UV",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=UV_INDEX,
),
)
SENSOR_TYPES_DICT = {desc.key: desc for desc in SENSOR_TYPES}
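# Illustrative lookup: SENSOR_TYPES_DICT maps an RFXtrx value name to its
# entity description, e.g. SENSOR_TYPES_DICT["Temperature"] describes a
# measurement-class sensor reported in TEMP_CELSIUS.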
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up platform."""
discovery_info = config_entry.data
data_ids = set()
def supported(event):
return isinstance(event, (ControlEvent, SensorEvent))
entities = []
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
for data_type in set(event.values) & set(SENSOR_TYPES_DICT):
data_id = (*device_id, data_type)
if data_id in data_ids:
continue
data_ids.add(data_id)
entity = RfxtrxSensor(event.device, device_id, SENSOR_TYPES_DICT[data_type])
entities.append(entity)
async_add_entities(entities)
@callback
def sensor_update(event, device_id):
"""Handle sensor updates from the RFXtrx gateway."""
if not supported(event):
return
for data_type in set(event.values) & set(SENSOR_TYPES_DICT):
data_id = (*device_id, data_type)
if data_id in data_ids:
continue
data_ids.add(data_id)
_LOGGER.info(
"Added sensor (Device ID: %s Class: %s Sub: %s, Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
entity = RfxtrxSensor(
event.device, device_id, SENSOR_TYPES_DICT[data_type], event=event
)
async_add_entities([entity])
# Subscribe to main RFXtrx events
connect_auto_add(hass, discovery_info, sensor_update)
class RfxtrxSensor(RfxtrxEntity, SensorEntity):
"""Representation of a RFXtrx sensor."""
entity_description: RfxtrxSensorEntityDescription
def __init__(self, device, device_id, entity_description, event=None):
"""Initialize the sensor."""
super().__init__(device, device_id, event=event)
self.entity_description = entity_description
self._name = f"{device.type_string} {device.id_string} {entity_description.key}"
self._unique_id = "_".join(
x for x in (*self._device_id, entity_description.key)
)
async def async_added_to_hass(self):
"""Restore device state."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
event = old_state.attributes.get(ATTR_EVENT)
if event:
self._apply_event(get_rfx_object(event))
@property
def native_value(self):
"""Return the state of the sensor."""
if not self._event:
return None
value = self._event.values.get(self.entity_description.key)
return self.entity_description.convert(value)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def force_update(self) -> bool:
"""We should force updates. Repeated states have meaning."""
return True
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
if self.entity_description.key not in event.values:
return
_LOGGER.debug(
"Sensor update (Device ID: %s Class: %s Sub: %s)",
event.device.id_string,
event.device.__class__.__name__,
event.device.subtype,
)
self._apply_event(event)
self.async_write_ha_state()
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training manager for fever code."""
import json
import os
from typing import Any, Dict, List, Optional, Text
from absl import logging
import dataclasses
from language.serene import callbacks
from language.serene import fever_tfds
from language.serene import layers
from language.serene import losses
from language.serene import model
from language.serene import preprocessing
from language.serene import tokenizers
from language.serene import util
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tqdm
from official.common import distribute_utils
@dataclasses.dataclass
class ModelConfig:
"""Typed parameters for model."""
fever_experiment_id: int
model_checkpoint: Text
dataset: Text
buffer_size: int
batch_size: int
word_emb_size: int
hidden_size: int
learning_rate: float
positive_class_weight: Optional[float]
max_epochs: int
dropout: float
activation: Text
use_batch_norm: bool
# Model Choice: two_tower or one_tower (not implemented yet).
model: Text
# Preprocessing
tokenizer: Text # EG: Convert strings to list of strings.
text_encoder: Text # EG: Convert list of strings to integers.
basic_lowercase: bool
# Embedder + Contextualizer
embedder: Text
contextualizer: Text
context_num_layers: int
tied_encoders: bool
bidirectional: bool
bert_model_name: Text
bert_max_seq_length: int
bert_vocab_path: Text
bert_model_path: Text
bert_trainable: bool
bert_dropout: float
# Neural Module Configuration
matcher: Text
matcher_hidden_size: int
projection_dim: int
fever_dev_path: Text
max_evidence: int
max_claim_tokens: int
max_evidence_tokens: int
# Whether to include the title/sentence_id in evidence encoding.
include_title: bool
include_sentence_id: bool
n_similar_negatives: int
n_background_negatives: int
scrape_type: Text
include_not_enough_info: bool
title_in_scoring: bool
classify_claim: bool
claim_loss_weight: float
def validate(self):
"""Validate that the arguments to the config are correct, error if not."""
if self.tokenizer not in ['bert', 'basic']:
raise ValueError(f'Invalid tokenizer: "{self.tokenizer}"')
if self.text_encoder not in ['bert', 'basic']:
raise ValueError(f'Invalid text encoder: "{self.text_encoder}"')
if self.matcher not in layers.matcher_registry:
raise ValueError(f'Invalid matcher: "{self.matcher}"')
if self.contextualizer not in ['bert', 'rnn', 'lstm', 'gru']:
raise ValueError(f'Invalid contextualizer: "{self.contextualizer}"')
if self.model not in ['one_tower', 'two_tower']:
raise ValueError(f'Invalid model: "{self.model}"')
if self.bert_model_name not in ['base', 'large']:
raise ValueError(f'Invalid bert model: "{self.bert_model_name}')
if self.embedder not in ['classic_embedder', 'bert_embedder']:
raise ValueError(f'Invalid embedder: "{self.embedder}"')
@classmethod
def from_dict(cls, params):
return ModelConfig(**params)
@classmethod
def from_file(cls,
file_path,
overrides = None):
with util.safe_open(file_path) as f:
params: Dict[Text, Any] = json.load(f)
if overrides is not None:
params.update(overrides)
return ModelConfig.from_dict(params)
def save(self, file_path):
with util.safe_open(file_path, 'w') as f:
json.dump(self.asdict(), f)
def asdict(self):
return dataclasses.asdict(self)
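# Illustrative usage (paths and override values are hypothetical):
#   config = ModelConfig.from_file('/tmp/model_config.json',
#                                  overrides={'batch_size': 32})
#   config.validate()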
class Trainer:
"""Training wrapper around keras to manage vocab/saving/dataset creation.
The primary methods of this class are:
- train()
- predict()
- embed()
- save()
- load()
The intended use of this is
> trainer = Trainer(my_config)
> trainer.train()
The following methods are primarily for converting TFDS to tf.data.Dataset
for keras training
- _build_tokenizer()
- _build_encoder()
- _encode_and_batch()
- _batch_dataset()
- _encode_dataset()
- _build_vocab()
- _tokenize_example()
These are utilities for embedding different TFDSs
- embed_wiki_dataset()
- embed_claim_dataset()
The following methods deal with preparing the keras model for training
- _compile(): Compile model under the right scope, create callbacks, glue losses
to model
- _build_callbacks(): Keras callbacks
"""
def __init__(
self,
model_config,
debug = False,
tpu = None,
distribution_strategy = None,
tb_log_dir = None):
"""Configure the trainer.
Args:
model_config: ModelConfig parameters for training
debug: Enables certain debug behaviors like dataset subsampling
tpu: The TPU to use or None otherwise
distribution_strategy: Parallel training strategy
tb_log_dir: The directory for Tensorboard to log to
"""
self._debug = debug
if debug:
logging.info('Debug mode enabled on trainer')
self._tpu = tpu
self._distribution_strategy = distribution_strategy
self._tb_log_dir = tb_log_dir
self._strategy: Optional[tf.distribute.Strategy] = None
self._model_config = model_config
self._vocab: Optional[List[Text]] = None
self._vocab_stats: Dict[Text, Any] = {}
self._class_stats: Dict[int, int] = {0: 0, 1: 0}
# Whitespace tokenizer
self._tokenizer: Optional[tokenizers.Tokenizer] = None
self._encoder: Optional[preprocessing.FeverTextEncoder] = None
self._model: Optional[tf.keras.Model] = None
self._inner_model: Optional[tf.keras.Model] = None
def save(self):
"""Persist the encoder and the model to disk.
"""
if self._model is None or self._encoder is None:
raise ValueError('Model and encoder cannot be None')
else:
self._encoder.save_to_file(
# This is a prefix, which converts to: mydir/text_encoder.tokens
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
self._model.save_weights(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
@classmethod
def load(cls,
model_checkpoint,
model_config_overrides = None,
**kwargs):
"""Load the model, its tokenizer, and weights from the checkpoint.
Args:
model_checkpoint: Checkpoint to restore from, from .save()
model_config_overrides: Extra args for ModelConfig
**kwargs: Passed through to trainer, used for overriding checkpoint
Returns:
A model in the same state as just before it was saved with .save()
"""
# pylint: disable=protected-access
model_config = ModelConfig.from_file(
os.path.join(model_checkpoint, 'model_config.json'),
overrides=model_config_overrides)
trainer = Trainer(model_config=model_config, **kwargs)
trainer._tokenizer = trainer._build_tokenizer()
encoder_path = os.path.join(model_checkpoint, 'text_encoder')
if model_config.text_encoder == 'bert':
trainer._encoder = preprocessing.BertTextEncoder.load_from_file(
encoder_path)
elif model_config.text_encoder == 'basic':
trainer._encoder = preprocessing.BasicTextEncoder.load_from_file(
encoder_path)
else:
raise ValueError('Invalid text encoder')
trainer._compile()
if trainer._model is None:
raise ValueError('Model does not exist despite being compiled')
trainer._model.load_weights(os.path.join(model_checkpoint, 'best_model.tf'))
return trainer
def _save_model_config(self):
"""Save only the Model configuration to disk."""
logging.info('Saving config to: %s/model_config.json',
self._model_config.model_checkpoint)
self._model_config.save(
os.path.join(self._model_config.model_checkpoint, 'model_config.json'))
def _save_encoder(self):
"""Save only the text encoder to disk."""
self._encoder.save_to_file(
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
@property
def vocab_size(self):
if self._encoder is None:
raise ValueError('Model has not been built, so no vocab size')
else:
return self._encoder.vocab_size
def _init_strategy(self):
"""Initialize the distribution strategy (e.g. TPU/GPU/Mirrored)."""
if self._strategy is None:
if self._tpu is not None:
resolver = distribute_utils.tpu_initialize(self._tpu)
self._strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif self._distribution_strategy is None or self._distribution_strategy == 'default':
self._strategy = tf.distribute.get_strategy()
elif self._distribution_strategy == 'cpu':
self._strategy = tf.distribute.OneDeviceStrategy('/device:cpu:0')
else:
if self._distribution_strategy == 'mirrored':
self._strategy = tf.distribute.MirroredStrategy()
else:
raise ValueError(
f'Invalid distribution strategy="{self._distribution_strategy}"')
def _build_tokenizer(self):
"""Build the correct tokenizer depending on model encoder.
Returns:
Tokenizer for model
"""
if self._model_config.tokenizer == 'basic':
base_tokenizer = tfds.deprecated.text.Tokenizer()
return tokenizers.ReservedTokenizer(
tokenizer=base_tokenizer, reserved_re=preprocessing.SEPARATOR_RE)
elif self._model_config.tokenizer == 'bert':
return tokenizers.BertTokenizer(
vocab_file=self._model_config.bert_vocab_path, do_lower_case=True)
else:
raise ValueError('Invalid tokenizer')
def _build_encoder(self, vocab,
tokenizer):
"""Build the encoder using the given vocab and tokenizer.
Args:
vocab: Vocab to build encoder from
tokenizer: Tokenizer to build encoder from
Returns:
The built text encoder
"""
if self._model_config.text_encoder == 'basic':
return preprocessing.BasicTextEncoder(
vocab_list=vocab,
tokenizer=tokenizer,
lowercase=self._model_config.basic_lowercase,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
max_claim_tokens=self._model_config.max_claim_tokens,
max_evidence_tokens=self._model_config.max_evidence_tokens,
)
elif self._model_config.text_encoder == 'bert':
return preprocessing.BertTextEncoder(
tokenizer=tokenizer,
max_seq_length=self._model_config.bert_max_seq_length,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
)
def _encode_and_batch(self,
dataset,
train=False,
filter_claims=True,
filter_evidence=True):
"""Convert a tensorflow dataset of unbatched, text examples to TF batches.
Args:
dataset: TF Dataset to transform
train: Whether to encode as training dataset
filter_claims: Whether to filter zero length claims
filter_evidence: Whether to filter zero length evidence
Returns:
encoded and batched dataset for keras fit
"""
encoded = self._encode_dataset(
dataset, filter_claims=filter_claims, filter_evidence=filter_evidence)
if train:
encoded = encoded.shuffle(
self._model_config.buffer_size, reshuffle_each_iteration=False)
batched = self._batch_dataset(encoded)
return batched
def _compile(self):
"""Compile the keras model using the correct scope."""
# pylint: disable=protected-access
self._init_strategy()
with self._strategy.scope():
if self._model_config.model == 'two_tower':
module_model = model.TwoTowerRanker(
self.vocab_size,
activation=self._model_config.activation,
matcher_name=self._model_config.matcher,
word_emb_size=self._model_config.word_emb_size,
hidden_size=self._model_config.hidden_size,
dropout=self._model_config.dropout,
use_batch_norm=self._model_config.use_batch_norm,
contextualizer=self._model_config.contextualizer,
context_num_layers=self._model_config.context_num_layers,
bidirectional=self._model_config.bidirectional,
tied_encoders=self._model_config.tied_encoders,
embedder_name=self._model_config.embedder,
matcher_hidden_size=self._model_config.matcher_hidden_size,
bert_model_name=self._model_config.bert_model_name,
bert_model_path=self._model_config.bert_model_path,
bert_trainable=self._model_config.bert_trainable,
bert_dropout=self._model_config.bert_dropout,
projection_dim=self._model_config.projection_dim,
classify_claim=self._model_config.classify_claim,
)
self._inner_model = module_model
# This hackery is necessary since keras doesn't handle dictionary inputs
# well, so we have to manually specify input/output output shapes. Since
# this is dependent on the model (e.g., bert vs other), let the encoder
# yield this.
inputs = self._encoder.compute_input_shapes()
outputs = module_model(inputs)
module_model.input_names = sorted(inputs.keys())
module_model._feed_input_names = sorted(inputs.keys())
module_model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model = tf.keras.Model(inputs=inputs, outputs=outputs)
self._model.input_names = sorted(inputs.keys())
self._model._feed_input_names = sorted(inputs.keys())
self._model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model.summary(line_length=500)
elif self._model_config.model == 'one_tower':
raise NotImplementedError()
else:
raise ValueError('Invalid model')
metrics = {}
evidence_metrics = [
tf.keras.metrics.BinaryAccuracy(name='accuracy'),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.TruePositives(name='tp'),
tf.keras.metrics.FalsePositives(name='fp'),
tf.keras.metrics.TrueNegatives(name='tn'),
tf.keras.metrics.FalseNegatives(name='fn'),
]
metrics['evidence_matching'] = evidence_metrics
loss = {}
loss['evidence_matching'] = losses.WeightedBinaryCrossentropyFromProbs(
positive_class_weight=self._model_config.positive_class_weight)
loss_weights = {
'evidence_matching': 1.0,
'claim_classification': self._model_config.claim_loss_weight
}
if self._model_config.classify_claim:
# TODO(perodriguez): add claim classifier metrics
claim_metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
]
metrics['claim_classification'] = claim_metrics
loss[
'claim_classification'] = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False)
else:
loss['claim_classification'] = losses.ZeroLoss()
metrics['claim_classification'] = []
self._model.compile(
loss=loss,
optimizer=tf.keras.optimizers.Adam(self._model_config.learning_rate),
metrics=metrics,
loss_weights=loss_weights,
)
def train(self,
*,
epochs = None,
steps_per_epoch = None,
validation_steps = None):
"""Prepare the dataset, callbacks, and model, then train/save it.
Args:
epochs: The number of epochs to train for, if None then default to
early stopping (useful for debugging)
steps_per_epoch: How many training steps to take, if None default to
normal training (useful for debugging)
validation_steps: How many validation steps to take, if None default to
normal training (useful for debugging)
"""
logging.info('Preparing model with config:\n%s', self._model_config)
with util.log_time('Initial dataset read'):
builder = fever_tfds.FeverEvidence(
data_dir=self._model_config.dataset,
n_similar_negatives=self._model_config.n_similar_negatives,
n_background_negatives=self._model_config.n_background_negatives,
train_scrape_type=self._model_config.scrape_type,
include_not_enough_info=self._model_config.include_not_enough_info,
title_in_scoring=self._model_config.title_in_scoring,
)
# Cache here to prevent hitting remote fs again
train_dataset = (builder.as_dataset(split='train')).cache()
val_dataset = builder.as_dataset(split='validation').cache()
if self._debug:
train_dataset = train_dataset.take(1000)
if self._debug:
val_dataset = val_dataset.take(200)
self._tokenizer = self._build_tokenizer()
self._vocab = list(self._build_vocab(train_dataset))
self._encoder = self._build_encoder(self._vocab, self._tokenizer)
train_batched = self._encode_and_batch(train_dataset, train=True)
val_batched = self._encode_and_batch(val_dataset, train=False)
# Cache the batch creation, but not the batchwise shuffle.
train_batched = train_batched.cache().shuffle(
100,
reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)
# Cache the batched validation data.
val_batched = val_batched.cache().prefetch(tf.data.experimental.AUTOTUNE)
self._compile()
model_callbacks = self._build_callbacks(val_batched)
# Save enough to reconstruct anything except for the model.
# The model itself is saved with the ModelCheckpoint callback.
self._save_model_config()
self._save_encoder()
if epochs is None:
epochs = self._model_config.max_epochs
self._model.fit(
train_batched,
validation_data=val_batched,
callbacks=model_callbacks,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
logging.info('Model Summary:\n%s', self._model.summary())
# First load the best model.
logging.info('Loading best model weights')
self._model.load_weights(self.model_weight_path)
logging.info('Saving dev predictions from best model')
self._save_dev_predictions(val_batched)
@property
def model_weight_path(self):
return os.path.join(self._model_config.model_checkpoint, 'best_model.tf')
def _save_dev_predictions(self, val_batched):
"""Save model predictions for the dev set.
This is used to compute Fever F1 as stopping metric
Args:
val_batched: The batched validation set.
"""
unbatched = val_batched.unbatch()
model_predictions = self._model.predict(val_batched)
claim_probs = model_predictions['claim_classification']
evidence_probs = model_predictions['evidence_matching']
predictions = []
# Extra _ is the label, which we don't need
for (ex, _), claim_prob, evidence_prob in tqdm.tqdm(
zip(unbatched, claim_probs, evidence_probs), mininterval=5):
predictions.append({
'claim_prob': claim_prob.tolist(),
'evidence_prob': evidence_prob.tolist(),
'metadata': json.loads(ex['metadata'].numpy().decode('utf8'))
})
pred_path = os.path.join(self._model_config.model_checkpoint,
'val_predictions.json')
with util.safe_open(pred_path, 'w') as f:
json.dump({'predictions': predictions}, f)
def predict(self, examples):
"""Given examples in JSON format, predict evidence relevance.
Args:
examples: List of claim/evidence pairs to rank
Returns:
Scalar scores for each pair
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for ex in examples:
stacked['claim_text'].append(ex['claim_text'])
stacked['evidence_text'].append(ex['evidence_text'])
stacked['metadata'].append(ex['metadata'])
stacked['label'].append(ex['label'])
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
preds = []
for batch in batched_examples:
# model.predict() is broken after model load so we have to do this
# manually.
preds.append(self._model(batch))
return np.vstack(preds).reshape(-1).tolist()
def embed(self, examples, *, as_claim,
as_evidence): # Checker .tolist() -> Any
"""Embed a list of evidence text.
Args:
examples: A list of evidence text to embed.
as_claim: Whether to embed examples as claims
as_evidence: Whether to embed examples as evidence
Returns:
A list of embeddings, one for each evidence text.
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for text in examples:
# Dummy value to make sure tokenizing works.
if as_claim:
stacked['claim_text'].append(text)
else:
stacked['claim_text'].append('a')
if as_evidence:
stacked['evidence_text'].append(text)
else:
stacked['evidence_text'].append('a')
stacked['metadata'].append('')
stacked['label'].append(tf.constant(0, dtype=tf.int64))
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_preds = []
ev_preds = []
for batch in batched_examples:
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
claim_encoding, ev_encoding = self._model(
inputs, embed_claim=as_claim, embed_evidence=as_evidence)
claim_preds.append(claim_encoding)
ev_preds.append(ev_encoding)
return np.vstack(claim_preds).tolist(), np.vstack(ev_preds).tolist()
def embed_wiki_dataset(self, dataset):
"""Embed the wikipedia/evidence only dataset.
Args:
dataset: The wikipedia only dataset (e.g. wiki_tfds.py)
Returns:
Aligned wikipedia_urls, sentence_ids, and embeddings of model
"""
# map_fn and tf_map_fn transform the dataset to the same format as
# tfds_evidence/the one the model expects
def map_fn(text, wikipedia_url, sentence_id):
return ('a', text, wikipedia_url, str(sentence_id),
json.dumps({
'sentence_id': int(sentence_id.numpy()),
'wikipedia_url': wikipedia_url.numpy().decode('utf8')
}))
def tf_map_fn(example):
tensors = tf.py_function(
map_fn,
inp=[
example['text'], example['wikipedia_url'], example['sentence_id']
],
Tout=(tf.string, tf.string, tf.string, tf.string, tf.string))
return {
'claim_text': tensors[0],
'evidence_text': tensors[1],
'wikipedia_url': tensors[2],
'sentence_id': tensors[3],
'claim_label': tf.constant(0, dtype=tf.int64),
'evidence_label': tf.constant(0, dtype=tf.int64),
'metadata': tensors[4]
}
formatted_ds = dataset.map(tf_map_fn)
batched_examples = self._encode_and_batch(
formatted_ds, filter_claims=False, filter_evidence=False)
preds = []
wikipedia_urls = []
sentence_ids = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
_, ev_encoding = self._inner_model(
inputs, embed_claim=False, embed_evidence=True)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
wikipedia_urls.append(key['wikipedia_url'])
sentence_ids.append(key['sentence_id'])
preds.append(ev_encoding)
return np.array(wikipedia_urls), np.array(sentence_ids), np.vstack(preds)
def embed_claim_dataset(self, dataset):
"""Embed the claim only dataset and save them with claim_ids.
Args:
dataset: The claims only dataset (e.g. claim_tfds.py)
Returns:
Aligned claim ids and embeddings from the model
"""
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_ids = []
embeddings = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
# Cannot use self._model since it does not take extra arguments. Since
# we're not using the keras API (namely .predict()), we can just use the
# underlying model stored in self._inner_model.
claim_encoding, _ = self._inner_model(
inputs, embed_claim=True, embed_evidence=False)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
claim_ids.append(int(key['claim_id']))
embeddings.append(claim_encoding)
return np.array(claim_ids), np.vstack(embeddings)
def _build_callbacks(self, val_batched):
"""Build the callbacks used during training."""
cns_model_checkpoint = util.safe_path(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
model_callbacks = [
# Note: Order matters here, particularly that FeverMetricsCallback
# comes before tensorboard so it can write to the log dictionary
# and TB picks it up.
callbacks.FeverMetricsCallback(
validation_batched=val_batched,
debug=self._debug,
fever_dev_path=self._model_config.fever_dev_path,
max_evidence=self._model_config.max_evidence,
checkpoint_dir=self._model_config.model_checkpoint,
),
# TODO(perodriguez): Determine a better thing to stop on
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=.001,
patience=3,
verbose=1,
mode='min'),
# TODO(perodriguez): Determine a better thing to save on
# Checkpointing also needs to know about fever recall.
tf.keras.callbacks.ModelCheckpoint(
filepath=cns_model_checkpoint,
save_best_only=True,
monitor='val_loss',
mode='min',
verbose=1,
# There is no support for GRU/LSTM Dropout with normal save
save_weights_only=True,
),
]
if self._tb_log_dir is not None:
model_callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=self._tb_log_dir))
return model_callbacks
def _batch_dataset(self, dataset):
"""Batch the dataset depending on what model is used.
Args:
dataset: A dataset to batch
Returns:
A batched dataset with correct padding shapes.
"""
return dataset.padded_batch(
batch_size=self._model_config.batch_size,
padded_shapes=(
self._encoder.padded_shapes(),
# Must match losses in training.py
{
'claim_classification': [],
'evidence_matching': []
}))
def _encode_dataset(self,
dataset,
filter_claims=True,
filter_evidence=True):
"""Convert the tfds dataset to numbers by tokenizing/embedding."""
encode = self._encoder.build_encoder_fn()
encoded_data = dataset.map(
encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if filter_claims:
encoded_data = encoded_data.filter(preprocessing.filter_claim_fn)
if filter_evidence:
encoded_data = encoded_data.filter(preprocessing.filter_evidence_fn)
return encoded_data
def _build_vocab(self, dataset):
"""Build the vocabulary and encoder from the dataset.
Args:
dataset: The dataset to build vocab from.
Returns:
The vocabulary in the dataset, or empty vocab if using bert
"""
# If we are using bert, then we do not need to build the vocab
# since it's already defined
if self._model_config.tokenizer == 'bert' and self._model_config.text_encoder == 'bert':
logging.info('Using bert, skipping vocabulary creation')
return set()
if self._tokenizer is None:
raise ValueError('Cannot build vocab without a tokenizer.')
claim_lengths = []
evidence_lengths = []
vocab = set()
for example in tqdm.tqdm(dataset, mininterval=5):
tokenized_claim, tokenized_evidence = self._tokenize_example(example)
claim_lengths.append(len(tokenized_claim))
evidence_lengths.append(len(tokenized_evidence))
vocab.update(tokenized_claim)
vocab.update(tokenized_evidence)
logging.info('Built vocab of size (without padding): %s', len(vocab))
logging.info('Claim length statistics')
logging.info('Max: %s', max(claim_lengths))
logging.info('Min: %s', min(claim_lengths))
claim_percentiles = np.percentile(claim_lengths, [50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(claim_percentiles))
logging.info('Evidence length statistics')
logging.info('Max: %s', max(evidence_lengths))
logging.info('Min: %s', min(evidence_lengths))
evidence_percentiles = np.percentile(evidence_lengths,
[50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(evidence_percentiles))
self._vocab_stats['claim_max'] = max(claim_lengths)
self._vocab_stats['claim_min'] = min(claim_lengths)
self._vocab_stats['claim_percentiles'] = claim_percentiles
self._vocab_stats['evidence_max'] = max(evidence_lengths)
self._vocab_stats['evidence_min'] = min(evidence_lengths)
self._vocab_stats['evidence_percentiles'] = evidence_percentiles
return vocab
def _tokenize_example(self, example):
tokenized_claim = self._tokenizer.tokenize(
example['claim_text'].numpy().decode('utf8'))
tokenized_evidence = self._tokenizer.tokenize(
example['evidence_text'].numpy().decode('utf8'))
return tokenized_claim, tokenized_evidence
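# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): given the aligned
# outputs of embed_claim_dataset() and the wiki/evidence embedding method above,
# retrieve the top-k evidence rows per claim. Scoring by plain inner product and
# all names below are illustrative assumptions, not the project's actual
# retrieval pipeline.
def _retrieve_top_k_evidence(claim_ids, claim_embeddings,
                             wikipedia_urls, sentence_ids, evidence_embeddings,
                             k=5):
  """Return {claim_id: [(wikipedia_url, sentence_id, score), ...]} (sketch)."""
  # Score every claim vector against every evidence vector.
  scores = np.matmul(claim_embeddings, evidence_embeddings.T)
  results = {}
  for row, claim_id in enumerate(claim_ids):
    top = np.argsort(-scores[row])[:k]
    results[int(claim_id)] = [
        (wikipedia_urls[i], sentence_ids[i], float(scores[row, i]))
        for i in top
    ]
  return results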
|
|
#!/usr/bin/env python
import pyaudio
import wave
import multiprocessing
from tkinter import *
from time import sleep
import sys
import datetime
import os
import argparse
class App():
def __init__(self, master, queue, save_file, recording_process):
# Build the GUI widgets and keep references to the recording process and its control queue
frame = Frame(master)
master.protocol("WM_DELETE_WINDOW", self.exit)
self.text = StringVar()
self.text2 = "Output file: " + save_file
self.record_queue = queue
self.recording_process = recording_process
self.master = master
self.done = False
self.textbox = Label(master, textvariable=self.text)
self.textbox.pack()
self.textbox2 = Label(master, text=self.text2)
self.button = Button(master, text="Stop and Save Recording", command=self.finish_record)
self.button.pack()
self.button2 = Button(master, text="Ok", command=frame.quit)
frame.pack()
self.text.set("Status: Recording")
self.master.after(1000, self.check_record)
def finish_record(self):
# Puts something into the queue to tell the record process to stop
self.record_queue.put(True)
self.text.set("Status: Saving")
self.textbox2.pack()
self.button.destroy()
self.button2.pack()
self.text.set("Status: Done")
self.done = True
def exit(self):
self.record_queue.put(True)
def check_record(self):
# Check whether the recording subprocess is still alive.
# If it is, check again in a second; if it has finished and the window
# was not closed via the GUI, exit the GUI as well.
if not self.recording_process.is_alive() and not self.done:
sys.exit()
else:
self.master.after(1000, self.check_record)
class Record():
def __init__(self, queue, output_file, time):
# Store the recording parameters, then start recording immediately
self.output_file = output_file
self.time = time
self.record(queue)
def record(self, queue):
# Most of this code was taken from the pyaudio example.
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 48000
RECORD_SECONDS = self.time
WAVE_OUTPUT_FILENAME = self.output_file
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
# Stop recording early if a stop message was sent through the queue
if not queue.empty():
break
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
sys.exit()
def read_args():
# Initialize the parser
parser = argparse.ArgumentParser()
# Change the filedivider based on the OS
ostype = sys.platform
if "win32" in ostype:
filedivider = "\\"
else:
filedivider = "/"
current_dir = os.getcwd()
# Set default values
gui = True
filename = None
save_directory = current_dir
seconds = 60
# Add all the arguments
parser.add_argument("FILENAME", help="The name of the file")
parser.add_argument("-n", "--nogui", help="Runs the program without opening the GUI", action="store_true")
parser.add_argument("-u", "--usedate", help="Adds the date to the filename", action="store_true")
parser.add_argument("-d", "--directory", help="Directory to save the file in")
parser.add_argument("-m", "--minutes", help="Length of time to record in minutes", type=int)
parser.add_argument("-s", "--seconds", help="Length of time to record in seconds", type=int)
if os.name == "nt":
args, unknown = parser.parse_known_args()
else:
args = parser.parse_args()
# Set all the variables to the arguments if given
filename = args.FILENAME
if args.nogui:
gui = False
if args.usedate:
filename += str(datetime.datetime.now()).split()[0]
if args.directory:
save_directory = args.directory
if args.minutes:
seconds = args.minutes * 60
if args.seconds:
seconds = args.seconds
# Append the file extension to the filename if not already present
if not "." in filename:
filename += ".wav"
else:
split_name = filename.split(".")
if not split_name[-1] == "wav":
if filename[-1] == ".":
filename += "wav"
else:
filename += ".wav"
# Make sure the filedivider is on the end of the path to save
if save_directory[-1] != filedivider:
save_directory += filedivider
save_file = save_directory + filename
if os.path.isfile(save_file):
print("Error: file exists.")
sys.exit(1)
return (gui, save_file, seconds)
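# Example invocations (hypothetical script name "recorder.py"; the flags are the
# ones defined in read_args() above):
#   python recorder.py meeting -s 30        # 30-second recording to ./meeting.wav, with GUI
#   python recorder.py meeting -n -m 2 -u   # 2 minutes, no GUI, date appended to the filename
#   python recorder.py notes -d /tmp        # saves to /tmp/notes.wav; exits if the file already exists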
if __name__ == '__main__':
# Get values from argument parser
gui, save_file, time = read_args()
#Start the record process
multiprocessing.freeze_support()
record_queue = multiprocessing.Queue()
record_process = multiprocessing.Process(target=Record, args=(record_queue, save_file, time))
record_process.start()
if gui:
#Start the main GUI
root = Tk()
app = App(root, record_queue, save_file, record_process)
root.mainloop()
|
|
# -*- coding: utf-8 -*-
from gluon import *
from gluon.storage import Storage
from s3 import *
from s3theme import NAV, SECTION
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
""" Custom Main Menu Layout """
@staticmethod
def layout(item):
""" Custom Layout Method """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
items = item.render_components()
if item.parent is not None:
classes = []
if item.parent.parent is None:
# Item at the top-level?
toplevel = True
if item.opts.right:
classes.append("menu-right")
else:
toplevel = False
if item.components:
classes.append("has-dropdown not-click")
if item.selected:
classes.append("active")
_class = " ".join(classes)
# Menu item with Dropdown
if item.get_first(enabled=True, link=True):
_href = item.url()
return LI(A(item.label,
_href=_href,
_id=item.attr._id
),
UL(items,
_class="dropdown"
),
_class=_class,
)
else:
# Menu item without Drop-Down
if toplevel:
item_url = item.url()
if item_url == URL(c="default", f="index"):
classes.append("menu-home")
if item.selected:
classes.append("active")
_class = " ".join(classes)
return LI(A(item.label,
_href=item_url,
_id=item.attr._id,
),
_class=_class,
)
else:
# Submenu item
if isinstance(item.label, dict):
if "name" in item.label:
label = item.label["name"]
else:
return None
else:
label = item.label
link = A(label, _href=item.url(), _id=item.attr._id)
return LI(link)
else:
# Main menu
right = []
left = []
for item in items:
if "menu-right" in item["_class"]:
item.remove_class("menu-right")
right.append(item)
else:
left.append(item)
right.reverse()
if current.response.s3.rtl:
right, left = left, right
T = current.T
data_options = {"back": T("Back"),
}
return NAV(UL(LI(A(" ",
_href=URL(c="default", f="index"),
),
_class="name"
),
LI(A(SPAN(current.T("Menu"))),
_class="toggle-topbar menu-icon",
),
_class="title-area",
),
SECTION(UL(right, _class="right"),
UL(left, _class="left"),
_class="top-bar-section",
),
_class = "top-bar",
data = {"topbar": " ",
"options": "back_text:%(back)s" % data_options,
},
)
else:
return None
# ---------------------------------------------------------------------
@staticmethod
def checkbox_item(item):
""" Render special active items """
name = item.label
link = item.url()
_id = name["id"]
if "name" in name:
_name = name["name"]
else:
_name = ""
if "value" in name:
_value = name["value"]
else:
_value = False
if "request_type" in name:
_request_type = name["request_type"]
else:
_request_type = "ajax"
if link:
if _request_type == "ajax":
_onchange='''var val=$('#%s:checked').length;$.getS3('%s'+'?val='+val,null,false,null,false,false)''' % \
(_id, link)
else:
# Just load the page. Use this if the changed menu
# item should alter the contents of the page, and
# it's simpler just to load it.
_onchange="location.href='%s'" % link
else:
_onchange=None
return LI(A(INPUT(_type="checkbox",
_id=_id,
_onchange=_onchange,
value=_value,
),
"%s" % _name,
_nowrap="nowrap",
),
_class="menu-toggle",
)
# =============================================================================
class S3PersonalMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav personal-menu")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MP = S3PersonalMenuLayout
# =============================================================================
class S3AboutMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav about-menu")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MA = S3AboutMenuLayout
# =============================================================================
class S3LanguageMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
""" Language menu layout
options for each entry:
- lang_code: the language code
- lang_name: the language name
option for the menu
- current_language: code of the current language
"""
if item.enabled:
if item.components:
# The language menu itself
current_language = current.T.accepted_language
items = item.render_components()
select = SELECT(items, value=current_language,
_name="_language",
# @ToDo: wrap in T() for translation
_title="Language Selection",
_onchange="S3.reloadWithQueryStringVars({'_language':$(this).val()});")
form = FORM(select, _class="language-selector",
_name="_language",
_action="",
_method="get")
return form
else:
# A language entry
return OPTION(item.opts.lang_name,
_value=item.opts.lang_code)
else:
return None
# -------------------------------------------------------------------------
def check_enabled(self):
""" Check whether the language menu is enabled """
if current.deployment_settings.get_L10n_display_toolbar():
return True
else:
return False
# -----------------------------------------------------------------------------
# Shortcut
ML = S3LanguageMenuLayout
# =============================================================================
class S3OrgMenuLayout(S3NavigationItem):
""" Layout for the organisation-specific menu """
@staticmethod
def layout(item):
name = "Deutsches Rotes Kreuz"
logo = IMG(_src = "/%s/static/themes/DRK/img/logo_small.png" %
current.request.application,
_alt = "Deutsches Rotes Kreuz",
_width=40,
)
# Note: render using current.menu.org.render()[0] + current.menu.org.render()[1]
return (name, logo)
# -----------------------------------------------------------------------------
# Shortcut
OM = S3OrgMenuLayout
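# Usage sketch (illustrative only; the real menu definitions live in the
# template's menus.py): the shortcuts above act as factories for
# S3NavigationItem trees, e.g. something along the lines of
#
#   MP()(MP("Change Password", c="default", f="user", m="change_password"),
#        MP("Logout", c="default", f="user", m="logout"),
#        )
#
# The controller/function/method arguments shown here are assumptions for
# illustration, not taken from this file.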
# END =========================================================================
|
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
# Copyright 2018 The Containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
from datetime import date
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
# Rootdir defaults to the directory **above** the hack/repo-infra dir.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
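# get_refs() maps a file "type" to the expected license header, split into
# lines: boilerplate.go.txt -> refs["go"], boilerplate.py.txt -> refs["py"],
# and (if such a reference file exists) boilerplate.Makefile.txt ->
# refs["Makefile"], which file_passes() below matches by basename.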
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh" or extension == "py":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
p = regexs["year"]
found = 0
for d in ref:
if p.search(d):
found = 1
break
if found == 0:
print('File %s is missing the year' % filename, file=verbose_out)
return False
# Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
p = regexs["authors"]
found = 0
for d in ref:
if p.search(d):
found = 1
break
if found == 0:
print('File %s is missing AUTHORS' % filename, file=verbose_out)
return False
# Replace all occurrences of the regex "The validNameHere Authors" with "AUTHORS"
p = regexs["auth"]
for i, d in enumerate(data):
(data[i], found) = p.subn('AUTHORS', d)
# Remove extra copyright notices; only one is necessary
p = regexs["copyright"]
keepgoing = 1
while keepgoing == 1:
keepgoing = 0
count = 0
for d in data:
if p.search(d):
count = count + 1
if count > 1:
keepgoing = 1
data.remove(d)
break
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
'hack/boilerplate/test', '.glide']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything
years = range(2014, date.today().year + 1)
regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
regexs["authors"] = re.compile( 'AUTHORS' )
authors = [ 'The Kubernetes Authors', 'The Containerd Authors', 'The containerd Authors' ]
regexs["auth"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), authors)) )
regexs["copyright"] = re.compile( 'Copyright YEAR AUTHORS' )
return regexs
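# Minimal illustration (not called by main()) of how the regexes above
# normalise a header line before it is compared against the reference
# boilerplate; the function name is ours, not part of the original script.
def _demo_normalize(line):
    regexs = get_regexs()
    line, _ = regexs["date"].subn('YEAR', line)
    line, _ = regexs["auth"].subn('AUTHORS', line)
    return line
# _demo_normalize("# Copyright 2018 The Containerd Authors.")
#   -> "# Copyright YEAR AUTHORS."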
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import OrderedDict, defaultdict
from datetime import date, datetime, time
from itertools import chain, groupby
from operator import attrgetter, itemgetter
from flask import flash, session
from pytz import timezone
from sqlalchemy.orm import contains_eager, joinedload
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.queries import db_dates_overlap, with_total_rows
from indico.core.errors import NoReportError
from indico.modules.events.models.events import Event
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.rb import rb_settings
from indico.modules.rb.models.reservation_edit_logs import ReservationEditLog
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import RepeatFrequency, Reservation, ReservationLink
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.operations.blockings import filter_blocked_rooms, get_rooms_blockings, group_blocked_rooms
from indico.modules.rb.operations.conflicts import get_concurrent_pre_bookings, get_rooms_conflicts
from indico.modules.rb.operations.misc import get_rooms_nonbookable_periods, get_rooms_unbookable_hours
from indico.modules.rb.util import (group_by_occurrence_date, serialize_availability, serialize_blockings,
serialize_booking_details, serialize_nonbookable_periods, serialize_occurrences,
serialize_unbookable_hours)
from indico.util.date_time import iterdays, overlaps, server_to_utc
from indico.util.i18n import _
from indico.util.string import natural_sort_key
from indico.util.struct.iterables import group_list
def group_blockings(blocked_rooms, dates):
if not blocked_rooms:
return {}
occurrences = {}
for blocked_room in blocked_rooms:
blocking = blocked_room.blocking
for date_ in dates:
if blocking.start_date <= date_ <= blocking.end_date:
occurrences[date_] = [blocking]
return occurrences
def group_nonbookable_periods(periods, dates):
if not periods:
return {}
occurrences = defaultdict(list)
for period in periods:
for d in dates:
if period.start_dt.date() <= d <= period.end_dt.date():
period_occurrence = NonBookablePeriod()
period_occurrence.start_dt = ((datetime.combine(d, time(0)))
if period.start_dt.date() != d else period.start_dt)
period_occurrence.end_dt = ((datetime.combine(d, time(23, 59)))
if period.end_dt.date() != d else period.end_dt)
occurrences[d].append(period_occurrence)
return occurrences
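# Worked example of the splitting above (dates are illustrative): a
# non-bookable period from 2020-01-01 18:00 to 2020-01-03 09:00, grouped over
# those three dates, yields per-day occurrences of 18:00-23:59 on the first
# day, 00:00-23:59 on the middle day and 00:00-09:00 on the last day.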
def get_existing_room_occurrences(room, start_dt, end_dt, repeat_frequency=RepeatFrequency.NEVER, repeat_interval=None,
allow_overlapping=False, only_accepted=False, skip_booking_id=None):
return get_existing_rooms_occurrences([room], start_dt, end_dt, repeat_frequency, repeat_interval,
allow_overlapping, only_accepted, skip_booking_id).get(room.id, [])
def get_existing_rooms_occurrences(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, allow_overlapping=False,
only_accepted=False, skip_booking_id=None):
room_ids = [room.id for room in rooms]
query = (ReservationOccurrence.query
.filter(ReservationOccurrence.is_valid, Reservation.room_id.in_(room_ids))
.join(ReservationOccurrence.reservation)
.options(ReservationOccurrence.NO_RESERVATION_USER_STRATEGY,
contains_eager(ReservationOccurrence.reservation)))
if allow_overlapping:
query = query.filter(db_dates_overlap(ReservationOccurrence, 'start_dt', start_dt, 'end_dt', end_dt))
else:
query = query.filter(ReservationOccurrence.start_dt >= start_dt, ReservationOccurrence.end_dt <= end_dt)
if only_accepted:
query = query.filter(Reservation.is_accepted)
if repeat_frequency != RepeatFrequency.NEVER:
candidates = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
dates = [candidate.start_dt for candidate in candidates]
query = query.filter(db.cast(ReservationOccurrence.start_dt, db.Date).in_(dates))
if skip_booking_id is not None:
query = query.filter(ReservationOccurrence.reservation_id != skip_booking_id)
return group_list(query, key=lambda obj: obj.reservation.room_id,
sort_by=lambda obj: (obj.reservation.room_id, obj.start_dt))
def get_rooms_availability(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, skip_conflicts_with=None,
admin_override_enabled=False, skip_past_conflicts=False):
availability = OrderedDict()
candidates = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
date_range = sorted(set(cand.start_dt.date() for cand in candidates))
occurrences = get_existing_rooms_occurrences(rooms, start_dt.replace(hour=0, minute=0),
end_dt.replace(hour=23, minute=59), repeat_frequency, repeat_interval)
blocked_rooms = get_rooms_blockings(rooms, start_dt.date(), end_dt.date())
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True))
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True))
unbookable_hours = get_rooms_unbookable_hours(rooms)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
conflicts, pre_conflicts, conflicting_candidates = get_rooms_conflicts(
rooms, start_dt.replace(tzinfo=None), end_dt.replace(tzinfo=None),
repeat_frequency, repeat_interval, nonoverridable_blocked_rooms,
nonbookable_periods, unbookable_hours, skip_conflicts_with,
allow_admin=admin_override_enabled, skip_past_conflicts=skip_past_conflicts
)
dates = list(candidate.start_dt.date() for candidate in candidates)
for room in rooms:
room_occurrences = occurrences.get(room.id, [])
room_conflicting_candidates = conflicting_candidates.get(room.id, [])
room_conflicts = conflicts.get(room.id, [])
pre_room_conflicts = pre_conflicts.get(room.id, [])
pre_bookings = [occ for occ in room_occurrences if not occ.reservation.is_accepted]
concurrent_pre_bookings = get_concurrent_pre_bookings(pre_bookings) if pre_bookings else []
existing_bookings = [occ for occ in room_occurrences if occ.reservation.is_accepted]
room_nonoverridable_blocked_rooms = nonoverridable_blocked_rooms.get(room.id, [])
room_overridable_blocked_rooms = overridable_blocked_rooms.get(room.id, [])
room_nonbookable_periods = nonbookable_periods.get(room.id, [])
room_unbookable_hours = unbookable_hours.get(room.id, [])
room_candidates = get_room_candidates(candidates, room_conflicts)
availability[room.id] = {'room_id': room.id,
'candidates': group_by_occurrence_date(room_candidates),
'conflicting_candidates': group_by_occurrence_date(room_conflicting_candidates),
'pre_bookings': group_by_occurrence_date(pre_bookings),
'concurrent_pre_bookings': group_by_occurrence_date(concurrent_pre_bookings),
'bookings': group_by_occurrence_date(existing_bookings),
'conflicts': group_by_occurrence_date(room_conflicts),
'pre_conflicts': group_by_occurrence_date(pre_room_conflicts),
'blockings': group_blockings(room_nonoverridable_blocked_rooms, dates),
'overridable_blockings': group_blockings(room_overridable_blocked_rooms, dates),
'nonbookable_periods': group_nonbookable_periods(room_nonbookable_periods, dates),
'unbookable_hours': room_unbookable_hours}
return date_range, availability
def get_room_candidates(candidates, conflicts):
return [candidate for candidate in candidates
if not (any(candidate.overlaps(conflict) for conflict in conflicts))]
def _bookings_query(filters, noload_room=False):
reservation_strategy = contains_eager('reservation')
if noload_room:
reservation_strategy.raiseload('room')
else:
reservation_strategy.joinedload('room')
reservation_strategy.noload('booked_for_user')
reservation_strategy.noload('created_by_user')
query = (ReservationOccurrence.query
.join(Reservation)
.join(Room)
.filter(~Room.is_deleted)
.options(reservation_strategy))
text = filters.get('text')
room_ids = filters.get('room_ids')
booking_criteria = [Reservation.booking_reason.ilike('%{}%'.format(text)),
Reservation.booked_for_name.ilike('%{}%'.format(text))]
if room_ids and text:
query = query.filter(db.or_(Room.id.in_(room_ids), *booking_criteria))
elif room_ids:
query = query.filter(Room.id.in_(room_ids))
elif text:
query = query.filter(db.or_(*booking_criteria))
if filters.get('start_dt'):
query = query.filter(ReservationOccurrence.start_dt >= filters['start_dt'])
if filters.get('end_dt'):
query = query.filter(ReservationOccurrence.end_dt <= filters['end_dt'])
booked_for_user = filters.get('booked_for_user')
if booked_for_user:
query = query.filter(db.or_(Reservation.booked_for_user == booked_for_user,
Reservation.created_by_user == booked_for_user))
if not filters.get('include_inactive'):
query = query.filter(ReservationOccurrence.is_valid)
return query
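# Example filter payload accepted by _bookings_query() (values are
# illustrative): {'room_ids': [1, 2], 'text': 'workshop',
# 'start_dt': <datetime>, 'end_dt': <datetime>,
# 'booked_for_user': <User>, 'include_inactive': False}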
def get_room_calendar(start_date, end_date, room_ids, include_inactive=False, **filters):
start_dt = datetime.combine(start_date, time(hour=0, minute=0))
end_dt = datetime.combine(end_date, time(hour=23, minute=59))
query = _bookings_query(dict(filters, start_dt=start_dt, end_dt=end_dt, room_ids=room_ids,
include_inactive=include_inactive))
bookings = query.order_by(db.func.indico.natsort(Room.full_name)).all()
rooms = set()
if room_ids:
rooms = set(Room.query
.filter(~Room.is_deleted, Room.id.in_(room_ids))
.options(joinedload('location')))
rooms.update(b.reservation.room for b in bookings)
rooms = sorted(rooms, key=lambda r: natural_sort_key(r.full_name))
occurrences_by_room = groupby(bookings, attrgetter('reservation.room_id'))
unbookable_hours = get_rooms_unbookable_hours(rooms)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
blocked_rooms = get_rooms_blockings(rooms, start_dt, end_dt)
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True))
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True))
dates = [d.date() for d in iterdays(start_dt, end_dt)]
calendar = OrderedDict((room.id, {
'room_id': room.id,
'nonbookable_periods': group_nonbookable_periods(nonbookable_periods.get(room.id, []), dates),
'unbookable_hours': unbookable_hours.get(room.id, []),
'blockings': group_blockings(nonoverridable_blocked_rooms.get(room.id, []), dates),
'overridable_blockings': group_blockings(overridable_blocked_rooms.get(room.id, []), dates),
}) for room in rooms)
for room_id, occurrences in occurrences_by_room:
occurrences = list(occurrences)
pre_bookings = [occ for occ in occurrences if occ.reservation.is_pending]
existing_bookings = [occ for occ in occurrences if not occ.reservation.is_pending and occ.is_valid]
concurrent_pre_bookings = get_concurrent_pre_bookings(pre_bookings)
additional_data = {
'bookings': group_by_occurrence_date(existing_bookings),
'pre_bookings': group_by_occurrence_date(pre_bookings),
'concurrent_pre_bookings': group_by_occurrence_date(concurrent_pre_bookings)
}
if include_inactive:
additional_data.update({
'cancellations': group_by_occurrence_date(occ for occ in occurrences if occ.is_cancelled),
'rejections': group_by_occurrence_date(occ for occ in occurrences if occ.is_rejected)
})
calendar[room_id].update(additional_data)
return calendar
def get_room_details_availability(room, start_dt, end_dt):
dates = [d.date() for d in iterdays(start_dt, end_dt)]
occurrences = get_existing_room_occurrences(room, start_dt, end_dt, RepeatFrequency.DAY, 1)
pre_bookings = [occ for occ in occurrences if not occ.reservation.is_accepted]
bookings = [occ for occ in occurrences if occ.reservation.is_accepted]
blocked_rooms = get_rooms_blockings([room], start_dt.date(), end_dt.date())
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True)).get(room.id, [])
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True)).get(room.id, [])
unbookable_hours = get_rooms_unbookable_hours([room]).get(room.id, [])
nonbookable_periods = get_rooms_nonbookable_periods([room], start_dt, end_dt).get(room.id, [])
availability = []
for day in dates:
iso_day = day.isoformat()
nb_periods = serialize_nonbookable_periods(group_nonbookable_periods(nonbookable_periods, dates)).get(iso_day)
availability.append({
'bookings': serialize_occurrences(group_by_occurrence_date(bookings)).get(iso_day),
'pre_bookings': serialize_occurrences(group_by_occurrence_date(pre_bookings)).get(iso_day),
'blockings': serialize_blockings(group_blockings(nonoverridable_blocked_rooms, dates)).get(iso_day),
'overridable_blockings': (serialize_blockings(group_blockings(overridable_blocked_rooms, dates))
.get(iso_day)),
'nonbookable_periods': nb_periods,
'unbookable_hours': serialize_unbookable_hours(unbookable_hours),
'day': iso_day,
})
return sorted(availability, key=itemgetter('day'))
def get_booking_occurrences(booking):
date_range = sorted(set(cand.start_dt.date() for cand in booking.occurrences))
occurrences = group_by_occurrence_date(booking.occurrences)
return date_range, occurrences
def check_room_available(room, start_dt, end_dt):
occurrences = get_existing_room_occurrences(room, start_dt, end_dt, allow_overlapping=True)
prebookings = [occ for occ in occurrences if not occ.reservation.is_accepted]
bookings = [occ for occ in occurrences if occ.reservation.is_accepted]
unbookable_hours = get_rooms_unbookable_hours([room]).get(room.id, [])
hours_overlap = any(hours for hours in unbookable_hours
if overlaps((start_dt.time(), end_dt.time()), (hours.start_time, hours.end_time)))
nonbookable_periods = any(get_rooms_nonbookable_periods([room], start_dt, end_dt))
blocked_rooms = get_rooms_blockings([room], start_dt, end_dt)
nonoverridable_blocked_rooms = filter_blocked_rooms(blocked_rooms, nonoverridable_only=True, explicit=True)
blocked_for_user = any(nonoverridable_blocked_rooms)
user_booking = any(booking for booking in bookings if booking.reservation.booked_for_id == session.user.id)
user_prebooking = any(prebooking for prebooking in prebookings
if prebooking.reservation.booked_for_id == session.user.id)
return {
'can_book': room.can_book(session.user, allow_admin=False),
'can_prebook': room.can_prebook(session.user, allow_admin=False),
'conflict_booking': any(bookings),
'conflict_prebooking': any(prebookings),
'unbookable': (hours_overlap or nonbookable_periods or blocked_for_user),
'user_booking': user_booking,
'user_prebooking': user_prebooking,
}
def create_booking_for_event(room_id, event):
try:
room = Room.get_one(room_id)
default_timezone = timezone(config.DEFAULT_TIMEZONE)
start_dt = event.start_dt.astimezone(default_timezone).replace(tzinfo=None)
end_dt = event.end_dt.astimezone(default_timezone).replace(tzinfo=None)
booking_reason = "Event '{}'".format(event.title)
data = dict(start_dt=start_dt, end_dt=end_dt, booked_for_user=event.creator, booking_reason=booking_reason,
repeat_frequency=RepeatFrequency.NEVER, event_id=event.id)
booking = Reservation.create_from_data(room, data, session.user, ignore_admin=True)
booking.linked_object = event
return booking
except NoReportError:
flash(_("Booking could not be created. Probably somebody else booked the room in the meantime."), 'error')
return None
def get_active_bookings(limit, start_dt, last_reservation_id=None, **filters):
criteria = [ReservationOccurrence.start_dt > start_dt]
if last_reservation_id is not None:
criteria.append(db.and_(db.cast(ReservationOccurrence.start_dt, db.Date) >= start_dt,
ReservationOccurrence.reservation_id > last_reservation_id))
query = (_bookings_query(filters, noload_room=True)
.filter(db.or_(*criteria))
.order_by(ReservationOccurrence.start_dt,
ReservationOccurrence.reservation_id,
db.func.indico.natsort(Room.full_name))
.limit(limit))
bookings, total = with_total_rows(query)
rows_left = total - limit if total > limit else 0
return group_by_occurrence_date(bookings, sort_by=lambda obj: (obj.start_dt, obj.reservation_id)), rows_left
def has_same_dates(old_booking, new_booking):
return (old_booking.start_dt == new_booking['start_dt'] and
old_booking.end_dt == new_booking['end_dt'] and
old_booking.repeat_interval == new_booking['repeat_interval'] and
old_booking.repeat_frequency == new_booking['repeat_frequency'])
def has_same_slots(old_booking, new_booking):
if (
old_booking.repeat_interval != new_booking['repeat_interval']
or old_booking.repeat_frequency != new_booking['repeat_frequency']
):
return False
return old_booking.start_dt <= new_booking['start_dt'] and old_booking.end_dt >= new_booking['end_dt']
def should_split_booking(booking, new_data):
today = date.today()
is_ongoing_booking = booking.start_dt.date() < today < booking.end_dt.date()
old_start_time = booking.start_dt.time()
old_end_time = booking.end_dt.time()
old_repeat_frequency = booking.repeat_frequency
old_repeat_interval = booking.repeat_interval
times_changed = new_data['start_dt'].time() != old_start_time or new_data['end_dt'].time() != old_end_time
new_repeat_frequency = new_data['repeat_frequency']
new_repeat_interval = new_data['repeat_interval']
repetition_changed = (new_repeat_frequency, new_repeat_interval) != (old_repeat_frequency, old_repeat_interval)
return is_ongoing_booking and (times_changed or repetition_changed)
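# Example (dates are illustrative): a daily booking running 2020-01-01 to
# 2020-01-31, 09:00-10:00, is "ongoing" on 2020-01-15. Editing it to
# 14:00-15:00 (times changed) or to a weekly repetition (repetition changed)
# returns True here, so split_booking() below keeps the past occurrences and
# creates a new booking for the future ones.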
def split_booking(booking, new_booking_data):
is_ongoing_booking = booking.start_dt.date() < date.today() < booking.end_dt.date()
if not is_ongoing_booking:
return
cancelled_dates = []
rejected_occs = {}
room = booking.room
occurrences = sorted(booking.occurrences, key=attrgetter('start_dt'))
old_frequency = booking.repeat_frequency
occurrences_to_cancel = [occ for occ in occurrences if occ.start_dt >= datetime.now() and occ.is_valid]
if old_frequency != RepeatFrequency.NEVER and new_booking_data['repeat_frequency'] == RepeatFrequency.NEVER:
new_start_dt = new_booking_data['start_dt']
else:
new_start_dt = datetime.combine(occurrences_to_cancel[0].start_dt.date(), new_booking_data['start_dt'].time())
cancelled_dates = [occ.start_dt.date() for occ in occurrences if occ.is_cancelled]
rejected_occs = {occ.start_dt.date(): occ.rejection_reason for occ in occurrences if occ.is_rejected}
new_end_dt = [occ for occ in occurrences if occ.start_dt < datetime.now()][-1].end_dt
old_booking_data = {
'booking_reason': booking.booking_reason,
'booked_for_user': booking.booked_for_user,
'start_dt': booking.start_dt,
'end_dt': new_end_dt,
'repeat_frequency': booking.repeat_frequency,
'repeat_interval': booking.repeat_interval,
}
booking.modify(old_booking_data, session.user)
for occurrence_to_cancel in occurrences_to_cancel:
occurrence_to_cancel.cancel(session.user, silent=True)
prebook = not room.can_book(session.user, allow_admin=False) and room.can_prebook(session.user, allow_admin=False)
resv = Reservation.create_from_data(room, dict(new_booking_data, start_dt=new_start_dt), session.user,
prebook=prebook)
for new_occ in resv.occurrences:
new_occ_start = new_occ.start_dt.date()
if new_occ_start in cancelled_dates:
new_occ.cancel(None, silent=True)
if new_occ_start in rejected_occs:
new_occ.reject(None, rejected_occs[new_occ_start], silent=True)
booking.edit_logs.append(ReservationEditLog(user_name=session.user.full_name, info=[
'Split into a new booking',
'booking_link:{}'.format(resv.id)
]))
resv.edit_logs.append(ReservationEditLog(user_name=session.user.full_name, info=[
'Split from another booking',
'booking_link:{}'.format(booking.id)
]))
return resv
def get_matching_events(start_dt, end_dt, repeat_frequency, repeat_interval):
"""Get events suitable for booking linking.
This finds events that overlap with an occurrence of a booking
with the given dates where the user is a manager.
"""
occurrences = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
excluded_categories = rb_settings.get('excluded_categories')
return (Event.query
.filter(~Event.is_deleted,
~Event.room_reservation_links.any(ReservationLink.reservation.has(Reservation.is_accepted)),
db.or_(Event.happens_between(server_to_utc(occ.start_dt), server_to_utc(occ.end_dt))
for occ in occurrences),
Event.timezone == config.DEFAULT_TIMEZONE,
db.and_(Event.category_id != cat.id for cat in excluded_categories),
Event.acl_entries.any(db.and_(EventPrincipal.type == PrincipalType.user,
EventPrincipal.user_id == session.user.id,
EventPrincipal.full_access)))
.all())
def get_booking_edit_calendar_data(booking, booking_changes):
"""Return calendar-related data for the booking edit modal."""
room = booking.room
booking_details = serialize_booking_details(booking)
old_date_range = booking_details['date_range']
booking_availability = dict(booking_details['occurrences'], candidates={}, conflicts={}, conflicting_candidates={},
pre_bookings={}, pre_conflicts={}, pending_cancellations={}, num_days_available=None,
num_conflicts=None)
response = {
'will_be_split': False,
'calendars': [{'date_range': old_date_range, 'data': booking_availability}]
}
cancelled_dates = [occ.start_dt.date() for occ in booking.occurrences if occ.is_cancelled]
rejected_dates = [occ.start_dt.date() for occ in booking.occurrences if occ.is_rejected]
if should_split_booking(booking, booking_changes):
old_frequency = booking.repeat_frequency
future_occurrences = [occ for occ in sorted(booking.occurrences, key=attrgetter('start_dt'))
if occ.start_dt >= datetime.now()]
if old_frequency != RepeatFrequency.NEVER and booking_changes['repeat_frequency'] == RepeatFrequency.NEVER:
cancelled_dates = []
rejected_dates = []
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id], **booking_changes)
else:
new_booking_start_dt = datetime.combine(future_occurrences[0].start_dt.date(),
booking_changes['start_dt'].time())
availability_filters = dict(booking_changes, start_dt=new_booking_start_dt)
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id],
**availability_filters)
for occ in booking.occurrences:
serialized = serialize_occurrences({occ.start_dt.date(): [occ]})
if occ in future_occurrences and occ.is_valid:
booking_availability['pending_cancellations'].update(serialized)
elif not occ.is_rejected and not occ.is_cancelled:
booking_availability['bookings'].update(serialized)
response['will_be_split'] = True
elif not has_same_dates(booking, booking_changes):
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id],
skip_past_conflicts=True, **booking_changes)
else:
return response
room_availability = data[room.id]
room_availability['cancellations'] = {}
room_availability['rejections'] = {}
others = defaultdict(list)
for k, v in chain(room_availability['bookings'].iteritems(), room_availability['pre_bookings'].iteritems()):
others[k].extend(v)
other_bookings = {dt: filter(lambda x: x.reservation.id != booking.id, other) for dt, other in others.iteritems()}
candidates = room_availability['candidates']
for dt, dt_candidates in candidates.iteritems():
if dt in cancelled_dates:
candidates[dt] = []
room_availability['cancellations'].update({dt: dt_candidates})
elif dt in rejected_dates:
candidates[dt] = []
room_availability['rejections'].update({dt: dt_candidates})
room_availability['num_days_available'] = (
len(new_date_range) -
len(room_availability['conflicts']) -
len(room_availability['cancellations']) -
len(room_availability['rejections'])
)
room_availability['num_conflicts'] = len(room_availability['conflicts'])
room_availability['bookings'] = {}
room_availability['other'] = serialize_occurrences(other_bookings)
room_availability['pending_cancellations'] = {}
response['calendars'].append({'date_range': new_date_range, 'data': serialize_availability(data)[room.id]})
return response
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from eventlet import event as grevent
import mock
import mox
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils as json
import six
from heat.common import context
from heat.common import exception
from heat.common import identifier
from heat.common import messaging
from heat.common import template_format
from heat.engine.cfn import template as cfntemplate
from heat.engine import dependencies
from heat.engine import environment
from heat.engine.hot import functions as hot_functions
from heat.engine.hot import template as hottemplate
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import stack_lock
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.rpc import api as rpc_api
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import generic_resource as generic_rsrc
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
cfg.CONF.import_opt('enable_stack_abandon', 'heat.common.config')
wp_template_no_default = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
user_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties" : {
"Policies" : [ { "Ref": "WebServerAccessPolicy"} ]
}
},
"WebServerAccessPolicy" : {
"Type" : "OS::Heat::AccessPolicy",
"Properties" : {
"AllowedResources" : [ "WebServer" ]
}
},
"HostKeys" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnUser"}
}
},
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
server_config_template = '''
heat_template_version: 2013-05-23
resources:
WebServer:
type: OS::Nova::Server
'''
class StackCreateTest(common.HeatTestCase):
def setUp(self):
super(StackCreateTest, self).setUp()
def test_wordpress_single_instance_stack_create(self):
stack = tools.get_stack('test_stack', utils.dummy_context())
tools.setup_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
self.assertIsNotNone(stack['WebServer'])
self.assertTrue(int(stack['WebServer'].resource_id) > 0)
self.assertNotEqual(stack['WebServer'].ipaddress, '0.0.0.0')
def test_wordpress_single_instance_stack_adopt(self):
t = template_format.parse(tools.wp_template)
template = templatem.Template(t)
ctx = utils.dummy_context()
adopt_data = {
'resources': {
'WebServer': {
'resource_id': 'test-res-id'
}
}
}
stack = parser.Stack(ctx,
'test_stack',
template,
adopt_stack_data=adopt_data)
tools.setup_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.adopt()
self.assertIsNotNone(stack['WebServer'])
self.assertEqual('test-res-id', stack['WebServer'].resource_id)
self.assertEqual((stack.ADOPT, stack.COMPLETE), stack.state)
def test_wordpress_single_instance_stack_adopt_fail(self):
t = template_format.parse(tools.wp_template)
template = templatem.Template(t)
ctx = utils.dummy_context()
adopt_data = {
'resources': {
'WebServer1': {
'resource_id': 'test-res-id'
}
}
}
stack = parser.Stack(ctx,
'test_stack',
template,
adopt_stack_data=adopt_data)
tools.setup_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.adopt()
self.assertIsNotNone(stack['WebServer'])
expected = ('Resource ADOPT failed: Exception: resources.WebServer: '
'Resource ID was not provided.')
self.assertEqual(expected, stack.status_reason)
self.assertEqual((stack.ADOPT, stack.FAILED), stack.state)
def test_wordpress_single_instance_stack_delete(self):
ctx = utils.dummy_context()
stack = tools.get_stack('test_stack', ctx)
fc = tools.setup_mocks(self.m, stack, mock_keystone=False)
self.m.ReplayAll()
stack_id = stack.store()
stack.create()
db_s = stack_object.Stack.get_by_id(ctx, stack_id)
self.assertIsNotNone(db_s)
self.assertIsNotNone(stack['WebServer'])
self.assertTrue(int(stack['WebServer'].resource_id) > 0)
self.patchobject(fc.servers, 'delete',
side_effect=fakes_nova.fake_exception())
stack.delete()
rsrc = stack['WebServer']
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
self.assertIsNone(stack_object.Stack.get_by_id(ctx, stack_id))
db_s.refresh()
self.assertEqual('DELETE', db_s.action)
self.assertEqual('COMPLETE', db_s.status)
class StackServiceAdoptUpdateTest(common.HeatTestCase):
def setUp(self):
super(StackServiceAdoptUpdateTest, self).setUp()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
self.man.thread_group_mgr = tools.DummyThreadGroupManager()
def _get_stack_adopt_data_and_template(self, environment=None):
template = {
"heat_template_version": "2013-05-23",
"parameters": {"app_dbx": {"type": "string"}},
"resources": {"res1": {"type": "GenericResourceType"}}}
adopt_data = {
"status": "COMPLETE",
"name": "rtrove1",
"environment": environment,
"template": template,
"action": "CREATE",
"id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
"resources": {"res1": {
"status": "COMPLETE",
"name": "database_password",
"resource_id": "yBpuUROjfGQ2gKOD",
"action": "CREATE",
"type": "GenericResourceType",
"metadata": {}}}}
return template, adopt_data
def test_stack_adopt_with_params(self):
cfg.CONF.set_override('enable_stack_adopt', True)
environment = {'parameters': {"app_dbx": "test"}}
template, adopt_data = self._get_stack_adopt_data_and_template(
environment)
result = self.man.create_stack(self.ctx, "test_adopt_stack",
template, {}, None,
{'adopt_stack_data': str(adopt_data)})
stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
self.assertEqual(template, stack.raw_template.template)
self.assertEqual(environment['parameters'],
stack.raw_template.environment['parameters'])
def test_stack_adopt_saves_input_params(self):
cfg.CONF.set_override('enable_stack_adopt', True)
environment = {'parameters': {"app_dbx": "foo"}}
input_params = {
"parameters": {"app_dbx": "bar"}
}
template, adopt_data = self._get_stack_adopt_data_and_template(
environment)
result = self.man.create_stack(self.ctx, "test_adopt_stack",
template, input_params, None,
{'adopt_stack_data': str(adopt_data)})
stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
self.assertEqual(template, stack.raw_template.template)
self.assertEqual(input_params['parameters'],
stack.raw_template.environment['parameters'])
def test_stack_adopt_stack_state(self):
cfg.CONF.set_override('enable_stack_adopt', True)
env = {'parameters': {"app_dbx": "test"}}
template, adopt_data = self._get_stack_adopt_data_and_template(
env)
result = self.man.create_stack(self.ctx, "test_adopt_stack",
template, {}, None,
{'adopt_stack_data': str(adopt_data)})
stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
self.assertEqual((parser.Stack.ADOPT, parser.Stack.COMPLETE),
(stack.action, stack.status))
def test_stack_adopt_disabled(self):
# Test that stack adopt can be disabled via configuration
cfg.CONF.set_override('enable_stack_adopt', False)
environment = {'parameters': {"app_dbx": "test"}}
template, adopt_data = self._get_stack_adopt_data_and_template(
environment)
ex = self.assertRaises(
dispatcher.ExpectedException,
self.man.create_stack,
self.ctx, "test_adopt_stack_disabled",
template, {}, None,
{'adopt_stack_data': str(adopt_data)})
self.assertEqual(exception.NotSupported, ex.exc_info[0])
self.assertIn('Stack Adopt', six.text_type(ex.exc_info[1]))
def _stub_update_mocks(self, stack_to_load, stack_to_return):
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=stack_to_load
).AndReturn(stack_to_return)
self.m.StubOutWithMock(templatem, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
def test_stack_update(self):
stack_name = 'service_update_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx)
sid = old_stack.store()
old_stack.set_stack_user_project_id('1234')
s = stack_object.Stack.get_by_id(self.ctx, sid)
stack = tools.get_stack(stack_name, self.ctx)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
convergence=False,
current_traversal=None,
prev_raw_template_id=None,
current_deps=None,
disable_rollback=True,
nested_depth=0,
owner_id=None,
parent_resource=None,
stack_user_project_id='1234',
strict_validate=True,
tenant_id='test_tenant_id',
timeout_mins=60,
user_creds_id=u'1',
username='test_username').AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
evt_mock = self.m.CreateMockAnything()
self.m.StubOutWithMock(grevent, 'Event')
grevent.Event().AndReturn(evt_mock)
self.m.ReplayAll()
api_args = {'timeout_mins': 60}
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(old_stack.identifier(), result)
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
self.assertEqual([evt_mock], self.man.thread_group_mgr.events)
self.m.VerifyAll()
def test_stack_update_existing_parameters(self):
'''Use a template with existing parameters, then update with a
template containing additional parameters and ensure all are preserved.
'''
stack_name = 'service_update_test_stack_existing_parameters'
update_params = {'encrypted_param_names': [],
'parameter_defaults': {},
'parameters': {'newparam': 123},
'resource_registry': {'resources': {}}}
api_args = {rpc_api.PARAM_TIMEOUT: 60,
rpc_api.PARAM_EXISTING: True}
t = template_format.parse(tools.wp_template)
stack = tools.get_stack(stack_name, self.ctx, with_params=True)
stack.store()
stack.set_stack_user_project_id('1234')
self.assertEqual({'KeyName': 'test'}, stack.t.env.params)
with mock.patch('heat.engine.stack.Stack') as mock_stack:
mock_stack.load.return_value = stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stack.identifier(),
t,
update_params,
None, api_args)
tmpl = mock_stack.call_args[0][2]
self.assertEqual({'KeyName': 'test', 'newparam': 123},
tmpl.env.params)
self.assertEqual(stack.identifier(), result)
def test_stack_update_existing_parameters_remove(self):
'''Use a template with existing parameters, then update with a
template containing additional parameters and a list of
parameters to be removed.
'''
stack_name = 'service_update_test_stack_existing_parameters'
update_params = {'encrypted_param_names': [],
'parameter_defaults': {},
'parameters': {'newparam': 123},
'resource_registry': {'resources': {}}}
api_args = {rpc_api.PARAM_TIMEOUT: 60,
rpc_api.PARAM_EXISTING: True,
rpc_api.PARAM_CLEAR_PARAMETERS: ['removeme']}
t = template_format.parse(tools.wp_template)
t['parameters']['removeme'] = {'type': 'string'}
stack = utils.parse_stack(t, stack_name=stack_name,
params={'KeyName': 'test',
'removeme': 'foo'})
stack.set_stack_user_project_id('1234')
self.assertEqual({'KeyName': 'test', 'removeme': 'foo'},
stack.t.env.params)
with mock.patch('heat.engine.stack.Stack') as mock_stack:
mock_stack.load.return_value = stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stack.identifier(),
t,
update_params,
None, api_args)
tmpl = mock_stack.call_args[0][2]
self.assertEqual({'KeyName': 'test', 'newparam': 123},
tmpl.env.params)
self.assertEqual(stack.identifier(), result)
def test_stack_update_existing_registry(self):
'''Update a stack with the existing flag set and ensure the
environment registry is preserved.
'''
stack_name = 'service_update_test_stack_existing_registry'
initial_registry = {'OS::Foo': 'foo.yaml',
'OS::Foo2': 'foo2.yaml',
'resources': {
'myserver': {'OS::Server': 'myserver.yaml'}}}
initial_params = {'encrypted_param_names': [],
'parameter_defaults': {},
'parameters': {},
'resource_registry': initial_registry}
initial_files = {'foo.yaml': 'foo',
'foo2.yaml': 'foo2',
'myserver.yaml': 'myserver'}
update_registry = {'OS::Foo2': 'newfoo2.yaml',
'resources': {
'myother': {'OS::Other': 'myother.yaml'}}}
update_params = {'encrypted_param_names': [],
'parameter_defaults': {},
'parameters': {},
'resource_registry': update_registry}
update_files = {'newfoo2.yaml': 'newfoo',
'myother.yaml': 'myother'}
api_args = {rpc_api.PARAM_TIMEOUT: 60,
rpc_api.PARAM_EXISTING: True}
t = template_format.parse(tools.wp_template)
stack = utils.parse_stack(t, stack_name=stack_name,
params=initial_params,
files=initial_files)
stack.set_stack_user_project_id('1234')
self.assertEqual(initial_params,
stack.t.env.user_env_as_dict())
expected_reg = {'OS::Foo': 'foo.yaml',
'OS::Foo2': 'newfoo2.yaml',
'resources': {
'myother': {'OS::Other': 'myother.yaml'},
'myserver': {'OS::Server': 'myserver.yaml'}}}
expected_env = {'encrypted_param_names': [],
'parameter_defaults': {},
'parameters': {},
'resource_registry': expected_reg}
# FIXME(shardy): Currently we don't prune unused old files
expected_files = {'foo.yaml': 'foo',
'foo2.yaml': 'foo2',
'myserver.yaml': 'myserver',
'newfoo2.yaml': 'newfoo',
'myother.yaml': 'myother'}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
mock_stack.load.return_value = stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stack.identifier(),
t,
update_params,
update_files,
api_args)
tmpl = mock_stack.call_args[0][2]
self.assertEqual(expected_env,
tmpl.env.user_env_as_dict())
self.assertEqual(expected_files,
tmpl.files)
self.assertEqual(stack.identifier(), result)
def test_stack_update_existing_parameter_defaults(self):
'''Update a stack with the existing flag set and ensure the
environment parameter_defaults are preserved.
'''
stack_name = 'service_update_test_stack_existing_param_defaults'
initial_params = {'encrypted_param_names': [],
'parameter_defaults': {'mydefault': 123},
'parameters': {},
'resource_registry': {}}
update_params = {'encrypted_param_names': [],
'parameter_defaults': {'default2': 456},
'parameters': {},
'resource_registry': {}}
api_args = {rpc_api.PARAM_TIMEOUT: 60,
rpc_api.PARAM_EXISTING: True}
t = template_format.parse(tools.wp_template)
stack = utils.parse_stack(t, stack_name=stack_name,
params=initial_params)
stack.set_stack_user_project_id('1234')
expected_env = {'encrypted_param_names': [],
'parameter_defaults': {
'mydefault': 123,
'default2': 456},
'parameters': {},
'resource_registry': {'resources': {}}}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
mock_stack.load.return_value = stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stack.identifier(),
t,
update_params,
None, api_args)
tmpl = mock_stack.call_args[0][2]
self.assertEqual(expected_env,
tmpl.env.user_env_as_dict())
self.assertEqual(stack.identifier(), result)
def test_stack_update_reuses_api_params(self):
stack_name = 'service_update_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx)
old_stack.timeout_mins = 1
old_stack.disable_rollback = False
sid = old_stack.store()
old_stack.set_stack_user_project_id('1234')
s = stack_object.Stack.get_by_id(self.ctx, sid)
stack = tools.get_stack(stack_name, self.ctx)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
convergence=False, current_traversal=None,
prev_raw_template_id=None, current_deps=None,
disable_rollback=False, nested_depth=0,
owner_id=None, parent_resource=None,
stack_user_project_id='1234',
strict_validate=True,
tenant_id='test_tenant_id', timeout_mins=1,
user_creds_id=u'1',
username='test_username').AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.ReplayAll()
api_args = {}
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(old_stack.identifier(), result)
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
self.m.VerifyAll()
def test_stack_cancel_update_same_engine(self):
stack_name = 'service_update_cancel_test_stack'
old_stack = tools.get_stack(stack_name, self.ctx)
old_stack.state_set(old_stack.UPDATE, old_stack.IN_PROGRESS,
'test_override')
old_stack.disable_rollback = False
old_stack.store()
load_mock = self.patchobject(parser.Stack, 'load')
load_mock.return_value = old_stack
lock_mock = self.patchobject(stack_lock.StackLock, 'try_acquire')
lock_mock.return_value = self.man.engine_id
self.patchobject(self.man.thread_group_mgr, 'send')
self.man.stack_cancel_update(self.ctx, old_stack.identifier(),
cancel_with_rollback=False)
self.man.thread_group_mgr.send.assert_called_once_with(old_stack.id,
'cancel')
def test_stack_cancel_update_different_engine(self):
stack_name = 'service_update_cancel_test_stack'
old_stack = tools.get_stack(stack_name, self.ctx)
old_stack.state_set(old_stack.UPDATE, old_stack.IN_PROGRESS,
'test_override')
old_stack.disable_rollback = False
old_stack.store()
load_mock = self.patchobject(parser.Stack, 'load')
load_mock.return_value = old_stack
lock_mock = self.patchobject(stack_lock.StackLock, 'try_acquire')
another_engine_has_lock = str(uuid.uuid4())
lock_mock.return_value = another_engine_has_lock
self.patchobject(stack_lock.StackLock,
'engine_alive').return_value = True
self.man.listener = mock.Mock()
self.man.listener.SEND = 'send'
self.man._client = messaging.get_rpc_client(
version=self.man.RPC_API_VERSION)
# In fact the other engine is not alive, so the call will time out
self.assertRaises(dispatcher.ExpectedException,
self.man.stack_cancel_update,
self.ctx, old_stack.identifier())
def test_stack_cancel_update_wrong_state_fails(self):
stack_name = 'service_update_cancel_test_stack'
old_stack = tools.get_stack(stack_name, self.ctx)
old_stack.state_set(old_stack.UPDATE, old_stack.COMPLETE,
'test_override')
old_stack.store()
load_mock = self.patchobject(parser.Stack, 'load')
load_mock.return_value = old_stack
ex = self.assertRaises(
dispatcher.ExpectedException,
self.man.stack_cancel_update, self.ctx, old_stack.identifier())
self.assertEqual(exception.NotSupported, ex.exc_info[0])
self.assertIn("Cancelling update when stack is "
"('UPDATE', 'COMPLETE')",
six.text_type(ex.exc_info[1]))
@mock.patch.object(stack_object.Stack, 'count_total_resources')
def test_stack_update_equals(self, ctr):
stack_name = 'test_stack_update_equals_resource_limit'
params = {}
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = templatem.Template(tpl)
old_stack = parser.Stack(self.ctx, stack_name, template)
sid = old_stack.store()
old_stack.set_stack_user_project_id('1234')
s = stack_object.Stack.get_by_id(self.ctx, sid)
ctr.return_value = 3
stack = parser.Stack(self.ctx, stack_name, template)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
convergence=False, current_traversal=None,
prev_raw_template_id=None, current_deps=None,
disable_rollback=True, nested_depth=0,
owner_id=None, parent_resource=None,
stack_user_project_id='1234', strict_validate=True,
tenant_id='test_tenant_id',
timeout_mins=60, user_creds_id=u'1',
username='test_username').AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.ReplayAll()
cfg.CONF.set_override('max_resources_per_stack', 3)
api_args = {'timeout_mins': 60}
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(old_stack.identifier(), result)
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
root_stack_id = old_stack.root_stack_id()
self.assertEqual(3, old_stack.total_resources(root_stack_id))
self.m.VerifyAll()
def test_stack_update_stack_id_equal(self):
stack_name = 'test_stack_update_stack_id_equal'
tpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'AWS::StackId'}
}
}
}
}
template = templatem.Template(tpl)
create_stack = parser.Stack(self.ctx, stack_name, template)
sid = create_stack.store()
create_stack.create()
self.assertEqual((create_stack.CREATE, create_stack.COMPLETE),
create_stack.state)
s = stack_object.Stack.get_by_id(self.ctx, sid)
old_stack = parser.Stack.load(self.ctx, stack=s)
self.assertEqual((old_stack.CREATE, old_stack.COMPLETE),
old_stack.state)
self.assertEqual(create_stack.identifier().arn(),
old_stack['A'].properties['Foo'])
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(
self.ctx,
stack=s).AndReturn(old_stack)
self.m.ReplayAll()
result = self.man.update_stack(self.ctx, create_stack.identifier(),
tpl, {}, None, {})
self.assertEqual((old_stack.UPDATE, old_stack.COMPLETE),
old_stack.state)
self.assertEqual(create_stack.identifier(), result)
self.assertIsNotNone(create_stack.identifier().stack_id)
self.assertEqual(create_stack.identifier().arn(),
old_stack['A'].properties['Foo'])
self.assertEqual(create_stack['A'].id, old_stack['A'].id)
self.m.VerifyAll()
def test_stack_update_exceeds_resource_limit(self):
stack_name = 'test_stack_update_exceeds_resource_limit'
params = {}
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = templatem.Template(tpl)
old_stack = parser.Stack(self.ctx, stack_name, template)
sid = old_stack.store()
self.assertIsNotNone(sid)
cfg.CONF.set_override('max_resources_per_stack', 2)
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.update_stack, self.ctx,
old_stack.identifier(), tpl, params,
None, {})
self.assertEqual(exception.RequestLimitExceeded, ex.exc_info[0])
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
six.text_type(ex.exc_info[1]))
def test_stack_update_verify_err(self):
stack_name = 'service_update_verify_err_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx)
old_stack.store()
sid = old_stack.store()
old_stack.set_stack_user_project_id('1234')
s = stack_object.Stack.get_by_id(self.ctx, sid)
stack = tools.get_stack(stack_name, self.ctx)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
convergence=False, current_traversal=None,
prev_raw_template_id=None, current_deps=None,
disable_rollback=True, nested_depth=0,
owner_id=None, parent_resource=None,
stack_user_project_id='1234', strict_validate=True,
tenant_id='test_tenant_id',
timeout_mins=60, user_creds_id=u'1',
username='test_username').AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndRaise(exception.StackValidationFailed(
message='fubar'))
self.m.ReplayAll()
api_args = {'timeout_mins': 60}
ex = self.assertRaises(
dispatcher.ExpectedException,
self.man.update_stack,
self.ctx, old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
self.m.VerifyAll()
def test_stack_update_nonexist(self):
stack_name = 'service_update_nonexist_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = tools.get_stack(stack_name, self.ctx)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.update_stack,
self.ctx, stack.identifier(), template,
params, None, {})
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_stack_update_no_credentials(self):
cfg.CONF.set_default('deferred_auth_method', 'password')
stack_name = 'test_stack_update_no_credentials'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx)
# force check for credentials on update
old_stack['WebServer'].requires_deferred_auth = True
sid = old_stack.store()
old_stack.set_stack_user_project_id('1234')
s = stack_object.Stack.get_by_id(self.ctx, sid)
self.ctx = utils.dummy_context(password=None)
self.m.StubOutWithMock(self.man, '_get_stack')
self.man._get_stack(self.ctx, old_stack.identifier()).AndReturn(s)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=old_stack.env).AndReturn(old_stack.t)
environment.Environment(params).AndReturn(old_stack.env)
parser.Stack(self.ctx, old_stack.name,
old_stack.t,
convergence=False,
current_traversal=None,
prev_raw_template_id=None,
current_deps=None,
disable_rollback=True,
nested_depth=0,
owner_id=None,
parent_resource=None,
stack_user_project_id='1234',
strict_validate=True,
tenant_id='test_tenant_id',
timeout_mins=60,
user_creds_id=u'1',
username='test_username').AndReturn(old_stack)
self.m.ReplayAll()
api_args = {'timeout_mins': 60}
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.update_stack, self.ctx,
old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(exception.MissingCredentialError, ex.exc_info[0])
self.assertEqual(
'Missing required credential: X-Auth-Key',
six.text_type(ex.exc_info[1]))
self.m.VerifyAll()
class StackConvergenceServiceCreateUpdateTest(common.HeatTestCase):
def setUp(self):
super(StackConvergenceServiceCreateUpdateTest, self).setUp()
cfg.CONF.set_override('convergence_engine', True)
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
def _stub_update_mocks(self, stack_to_load, stack_to_return):
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=stack_to_load
).AndReturn(stack_to_return)
self.m.StubOutWithMock(templatem, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
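# These tests drive mox in record/replay mode: the expected
# Template/Environment/Stack constructor calls are recorded with
# AndReturn(), self.m.ReplayAll() switches to replay before the service
# call, and self.m.VerifyAll() checks the expectations afterwards.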
def _test_stack_create_convergence(self, stack_name):
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = tools.get_stack(stack_name, self.ctx,
template=tools.string_template_five,
convergence=True)
self.m.StubOutWithMock(templatem, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(parser, 'Stack')
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t, owner_id=None,
parent_resource=None,
nested_depth=0, user_creds_id=None,
stack_user_project_id=None,
timeout_mins=60,
disable_rollback=False,
convergence=True).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.ReplayAll()
api_args = {'timeout_mins': 60, 'disable_rollback': False}
result = self.man.create_stack(self.ctx, stack_name,
template, params, None, api_args)
db_stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
self.assertEqual(True, db_stack.convergence)
self.assertEqual(result['stack_id'], db_stack.id)
self.m.VerifyAll()
def test_stack_create_enabled_convergence_engine(self):
stack_name = 'service_create_test_stack'
self._test_stack_create_convergence(stack_name)
def test_stack_update_enabled_convergence_engine(self):
stack_name = 'service_update_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx,
template=tools.string_template_five,
convergence=True)
old_stack.timeout_mins = 1
sid = old_stack.store()
s = stack_object.Stack.get_by_id(self.ctx, sid)
stack = tools.get_stack(stack_name, self.ctx,
template=tools.string_template_five_update,
convergence=True)
self._stub_update_mocks(s, old_stack)
templatem.Template(template, files=None,
env=stack.env).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
owner_id=old_stack.owner_id,
nested_depth=old_stack.nested_depth,
user_creds_id=old_stack.user_creds_id,
stack_user_project_id=old_stack.stack_user_project_id,
timeout_mins=60,
disable_rollback=False,
parent_resource=None,
strict_validate=True,
tenant_id=old_stack.tenant_id,
username=old_stack.username,
convergence=old_stack.convergence,
current_traversal=old_stack.current_traversal,
prev_raw_template_id=old_stack.prev_raw_template_id,
current_deps=old_stack.current_deps).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.ReplayAll()
api_args = {'timeout_mins': 60, 'disable_rollback': False}
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, api_args)
self.assertEqual(True, old_stack.convergence)
self.assertEqual(old_stack.identifier(), result)
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
self.m.VerifyAll()
class StackServiceAuthorizeTest(common.HeatTestCase):
def setUp(self):
super(StackServiceAuthorizeTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.engine_id = 'engine-fake-uuid'
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@tools.stack_context('service_authorize_stack_user_nocreds_test_stack')
def test_stack_authorize_stack_user_nocreds(self):
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
@tools.stack_context('service_authorize_user_attribute_error_test_stack')
def test_stack_authorize_stack_user_attribute_error(self):
self.m.StubOutWithMock(json, 'loads')
json.loads(None).AndRaise(AttributeError)
self.m.ReplayAll()
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
self.m.VerifyAll()
@tools.stack_context('service_authorize_stack_user_type_error_test_stack')
def test_stack_authorize_stack_user_type_error(self):
self.m.StubOutWithMock(json, 'loads')
json.loads(mox.IgnoreArg()).AndRaise(TypeError)
self.m.ReplayAll()
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
self.m.VerifyAll()
def test_stack_authorize_stack_user(self):
self.ctx = utils.dummy_context()
self.ctx.aws_creds = '{"ec2Credentials": {"access": "4567"}}'
stack_name = 'stack_authorize_stack_user'
stack = tools.get_stack(stack_name, self.ctx, user_policy_template)
self.stack = stack
fc = tools.setup_mocks(self.m, stack)
self.patchobject(fc.servers, 'delete',
side_effect=fakes_nova.fake_exception())
self.m.ReplayAll()
stack.store()
stack.create()
self.assertTrue(self.eng._authorize_stack_user(
self.ctx, self.stack, 'WebServer'))
self.assertFalse(self.eng._authorize_stack_user(
self.ctx, self.stack, 'CfnUser'))
self.assertFalse(self.eng._authorize_stack_user(
self.ctx, self.stack, 'NoSuchResource'))
self.m.VerifyAll()
def test_stack_authorize_stack_user_user_id(self):
self.ctx = utils.dummy_context(user_id=str(uuid.uuid4()))
stack_name = 'stack_authorize_stack_user_user_id'
stack = tools.get_stack(stack_name, self.ctx, server_config_template)
self.stack = stack
def handler(resource_name):
return resource_name == 'WebServer'
self.stack.register_access_allowed_handler(self.ctx.user_id, handler)
# matching credential_id and resource_name
self.assertTrue(self.eng._authorize_stack_user(
self.ctx, self.stack, 'WebServer'))
# not matching resource_name
self.assertFalse(self.eng._authorize_stack_user(
self.ctx, self.stack, 'NoSuchResource'))
# not matching credential_id
self.ctx.user_id = str(uuid.uuid4())
self.assertFalse(self.eng._authorize_stack_user(
self.ctx, self.stack, 'WebServer'))
class StackServiceTest(common.HeatTestCase):
def setUp(self):
super(StackServiceTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
self.eng.engine_id = 'engine-fake-uuid'
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@tools.stack_context('service_identify_test_stack', False)
def test_stack_identify(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
identity = self.eng.identify_stack(self.ctx, self.stack.name)
self.assertEqual(self.stack.identifier(), identity)
self.m.VerifyAll()
@tools.stack_context('ef0c41a4-644f-447c-ad80-7eecb0becf79', False)
def test_stack_identify_by_name_in_uuid(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
identity = self.eng.identify_stack(self.ctx, self.stack.name)
self.assertEqual(self.stack.identifier(), identity)
self.m.VerifyAll()
@tools.stack_context('service_identify_uuid_test_stack', False)
def test_stack_identify_uuid(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
identity = self.eng.identify_stack(self.ctx, self.stack.id)
self.assertEqual(self.stack.identifier(), identity)
self.m.VerifyAll()
def test_stack_identify_nonexist(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.identify_stack, self.ctx, 'wibble')
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
@tools.stack_context('service_create_existing_test_stack', False)
def test_stack_create_existing(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.create_stack, self.ctx,
self.stack.name, self.stack.t.t, {}, None, {})
self.assertEqual(exception.StackExists, ex.exc_info[0])
@tools.stack_context('service_name_tenants_test_stack', False)
def test_stack_by_name_tenants(self):
self.assertEqual(
self.stack.id,
stack_object.Stack.get_by_name(self.ctx, self.stack.name).id)
ctx2 = utils.dummy_context(tenant_id='stack_service_test_tenant2')
self.assertIsNone(stack_object.Stack.get_by_name(
ctx2,
self.stack.name))
@tools.stack_context('service_list_all_test_stack')
def test_stack_list_all(self):
self.m.StubOutWithMock(parser.Stack, '_from_db')
parser.Stack._from_db(
self.ctx, mox.IgnoreArg(),
resolve_data=False
).AndReturn(self.stack)
self.m.ReplayAll()
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(1, len(sl))
for s in sl:
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertIn('WordPress', s['description'])
self.m.VerifyAll()
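# The list_stacks tests below assert only on the stack_object.Stack.get_all
# arguments under test; every other position is matched loosely with
# mock.ANY.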
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_marker_info(self, mock_stack_get_all):
limit = object()
marker = object()
sort_keys = object()
sort_dir = object()
self.eng.list_stacks(self.ctx, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit,
sort_keys,
marker,
sort_dir,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_filtering_info(self, mock_stack_get_all):
filters = {'foo': 'bar'}
self.eng.list_stacks(self.ctx, filters=filters)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
filters,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_tenant_safe_defaults_to_true(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
True,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_tenant_safe_info(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, tenant_safe=False)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
False,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_nested(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_nested=True)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
True,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_deleted(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_deleted=True)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
True,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_hidden(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_hidden=True)
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
True,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_tags(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, tags=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
['foo', 'bar'],
mock.ANY,
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_tags_any(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, tags_any=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
['foo', 'bar'],
mock.ANY,
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_not_tags(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, not_tags=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
['foo', 'bar'],
mock.ANY,
)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_not_tags_any(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, not_tags_any=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
['foo', 'bar'],
)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_passes_filter_info(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, filters={'foo': 'bar'})
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters={'foo': 'bar'},
tenant_safe=mock.ANY,
show_deleted=False,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_tenant_safe_default_true(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_passes_tenant_safe_info(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, tenant_safe=False)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=False,
show_deleted=False,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_show_nested(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_nested=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False,
show_nested=True,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stack_show_deleted(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_deleted=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stack_show_hidden(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_hidden=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False,
show_nested=False,
show_hidden=True,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@tools.stack_context('service_abandon_stack')
def test_abandon_stack(self):
cfg.CONF.set_override('enable_stack_abandon', True)
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
expected_res = {
u'WebServer': {
'action': 'CREATE',
'metadata': {},
'name': u'WebServer',
'resource_data': {},
'resource_id': '9999',
'status': 'COMPLETE',
'type': u'AWS::EC2::Instance'}}
self.m.ReplayAll()
ret = self.eng.abandon_stack(self.ctx, self.stack.identifier())
self.assertEqual(10, len(ret))
self.assertEqual('CREATE', ret['action'])
self.assertEqual('COMPLETE', ret['status'])
self.assertEqual('service_abandon_stack', ret['name'])
self.assertEqual({}, ret['files'])
self.assertIn('id', ret)
self.assertEqual(expected_res, ret['resources'])
self.assertEqual(self.stack.t.t, ret['template'])
self.assertIn('project_id', ret)
self.assertIn('stack_user_project_id', ret)
self.assertIn('environment', ret)
self.assertIn('files', ret)
self.m.VerifyAll()
def test_stack_describe_nonexistent(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_stack,
self.ctx, non_exist_identifier)
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_stack_describe_bad_tenant(self):
non_exist_identifier = identifier.HeatIdentifier(
'wibble', 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
invalid_tenant_exc = exception.InvalidTenant(target='test',
actual='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(invalid_tenant_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_stack,
self.ctx, non_exist_identifier)
self.assertEqual(exception.InvalidTenant, ex.exc_info[0])
self.m.VerifyAll()
@tools.stack_context('service_describe_test_stack', False)
def test_stack_describe(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier(),
show_deleted=True).AndReturn(s)
self.m.ReplayAll()
sl = self.eng.show_stack(self.ctx, self.stack.identifier())
self.assertEqual(1, len(sl))
s = sl[0]
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertIn('WordPress', s['description'])
self.assertIn('parameters', s)
self.m.VerifyAll()
@tools.stack_context('service_describe_all_test_stack', False)
def test_stack_describe_all(self):
sl = self.eng.show_stack(self.ctx, None)
self.assertEqual(1, len(sl))
s = sl[0]
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertIn('WordPress', s['description'])
self.assertIn('parameters', s)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_versions(self, templ_mock):
class DummyMgr(object):
def names(self):
return ['a.b', 'c.d']
def __getitem__(self, item):
m = mock.MagicMock()
if item == 'a.b':
m.plugin = cfntemplate.CfnTemplate
return m
else:
m.plugin = hottemplate.HOTemplate20130523
return m
templ_mock.return_value = DummyMgr()
templates = self.eng.list_template_versions(self.ctx)
expected = [{'version': 'a.b', 'type': 'cfn'},
{'version': 'c.d', 'type': 'hot'}]
self.assertEqual(expected, templates)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_functions(self, templ_mock):
class DummyFunc1(object):
"""
Dummy Func1
Dummy Func1 Long Description
"""
class DummyFunc2(object):
"""Dummy Func2
Dummy Func2 Long Description
"""
plugin_mock = mock.Mock(
functions={'dummy1': DummyFunc1,
'dummy2': DummyFunc2,
'removed': hot_functions.Removed})
dummy_tmpl = mock.Mock(plugin=plugin_mock)
class DummyMgr(object):
def __getitem__(self, item):
return dummy_tmpl
templ_mock.return_value = DummyMgr()
functions = self.eng.list_template_functions(self.ctx, 'dummytemplate')
expected = [{'functions': 'dummy1',
'description': 'Dummy Func1'},
{'functions': 'dummy2',
'description': 'Dummy Func2'}]
self.assertEqual(sorted(expected, key=lambda k: k['functions']),
sorted(functions, key=lambda k: k['functions']))
def _test_describe_stack_resource(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('metadata', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertIn('attributes', r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
@tools.stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
self._test_describe_stack_resource()
def test_stack_resource_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id,
'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
self.m.VerifyAll()
@tools.stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
self.m.VerifyAll()
@tools.stack_context('service_resource_describe_noncreated_test_stack',
create_res=False)
def test_stack_resource_describe_noncreated_resource(self):
self._test_describe_stack_resource()
@tools.stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self):
self.ctx.roles = [cfg.CONF.heat_stack_user_role]
self.m.StubOutWithMock(service.EngineService, '_authorize_stack_user')
service.EngineService._authorize_stack_user(self.ctx, mox.IgnoreArg(),
'foo').AndReturn(False)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.Forbidden, ex.exc_info[0])
self.m.VerifyAll()
@tools.stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
'WebServer')
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
@tools.stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
def test_stack_resources_describe_bad_lookup(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, None).AndRaise(TypeError)
self.m.ReplayAll()
self.assertRaises(TypeError,
self.eng.describe_stack_resources,
self.ctx, None, 'WebServer')
self.m.VerifyAll()
def test_stack_resources_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resources,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
phys_id = resources[0]['physical_resource_id']
result = self.eng.find_physical_resource(self.ctx, phys_id)
self.assertIsInstance(result, dict)
resource_identity = identifier.ResourceIdentifier(**result)
self.assertEqual(self.stack.identifier(), resource_identity.stack())
self.assertEqual('WebServer', resource_identity.resource_name)
def test_find_physical_resource_nonexist(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.find_physical_resource,
self.ctx, 'foo')
self.assertEqual(exception.PhysicalResourceNotFound, ex.exc_info[0])
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier())
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('updated_time', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.m.VerifyAll()
@mock.patch.object(parser.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
2)
self.stack.iter_resources.assert_called_once_with(2)
@mock.patch.object(parser.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
99)
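# The requested depth (99) exceeds the configured maximum, so the engine
# is expected to clamp it to cfg.CONF.max_nested_stack_depth.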
max_depth = cfg.CONF.max_nested_stack_depth
self.stack.iter_resources.assert_called_once_with(max_depth)
@mock.patch.object(parser.Stack, 'load')
def test_stack_resources_list_deleted_stack(self, mock_load):
stack = tools.setup_stack('resource_list_deleted_stack', self.ctx)
stack_id = stack.identifier()
mock_load.return_value = stack
tools.clean_up_stack(stack)
resources = self.eng.list_stack_resources(self.ctx, stack_id)
self.assertEqual(1, len(resources))
res = resources[0]
self.assertEqual('DELETE', res['resource_action'])
self.assertEqual('COMPLETE', res['resource_status'])
def test_stack_resources_list_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier, show_deleted=True
).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.list_stack_resources,
self.ctx, non_exist_identifier)
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_signal_reception_async(self):
self.eng.thread_group_mgr = tools.DummyThreadGroupMgrLogStart()
stack_name = 'signal_reception_async'
stack = tools.get_stack(stack_name, self.ctx, policy_template)
self.stack = stack
tools.setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.ReplayAll()
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
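# Because sync_call is not set, the signal is dispatched through the
# thread group manager, so the stub manager should have recorded a start
# for this stack instead of the resource being signalled in-line.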
self.assertEqual([(self.stack.id, mox.IgnoreArg())],
self.eng.thread_group_mgr.started)
self.m.VerifyAll()
def test_signal_reception_sync(self):
stack_name = 'signal_reception_sync'
stack = tools.get_stack(stack_name, self.ctx, policy_template)
self.stack = stack
tools.setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(res.Resource, 'signal')
res.Resource.signal(mox.IgnoreArg()).AndReturn(None)
self.m.ReplayAll()
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data,
sync_call=True)
self.m.VerifyAll()
def test_signal_reception_no_resource(self):
stack_name = 'signal_reception_no_resource'
stack = tools.get_stack(stack_name, self.ctx, policy_template)
tools.setup_keystone_mocks(self.m, stack)
self.stack = stack
self.m.ReplayAll()
stack.store()
stack.create()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'resource_does_not_exist',
test_data)
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_signal_reception_unavailable_resource(self):
stack_name = 'signal_reception_unavailable_resource'
stack = tools.get_stack(stack_name, self.ctx, policy_template)
stack.store()
self.stack = stack
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(
self.ctx, stack=mox.IgnoreArg(),
use_stored_context=mox.IgnoreArg()
).AndReturn(self.stack)
self.m.ReplayAll()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.assertEqual(exception.ResourceNotAvailable, ex.exc_info[0])
self.m.VerifyAll()
def test_signal_returns_metadata(self):
stack = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stack
tools.setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
test_metadata = {'food': 'yum'}
rsrc = stack['WebServerScaleDownPolicy']
rsrc.metadata_set(test_metadata)
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(res.Resource, 'signal')
res.Resource.signal(mox.IgnoreArg()).AndReturn(None)
self.m.ReplayAll()
md = self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
self.assertEqual(test_metadata, md)
self.m.VerifyAll()
def test_signal_calls_metadata_update(self):
stack = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stack
tools.setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(res.Resource, 'signal')
res.Resource.signal(mox.IgnoreArg()).AndReturn(None)
self.m.StubOutWithMock(res.Resource, 'metadata_update')
# this will be called once for the WebServerScaleDownPolicy resource
res.Resource.metadata_update().AndReturn(None)
self.m.ReplayAll()
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
self.m.VerifyAll()
def test_signal_no_calls_metadata_update(self):
stack = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stack
tools.setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
res.Resource.signal_needs_metadata_updates = False
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(res.Resource, 'signal')
res.Resource.signal(mox.IgnoreArg()).AndReturn(None)
# this will never be called
self.m.StubOutWithMock(res.Resource, 'metadata_update')
self.m.ReplayAll()
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
self.m.VerifyAll()
res.Resource.signal_needs_metadata_updates = True
def test_stack_list_all_empty(self):
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(0, len(sl))
def test_stack_describe_all_empty(self):
sl = self.eng.show_stack(self.ctx, None)
self.assertEqual(0, len(sl))
def test_lazy_load_resources(self):
stack_name = 'lazy_load_test'
lazy_load_template = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
}
templ = templatem.Template(lazy_load_template)
stack = parser.Stack(self.ctx, stack_name, templ)
self.assertIsNone(stack._resources)
self.assertIsNone(stack._dependencies)
resources = stack.resources
self.assertIsInstance(resources, dict)
self.assertEqual(2, len(resources))
self.assertIsInstance(resources.get('foo'),
generic_rsrc.GenericResource)
self.assertIsInstance(resources.get('bar'),
generic_rsrc.ResourceWithProps)
stack_dependencies = stack.dependencies
self.assertIsInstance(stack_dependencies, dependencies.Dependencies)
self.assertEqual(2, len(stack_dependencies.graph()))
def _preview_stack(self):
res._register_class('GenericResource1', generic_rsrc.GenericResource)
res._register_class('GenericResource2', generic_rsrc.GenericResource)
args = {}
params = {}
files = None
stack_name = 'SampleStack'
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'Lorem ipsum.',
'Resources': {
'SampleResource1': {'Type': 'GenericResource1'},
'SampleResource2': {'Type': 'GenericResource2'}}}
return self.eng.preview_stack(self.ctx, stack_name, tpl,
params, files, args)
def test_preview_stack_returns_a_stack(self):
stack = self._preview_stack()
expected_identity = {'path': '',
'stack_id': 'None',
'stack_name': 'SampleStack',
'tenant': 'stack_service_test_tenant'}
self.assertEqual(expected_identity, stack['stack_identity'])
self.assertEqual('SampleStack', stack['stack_name'])
self.assertEqual('Lorem ipsum.', stack['description'])
def test_preview_stack_returns_list_of_resources_in_stack(self):
stack = self._preview_stack()
self.assertIsInstance(stack['resources'], list)
self.assertEqual(2, len(stack['resources']))
resource_types = set(r['resource_type'] for r in stack['resources'])
self.assertIn('GenericResource1', resource_types)
self.assertIn('GenericResource2', resource_types)
resource_names = set(r['resource_name'] for r in stack['resources'])
self.assertIn('SampleResource1', resource_names)
self.assertIn('SampleResource2', resource_names)
def test_preview_stack_validates_new_stack(self):
exc = exception.StackExists(stack_name='Validation Failed')
self.eng._validate_new_stack = mock.Mock(side_effect=exc)
ex = self.assertRaises(dispatcher.ExpectedException,
self._preview_stack)
self.assertEqual(exception.StackExists, ex.exc_info[0])
@mock.patch.object(service.api, 'format_stack_preview', new=mock.Mock())
@mock.patch.object(service.parser, 'Stack')
def test_preview_stack_checks_stack_validity(self, mock_parser):
exc = exception.StackValidationFailed(message='Validation Failed')
mock_parsed_stack = mock.Mock()
mock_parsed_stack.validate.side_effect = exc
mock_parser.return_value = mock_parsed_stack
ex = self.assertRaises(dispatcher.ExpectedException,
self._preview_stack)
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
@mock.patch.object(stack_object.Stack, 'get_by_name')
def test_validate_new_stack_checks_existing_stack(self, mock_stack_get):
mock_stack_get.return_value = 'existing_db_stack'
tmpl = templatem.Template(
{'HeatTemplateFormatVersion': '2012-12-12'})
self.assertRaises(exception.StackExists, self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', tmpl)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_validate_new_stack_checks_stack_limit(self, mock_db_count):
cfg.CONF.set_override('max_stacks_per_tenant', 99)
mock_db_count.return_value = 99
template = templatem.Template(
{'HeatTemplateFormatVersion': '2012-12-12'})
self.assertRaises(exception.RequestLimitExceeded,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', template)
def test_validate_new_stack_checks_incorrect_keywords_in_resource(self):
template = {'heat_template_version': '2013-05-23',
'resources': {
'Res': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
ex = self.assertRaises(exception.StackValidationFailed,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack',
parsed_template)
msg = (u'"Type" is not a valid keyword '
'inside a resource definition')
self.assertEqual(msg, six.text_type(ex))
def test_validate_new_stack_checks_incorrect_sections(self):
template = {'heat_template_version': '2013-05-23',
'unknown_section': {
'Res': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
ex = self.assertRaises(exception.StackValidationFailed,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack',
parsed_template)
msg = u'The template section is invalid: unknown_section'
self.assertEqual(msg, six.text_type(ex))
def test_validate_new_stack_checks_resource_limit(self):
cfg.CONF.set_override('max_resources_per_stack', 5)
template = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'Res1': {'Type': 'GenericResource1'},
'Res2': {'Type': 'GenericResource1'},
'Res3': {'Type': 'GenericResource1'},
'Res4': {'Type': 'GenericResource1'},
'Res5': {'Type': 'GenericResource1'},
'Res6': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
self.assertRaises(exception.RequestLimitExceeded,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', parsed_template)
def test_validate_new_stack_handle_assertion_error(self):
tmpl = mock.MagicMock()
expected_message = 'Expected assertion error'
tmpl.validate.side_effect = AssertionError(expected_message)
exc = self.assertRaises(AssertionError, self.eng._validate_new_stack,
self.ctx, 'stack_name', tmpl)
self.assertEqual(expected_message, six.text_type(exc))
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch.object(stack_object.Stack, 'get_all')
@mock.patch('heat.engine.stack_lock.StackLock',
return_value=mock.Mock())
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(context, 'get_admin_context')
def test_engine_reset_stack_status(
self,
mock_admin_context,
mock_stack_load,
mock_stacklock,
mock_get_all,
mock_thread):
mock_admin_context.return_value = self.ctx
db_stack = mock.MagicMock()
db_stack.id = 'foo'
db_stack.status = 'IN_PROGRESS'
db_stack.status_reason = None
mock_get_all.return_value = [db_stack]
fake_stack = mock.MagicMock()
fake_stack.action = 'CREATE'
fake_stack.id = 'foo'
fake_stack.status = 'IN_PROGRESS'
fake_stack.state_set.return_value = None
mock_stack_load.return_value = fake_stack
fake_lock = mock.MagicMock()
fake_lock.get_engine_id.return_value = 'old-engine'
fake_lock.acquire.return_value = None
mock_stacklock.return_value = fake_lock
self.eng.thread_group_mgr = mock_thread
self.eng.reset_stack_status()
mock_admin_context.assert_called_once_with()
filters = {'status': parser.Stack.IN_PROGRESS}
mock_get_all.assert_called_once_with(self.ctx,
filters=filters,
tenant_safe=False)
mock_stack_load.assert_called_once_with(self.ctx,
stack=db_stack,
use_stored_context=True)
mock_thread.start_with_acquired_lock.assert_called_once_with(
fake_stack, fake_lock, fake_stack.state_set, fake_stack.action,
fake_stack.FAILED, 'Engine went down during stack CREATE'
)
from django.conf import settings
from django.core.management import call_command
from django.db import connection
from django.test.utils import _set_autocommit, TEST_DATABASE_PREFIX
import os, re, sys
def getstatusoutput(cmd):
"A simpler version of getstatusoutput that works on win32 platforms."
stdin, stdout, stderr = os.popen3(cmd)
output = stdout.read()
if output.endswith('\n'): output = output[:-1]
status = stdin.close()
return status, output
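# A rough equivalent built on the subprocess module (available from
# Python 2.4) is sketched below for reference only; it is not used by the
# functions in this file:
#
#   import subprocess
#   def getstatusoutput(cmd):
#       proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
#                               stderr=subprocess.STDOUT)
#       output = proc.communicate()[0]
#       if output.endswith('\n'): output = output[:-1]
#       return proc.returncode, output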
def create_lang(db_name, verbosity=1):
"Sets up the pl/pgsql language on the given database."
# Getting the command-line options for the shell command
options = get_cmd_options(db_name)
# Constructing the 'createlang' command.
createlang_cmd = 'createlang %splpgsql' % options
if verbosity >= 1: print createlang_cmd
# Must have database super-user privileges to execute createlang -- it must
# also be in your path.
status, output = getstatusoutput(createlang_cmd)
# Checking the status of the command, 0 => execution successful
if status:
raise Exception("Error executing 'plpgsql' command: %s\n" % output)
def _create_with_cursor(db_name, verbosity=1, autoclobber=False):
"Creates database with psycopg2 cursor."
# Constructing the necessary SQL to create the database (the DATABASE_USER
# must possess the privileges to create a database)
create_sql = 'CREATE DATABASE %s' % connection.ops.quote_name(db_name)
if settings.DATABASE_USER:
create_sql += ' OWNER %s' % settings.DATABASE_USER
cursor = connection.cursor()
_set_autocommit(connection)
try:
# Trying to create the database first.
cursor.execute(create_sql)
#print create_sql
except Exception, e:
# Drop and recreate, if necessary.
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_db(db_name)
if verbosity >= 1: print 'Creating new spatial database...'
cursor.execute(create_sql)
else:
raise Exception('Spatial Database Creation canceled.')
created_regex = re.compile(r'^createdb: database creation failed: ERROR: database ".+" already exists')
def _create_with_shell(db_name, verbosity=1, autoclobber=False):
"""
If no spatial database already exists, then using a cursor will not work.
Thus, a `createdb` command will be issued through the shell to bootstrap
creation of the spatial database.
"""
# Getting the command-line options for the shell command
options = get_cmd_options(False)
create_cmd = 'createdb -O %s %s%s' % (settings.DATABASE_USER, options, db_name)
if verbosity >= 1: print create_cmd
# Attempting to create the database.
status, output = getstatusoutput(create_cmd)
if status:
if created_regex.match(output):
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_cmd = 'dropdb %s%s' % (options, db_name)
status, output = getstatusoutput(drop_cmd)
if status != 0:
raise Exception('Could not drop database %s: %s' % (db_name, output))
if verbosity >= 1: print 'Creating new spatial database...'
status, output = getstatusoutput(create_cmd)
if status != 0:
raise Exception('Could not create database after dropping: %s' % output)
else:
raise Exception('Spatial Database Creation canceled.')
else:
raise Exception('Unknown error occurred in creating database: %s' % output)
def create_spatial_db(test=False, verbosity=1, autoclobber=False, interactive=False):
"Creates a spatial database based on the settings."
# Making sure we're using PostgreSQL and psycopg2
if settings.DATABASE_ENGINE != 'postgresql_psycopg2':
        raise Exception('Spatial database creation is only supported on the postgresql_psycopg2 backend.')
# Getting the spatial database name
if test:
db_name = get_spatial_db(test=True)
_create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)
else:
db_name = get_spatial_db()
_create_with_shell(db_name, verbosity=verbosity, autoclobber=autoclobber)
    # Creating the db language. This does not need to be done on NT platforms
    # since the PostGIS installer enables this capability.
if os.name != 'nt':
create_lang(db_name, verbosity=verbosity)
# Now adding in the PostGIS routines.
load_postgis_sql(db_name, verbosity=verbosity)
if verbosity >= 1: print 'Creation of spatial database %s successful.' % db_name
# Closing the connection
connection.close()
settings.DATABASE_NAME = db_name
# Syncing the database
call_command('syncdb', verbosity=verbosity, interactive=interactive)
def drop_db(db_name=False, test=False):
"""
Drops the given database (defaults to what is returned from
get_spatial_db()). All exceptions are propagated up to the caller.
"""
if not db_name: db_name = get_spatial_db(test=test)
cursor = connection.cursor()
cursor.execute('DROP DATABASE %s' % connection.ops.quote_name(db_name))
def get_cmd_options(db_name):
"Obtains the command-line PostgreSQL connection options for shell commands."
# The db_name parameter is optional
options = ''
if db_name:
options += '-d %s ' % db_name
if settings.DATABASE_USER:
options += '-U %s ' % settings.DATABASE_USER
if settings.DATABASE_HOST:
options += '-h %s ' % settings.DATABASE_HOST
if settings.DATABASE_PORT:
options += '-p %s ' % settings.DATABASE_PORT
return options
def get_spatial_db(test=False):
"""
Returns the name of the spatial database. The 'test' keyword may be set
to return the test spatial database name.
"""
if test:
if settings.TEST_DATABASE_NAME:
test_db_name = settings.TEST_DATABASE_NAME
else:
test_db_name = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
return test_db_name
else:
if not settings.DATABASE_NAME:
raise Exception('must configure DATABASE_NAME in settings.py')
return settings.DATABASE_NAME
def load_postgis_sql(db_name, verbosity=1):
"""
This routine loads up the PostGIS SQL files lwpostgis.sql and
spatial_ref_sys.sql.
"""
# Getting the path to the PostGIS SQL
try:
# POSTGIS_SQL_PATH may be placed in settings to tell GeoDjango where the
# PostGIS SQL files are located. This is especially useful on Win32
# platforms since the output of pg_config looks like "C:/PROGRA~1/..".
sql_path = settings.POSTGIS_SQL_PATH
except AttributeError:
status, sql_path = getstatusoutput('pg_config --sharedir')
if status:
sql_path = '/usr/local/share'
# The PostGIS SQL post-creation files.
lwpostgis_file = os.path.join(sql_path, 'lwpostgis.sql')
srefsys_file = os.path.join(sql_path, 'spatial_ref_sys.sql')
if not os.path.isfile(lwpostgis_file):
raise Exception('Could not find PostGIS function definitions in %s' % lwpostgis_file)
if not os.path.isfile(srefsys_file):
raise Exception('Could not find PostGIS spatial reference system definitions in %s' % srefsys_file)
# Getting the psql command-line options, and command format.
options = get_cmd_options(db_name)
cmd_fmt = 'psql %s-f "%%s"' % options
# Now trying to load up the PostGIS functions
cmd = cmd_fmt % lwpostgis_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS lwgeometry routines.')
# Now trying to load up the Spatial Reference System table
cmd = cmd_fmt % srefsys_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS spatial_ref_sys table.')
# Setting the permissions because on Windows platforms the owner
# of the spatial_ref_sys and geometry_columns tables is always
# the postgres user, regardless of how the db is created.
if os.name == 'nt': set_permissions(db_name)
def set_permissions(db_name):
"""
Sets the permissions on the given database to that of the user specified
in the settings. Needed specifically for PostGIS on Win32 platforms.
"""
cursor = connection.cursor()
user = settings.DATABASE_USER
cursor.execute('ALTER TABLE geometry_columns OWNER TO %s' % user)
cursor.execute('ALTER TABLE spatial_ref_sys OWNER TO %s' % user)
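# --- Hypothetical usage sketch (not part of the module above) ---
# Shows how the helpers above might be driven from a one-off script, assuming
# the DATABASE_* settings are already configured for postgresql_psycopg2 and
# that the createdb/createlang/psql binaries are on the PATH; nothing here is
# taken from the module's public contract beyond the functions defined above.
if __name__ == '__main__':
    # Bootstrap a fresh spatial test database (clobbering any existing copy)
    # and sync the project's models into it.
    create_spatial_db(test=True, verbosity=2, autoclobber=True, interactive=False)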
|
|
import os
import cPickle
import numpy as np
import xml.etree.ElementTree as ET
import random
import svgwrite
from IPython.display import SVG, display
from svg.path import Path, Line, Arc, CubicBezier, QuadraticBezier, parse_path
def calculate_start_point(data, factor=1.0, block_size = 200):
# will try to center the sketch to the middle of the block
# determines maxx, minx, maxy, miny
sx = 0
sy = 0
maxx = 0
minx = 0
maxy = 0
miny = 0
for i in xrange(len(data)):
sx += round(float(data[i, 0])*factor, 3)
sy += round(float(data[i, 1])*factor, 3)
maxx = max(maxx, sx)
minx = min(minx, sx)
maxy = max(maxy, sy)
miny = min(miny, sy)
abs_x = block_size/2-(maxx-minx)/2-minx
abs_y = block_size/2-(maxy-miny)/2-miny
return abs_x, abs_y, (maxx-minx), (maxy-miny)
def draw_stroke_color_array(data, factor=1, svg_filename = 'sample.svg', stroke_width = 1, block_size = 200, maxcol = 5, svg_only = False, color_mode = True):
num_char = len(data)
if num_char < 1:
return
max_color_intensity = 225
numrow = np.ceil(float(num_char)/float(maxcol))
dwg = svgwrite.Drawing(svg_filename, size=(block_size*(min(num_char, maxcol)), block_size*numrow))
dwg.add(dwg.rect(insert=(0, 0), size=(block_size*(min(num_char, maxcol)), block_size*numrow),fill='white'))
the_color = "rgb("+str(random.randint(0, max_color_intensity))+","+str(int(random.randint(0, max_color_intensity)))+","+str(int(random.randint(0, max_color_intensity)))+")"
for j in xrange(len(data)):
lift_pen = 0
#end_of_char = 0
cdata = data[j]
abs_x, abs_y, size_x, size_y = calculate_start_point(cdata, factor, block_size)
abs_x += (j % maxcol) * block_size
abs_y += (j / maxcol) * block_size
for i in xrange(len(cdata)):
x = round(float(cdata[i,0])*factor, 3)
y = round(float(cdata[i,1])*factor, 3)
prev_x = round(abs_x, 3)
prev_y = round(abs_y, 3)
abs_x += x
abs_y += y
if (lift_pen == 1):
p = "M "+str(abs_x)+","+str(abs_y)+" "
the_color = "rgb("+str(random.randint(0, max_color_intensity))+","+str(int(random.randint(0, max_color_intensity)))+","+str(int(random.randint(0, max_color_intensity)))+")"
else:
p = "M "+str(prev_x)+","+str(prev_y)+" L "+str(abs_x)+","+str(abs_y)+" "
lift_pen = max(cdata[i, 2], cdata[i, 3]) # lift pen if both eos or eoc
#end_of_char = cdata[i, 3] # not used for now.
if color_mode == False:
the_color = "#000"
dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill(the_color)) #, opacity=round(random.random()*0.5+0.5, 3)
dwg.save()
if svg_only == False:
display(SVG(dwg.tostring()))
def draw_stroke_color(data, factor=1, svg_filename = 'sample.svg', stroke_width = 1, block_size = 200, maxcol = 5, svg_only = False, color_mode = True):
def split_sketch(data):
# split a sketch with many eoc into an array of sketches, each with just one eoc at the end.
# ignores last stub with no eoc.
counter = 0
result = []
for i in xrange(len(data)):
eoc = data[i, 3]
if eoc > 0:
result.append(data[counter:i+1])
counter = i+1
#if (counter < len(data)): # ignore the rest
# result.append(data[counter:])
return result
data = np.array(data, dtype=np.float32)
data = split_sketch(data)
draw_stroke_color_array(data, factor, svg_filename, stroke_width, block_size, maxcol, svg_only, color_mode)
class SketchLoader():
def __init__(self, batch_size=50, seq_length=300, scale_factor = 1.0, data_filename = "kanji"):
self.data_dir = "./data"
self.batch_size = batch_size
self.seq_length = seq_length
self.scale_factor = scale_factor # divide data by this factor
data_file = os.path.join(self.data_dir, data_filename+".cpkl")
raw_data_dir = os.path.join(self.data_dir, data_filename)
if not (os.path.exists(data_file)) :
print "creating training data cpkl file from raw source"
self.length_data = self.preprocess(raw_data_dir, data_file)
self.load_preprocessed(data_file)
self.num_samples = len(self.raw_data)
self.index = range(self.num_samples) # this list will be randomized later.
self.reset_index_pointer()
def preprocess(self, data_dir, data_file):
# create data file from raw xml files from iam handwriting source.
len_data = []
def cubicbezier(x0, y0, x1, y1, x2, y2, x3, y3, n=20):
# from http://rosettacode.org/wiki/Bitmap/B%C3%A9zier_curves/Cubic
pts = []
for i in range(n+1):
t = float(i) / float(n)
a = (1. - t)**3
b = 3. * t * (1. - t)**2
c = 3.0 * t**2 * (1.0 - t)
d = t**3
x = float(a * x0 + b * x1 + c * x2 + d * x3)
y = float(a * y0 + b * y1 + c * y2 + d * y3)
pts.append( (x, y) )
return pts
def get_path_strings(svgfile):
tree = ET.parse(svgfile)
p = []
for elem in tree.iter():
if elem.attrib.has_key('d'):
p.append(elem.attrib['d'])
return p
def build_lines(svgfile, line_length_threshold = 10.0, min_points_per_path = 1, max_points_per_path = 3):
# we don't draw lines less than line_length_threshold
path_strings = get_path_strings(svgfile)
lines = []
for path_string in path_strings:
full_path = parse_path(path_string)
for i in range(len(full_path)):
p = full_path[i]
if type(p) != Line and type(p) != CubicBezier:
print "encountered an element that is not just a line or bezier "
print "type: ",type(p)
print p
else:
x_start = p.start.real
y_start = p.start.imag
x_end = p.end.real
y_end = p.end.imag
line_length = np.sqrt((x_end-x_start)*(x_end-x_start)+(y_end-y_start)*(y_end-y_start))
len_data.append(line_length)
points = []
if type(p) == CubicBezier:
x_con1 = p.control1.real
y_con1 = p.control1.imag
x_con2 = p.control2.real
y_con2 = p.control2.imag
n_points = int(line_length / line_length_threshold)+1
n_points = max(n_points, min_points_per_path)
n_points = min(n_points, max_points_per_path)
points = cubicbezier(x_start, y_start, x_con1, y_con1, x_con2, y_con2, x_end, y_end, n_points)
else:
points = [(x_start, y_start), (x_end, y_end)]
if i == 0: # only append the starting point for svg
lines.append([points[0][0], points[0][1], 0, 0]) # put eoc to be zero
for j in range(1, len(points)):
eos = 0
if j == len(points)-1 and i == len(full_path)-1:
eos = 1
lines.append([points[j][0], points[j][1], eos, 0]) # put eoc to be zero
lines = np.array(lines, dtype=np.float32)
# make it relative moves
lines[1:,0:2] -= lines[0:-1,0:2]
lines[-1,3] = 1 # end of character
lines[0] = [0, 0, 0, 0] # start at origin
return lines[1:]
# build the list of xml files
filelist = []
# Set the directory you want to start from
rootDir = data_dir
for dirName, subdirList, fileList in os.walk(rootDir):
#print('Found directory: %s' % dirName)
for fname in fileList:
#print('\t%s' % fname)
filelist.append(dirName+"/"+fname)
# build stroke database of every xml file inside iam database
sketch = []
for i in range(len(filelist)):
if (filelist[i][-3:] == 'svg'):
print 'processing '+filelist[i]
sketch.append(build_lines(filelist[i]))
f = open(data_file,"wb")
cPickle.dump(sketch, f, protocol=2)
f.close()
return len_data
def load_preprocessed(self, data_file):
f = open(data_file,"rb")
self.raw_data = cPickle.load(f)
# scale the data here, rather than at the data construction (since scaling may change)
for data in self.raw_data:
data[:,0:2] /= self.scale_factor
f.close()
def next_batch(self):
# returns a set of batches, but the constraint is that the start of each input data batch
# is the start of a new character (although the end of a batch doesn't have to be end of a character)
def next_seq(n):
result = np.zeros((n, 5), dtype=np.float32) # x, y, [eos, eoc, cont] tokens
#result[0, 2:4] = 1 # set eos and eoc to true for first point
# experimental line below, put a random factor between 70-130% to generate more examples
rand_scale_factor_x = np.random.rand()*0.6+0.7
rand_scale_factor_y = np.random.rand()*0.6+0.7
idx = 0
data = self.current_data()
for i in xrange(n):
result[i, 0:4] = data[idx] # eoc = 0.0
result[i, 4] = 1 # continue on stroke
if (result[i, 2] > 0 or result[i, 3] > 0):
result[i, 4] = 0
idx += 1
if (idx >= len(data)-1): # skip to next sketch example next time and mark eoc
result[i, 4] = 0
result[i, 3] = 1
result[i, 2] = 0 # overrides end of stroke one-hot
idx = 0
self.tick_index_pointer()
data = self.current_data()
assert(result[i, 2:5].sum() == 1)
            self.tick_index_pointer() # advance to the next sketch even if the current one wasn't fully consumed
result[:, 0] *= rand_scale_factor_x
result[:, 1] *= rand_scale_factor_y
return result
skip_length = self.seq_length+1
batch = []
for i in xrange(self.batch_size):
seq = next_seq(skip_length)
batch.append(seq)
batch = np.array(batch, dtype=np.float32)
return batch[:,0:-1], batch[:, 1:]
def current_data(self):
return self.raw_data[self.index[self.pointer]]
def tick_index_pointer(self):
self.pointer += 1
if (self.pointer >= len(self.raw_data)):
self.pointer = 0
self.epoch_finished = True
def reset_index_pointer(self):
# randomize order for the raw list in the next go.
self.pointer = 0
self.epoch_finished = False
self.index = np.random.permutation(self.index)
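# --- Hypothetical usage sketch (not part of the loader above) ---
# Illustrates the batch layout produced by SketchLoader.next_batch(): each row
# of a sequence is [dx, dy, eos, eoc, cont], so dropping the last column gives
# the 4-column format expected by draw_stroke_color. The "kanji" data set under
# ./data is assumed to already exist (either as kanji.cpkl or raw svg files).
if __name__ == '__main__':
    loader = SketchLoader(batch_size=2, seq_length=100, data_filename="kanji")
    x_batch, y_batch = loader.next_batch()  # both shaped (2, 100, 5)
    # Render the first input sequence to an SVG file without IPython display.
    draw_stroke_color(x_batch[0][:, 0:4], svg_filename='preview.svg', svg_only=True)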
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
__all__ = [
"LinearOperatorLowRankUpdate",
]
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
      diag=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
  for `solve` and `determinant`). Then, if `x.shape = [N, R]`,
* `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular`, `self_adjoint`, `positive_definite`,
`diag_update_positive` and `square`. These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]` real `float16`, `float32` or
`float64` `LinearOperator`. This is `L` above.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
        Default is `None`, unless `base_operator` is positive-definite,
        `v = None` (meaning `u = v`), and `is_diag_update_positive` is `True`,
        in which case this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
# TODO(langmore) support complex types.
# Complex types are not allowed due to tf.cholesky() requiring float.
# If complex dtypes are allowed, we update the following
# 1. is_diag_update_positive should still imply that `diag > 0`, but we need
# to remind the user that this implies diag is real. This is needed
# because if diag has non-zero imaginary part, it will not be
# self-adjoint positive definite.
dtype = base_operator.dtype
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
]
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
    # The condition for using a Cholesky is sufficient for SPD, and no
    # weaker choice of these hints leads to SPD. Therefore, the following
    # line reads "if hints indicate SPD..."
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = ops.convert_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = ops.convert_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = ops.convert_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
check_ops.assert_same_float_dtype((base_operator, self.u, self.v,
self._diag_update))
self._check_shapes()
# Pre-compute the so-called "capacitance" matrix
# C := D^{-1} + V^H L^{-1} U
self._capacitance = self._make_capacitance()
if self._use_cholesky:
self._chol_capacitance = linalg_ops.cholesky(self._capacitance)
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.get_shape(), self.v.get_shape())
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
self.base_operator.domain_dimension.assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
uv_shape[-1].assert_is_compatible_with(self._diag_update.get_shape()[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.get_shape()[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
1. / self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if self.u.get_shape()[-1].value is not None:
r = self.u.get_shape()[-1].value
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
self._diag_inv_operator = self._diag_operator
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.u.get_shape()[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
array_ops.shape(self.u)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
u = self.u
v = self.v
l = self.base_operator
d = self.diag_operator
leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.matmul(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.matmul(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
det_c = linalg_ops.matrix_determinant(self._capacitance)
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _log_abs_determinant(self):
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), reduction_indices=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._capacitance)
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
l = self.base_operator
if adjoint:
v = self.u
u = self.v
else:
v = self.v
u = self.u
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
self._chol_capacitance, vh_linv_rhs)
else:
capinv_vh_linv_rhs = linalg_ops.matrix_solve(
self._capacitance, vh_linv_rhs, adjoint=adjoint)
    # U C^{-1} V^H L^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(self.u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True)
    # D^{-1} + V^H L^{-1} U
capacitance = self._diag_inv_operator.add_to_tensor(vh_linv_u)
return capacitance
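# --- Hypothetical numeric check (not part of the operator above) ---
# A small NumPy sketch of the two identities the operator relies on: the
# matrix determinant lemma behind _determinant/_log_abs_determinant and the
# Woodbury identity behind _solve. All names below are local to this sketch;
# NumPy is imported only here and is not a dependency of the class above.
if __name__ == "__main__":
  import numpy as np

  rng = np.random.RandomState(0)
  n, k = 4, 2
  base = np.diag(rng.uniform(1.0, 2.0, size=n))   # L: an SPD base operator
  u = rng.randn(n, k)                             # U (and V, i.e. the v=None case)
  d = np.diag(rng.uniform(1.0, 2.0, size=k))      # D: diag_update
  a = base + u.dot(d).dot(u.T)                    # A = L + U D U^H (real case)

  # Capacitance matrix C = D^{-1} + U^H L^{-1} U.
  cap = np.linalg.inv(d) + u.T.dot(np.linalg.solve(base, u))

  # Matrix determinant lemma: det(A) = det(C) det(D) det(L).
  assert np.allclose(np.linalg.det(a),
                     np.linalg.det(cap) * np.linalg.det(d) * np.linalg.det(base))

  # Woodbury identity: A^{-1} rhs = L^{-1} rhs - L^{-1} U C^{-1} U^H L^{-1} rhs.
  rhs = rng.randn(n, 3)
  linv_rhs = np.linalg.solve(base, rhs)
  woodbury = linv_rhs - np.linalg.solve(
      base, u.dot(np.linalg.solve(cap, u.T.dot(linv_rhs))))
  assert np.allclose(np.linalg.solve(a, rhs), woodbury)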
|
|
import npc
import pytest
import subprocess
def run_create(tmp_path, *extra_args):
if not extra_args:
extra_args = []
npc.cli.start(['--campaign', str(tmp_path), 'werewolf'] + list(extra_args))
def test_calls_correct_function(tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name')
npc.commands.create_character.werewolf.assert_called_once()
class TestTemplateOption:
def test_template_always_werewolf(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'character_name', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert args == ('character_name','auspice_name')
class TestNameOption:
def test_must_be_filled(self, tmp_path, mocker, capsys):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
with pytest.raises(BaseException):
run_create(tmp_path)
_, err = capsys.readouterr()
assert 'name' in err
def test_accepts_all_strings(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'character_name', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert args == ('character_name','auspice_name')
class TestAuspiceOption:
def test_must_be_filled(self, tmp_path, mocker, capsys):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
with pytest.raises(BaseException):
run_create(tmp_path, 'name')
_, err = capsys.readouterr()
assert 'auspice' in err
def test_accepts_all_strings(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'character_name', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert args == ('character_name','auspice_name')
class TestTribeOption:
def test_defaults_to_none(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'name', 'auspice')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['tribe'] is None
@pytest.mark.parametrize('argname', ['-t', '--tribe'])
def test_accepts_short_and_long_arg(self, argname, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'name', 'auspice', argname, 'test_tribe')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['tribe'] == 'test_tribe'
class TestPackOption:
def test_defaults_to_none(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'name', 'auspice')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['pack'] is None
@pytest.mark.parametrize('argname', ['-p', '--pack'])
def test_accepts_short_and_long_arg(self, argname, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'name', 'auspice', argname, 'test_pack')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['pack'] == 'test_pack'
class TestGroupsOption:
def test_defaults_to_none(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['groups'] is None
@pytest.mark.parametrize('argname', ['-g', '--groups'])
def test_accepts_short_and_long_arg(self, argname, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', argname, 'g1')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert 'g1' in kwargs['groups']
def test_accepts_many_names(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '-g', 'g1', 'g2', 'g3')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['groups'] == ['g1', 'g2', 'g3']
class TestDeadOption:
def test_defaults_to_false(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['dead'] == False
def test_accepts_long_opt(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--dead')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['dead'] != False
def test_allows_no_text(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--dead')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['dead'] == ''
def test_allows_single_arg(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--dead', 'fifteen ways')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['dead'] == 'fifteen ways'
class TestForeignOption:
def test_defaults_to_false(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['foreign'] == False
def test_accepts_long_opt(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--foreign')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['foreign'] != False
def test_allows_no_text(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--foreign')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['foreign'] == ''
def test_allows_single_arg(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--foreign', 'over there')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['foreign'] == 'over there'
class TestLocationOption:
def test_defaults_to_false(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['location'] == False
def test_accepts_long_opt(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
run_create(tmp_path, 'hi', 'auspice_name', '--location', 'here')
args, kwargs = npc.commands.create_character.werewolf.call_args
assert kwargs['location'] == 'here'
def test_requires_text(self, tmp_path, mocker):
mocker.patch('npc.commands.create_character.werewolf', autospec=True)
with pytest.raises(BaseException):
run_create(tmp_path, 'hi', 'auspice_name', '--location')
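# --- Hypothetical helper sketch (not part of the tests above) ---
# Every test above patches npc.commands.create_character.werewolf before
# calling run_create; a pytest fixture like the one below (the name is
# illustrative only) could remove that repetition while keeping the
# assertions unchanged.
@pytest.fixture
def werewolf_mock(mocker):
    return mocker.patch('npc.commands.create_character.werewolf', autospec=True)

def test_calls_correct_function_via_fixture(tmp_path, werewolf_mock):
    run_create(tmp_path, 'hi', 'auspice_name')
    werewolf_mock.assert_called_once()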
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class tmtrafficpolicy(base_resource) :
""" Configuration for TM traffic policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._action = ""
self._hits = 0
self.___count = 0
@property
def name(self) :
"""Name for the traffic policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the traffic policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
"""Expression, against which traffic is evaluated. Written in the classic syntax.
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
"""Expression, against which traffic is evaluated. Written in the classic syntax.
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
"""Name of the action to apply to requests or connections that match this policy.<br/>Minimum length = 1.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
"""Name of the action to apply to requests or connections that match this policy.<br/>Minimum length = 1
"""
try :
self._action = action
except Exception as e:
raise e
@property
def hits(self) :
"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(tmtrafficpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.tmtrafficpolicy
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add tmtrafficpolicy.
"""
try :
if type(resource) is not list :
addresource = tmtrafficpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete tmtrafficpolicy.
"""
try :
if type(resource) is not list :
deleteresource = tmtrafficpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update tmtrafficpolicy.
"""
try :
if type(resource) is not list :
updateresource = tmtrafficpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of tmtrafficpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = tmtrafficpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the tmtrafficpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = tmtrafficpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = tmtrafficpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [tmtrafficpolicy() for _ in range(len(name))]
obj = [tmtrafficpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = tmtrafficpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of tmtrafficpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the tmtrafficpolicy resources configured on NetScaler.
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of tmtrafficpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class tmtrafficpolicy_response(base_response) :
def __init__(self, length=1) :
self.tmtrafficpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.tmtrafficpolicy = [tmtrafficpolicy() for _ in range(length)]
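# --- Hypothetical usage sketch (not part of the resource class above) ---
# Shows the add/count pattern exposed by the classmethods above. The
# nitro_service client is assumed to come from the SDK's
# nssrc.com.citrix.netscaler.nitro.service.nitro_service module, and the NSIP,
# credentials, rule and action names below are placeholders, not values taken
# from this file.
if __name__ == '__main__':
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("10.0.0.1", "http")
	client.login("nsroot", "nsroot")
	policy = tmtrafficpolicy()
	policy.name = "example_tm_pol"
	policy.rule = "ns_true"
	policy.action = "example_tm_act"
	tmtrafficpolicy.add(client, policy)
	print(tmtrafficpolicy.count(client))
	client.logout()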
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for progress callbacks."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import time
from gslib.thread_message import ProgressMessage
from gslib.utils import parallelism_framework_util
# Default upper and lower bounds for progress callback frequency.
_START_BYTES_PER_CALLBACK = 1024 * 256
_MAX_BYTES_PER_CALLBACK = 1024 * 1024 * 100
_TIMEOUT_SECONDS = 1
# Max width of URL to display in progress indicator. Wide enough to allow
# 15 chars for x/y display on an 80 char wide terminal.
MAX_PROGRESS_INDICATOR_COLUMNS = 65
class ProgressCallbackWithTimeout(object):
"""Makes progress callbacks at least once every _TIMEOUT_SECONDS.
  This keeps throughput estimates from going stale without making callbacks
  excessively often.
"""
def __init__(self,
total_size,
callback_func,
start_bytes_per_callback=_START_BYTES_PER_CALLBACK,
timeout=_TIMEOUT_SECONDS):
"""Initializes the callback with timeout.
Args:
total_size: Total bytes to process. If this is None, size is not known
at the outset.
callback_func: Func of (int: processed_so_far, int: total_bytes)
used to make callbacks.
start_bytes_per_callback: Lower bound of bytes per callback.
      timeout: Maximum number of seconds without a callback.
"""
self._bytes_per_callback = start_bytes_per_callback
self._callback_func = callback_func
self._total_size = total_size
self._last_time = time.time()
self._timeout = timeout
self._bytes_processed_since_callback = 0
self._callbacks_made = 0
self._total_bytes_processed = 0
def Progress(self, bytes_processed):
"""Tracks byte processing progress, making a callback if necessary."""
self._bytes_processed_since_callback += bytes_processed
cur_time = time.time()
if (self._bytes_processed_since_callback > self._bytes_per_callback or
(self._total_size is not None and self._total_bytes_processed +
self._bytes_processed_since_callback >= self._total_size) or
        (cur_time - self._last_time) > self._timeout):
self._total_bytes_processed += self._bytes_processed_since_callback
# TODO: We check if >= total_size and truncate because JSON uploads count
# multipart metadata during their send progress. If the size is unknown,
# we can't do this and the progress message will make it appear that we
# send more than the original stream.
if self._total_size is not None:
bytes_sent = min(self._total_bytes_processed, self._total_size)
else:
bytes_sent = self._total_bytes_processed
self._callback_func(bytes_sent, self._total_size)
self._bytes_processed_since_callback = 0
self._callbacks_made += 1
self._last_time = cur_time
class ProgressCallbackWithBackoff(object):
"""Makes progress callbacks with exponential backoff to a maximum value.
This prevents excessive log message output.
"""
def __init__(self,
total_size,
callback_func,
start_bytes_per_callback=_START_BYTES_PER_CALLBACK,
max_bytes_per_callback=_MAX_BYTES_PER_CALLBACK,
calls_per_exponent=10):
"""Initializes the callback with backoff.
Args:
total_size: Total bytes to process. If this is None, size is not known
at the outset.
callback_func: Func of (int: processed_so_far, int: total_bytes)
used to make callbacks.
start_bytes_per_callback: Lower bound of bytes per callback.
max_bytes_per_callback: Upper bound of bytes per callback.
calls_per_exponent: Number of calls to make before reducing rate.
"""
self._bytes_per_callback = start_bytes_per_callback
self._callback_func = callback_func
self._calls_per_exponent = calls_per_exponent
self._max_bytes_per_callback = max_bytes_per_callback
self._total_size = total_size
self._bytes_processed_since_callback = 0
self._callbacks_made = 0
self._total_bytes_processed = 0
def Progress(self, bytes_processed):
"""Tracks byte processing progress, making a callback if necessary."""
self._bytes_processed_since_callback += bytes_processed
if (self._bytes_processed_since_callback > self._bytes_per_callback or
        (self._total_size is not None and
         self._total_bytes_processed + self._bytes_processed_since_callback >=
         self._total_size)):
self._total_bytes_processed += self._bytes_processed_since_callback
# TODO: We check if >= total_size and truncate because JSON uploads count
# multipart metadata during their send progress. If the size is unknown,
# we can't do this and the progress message will make it appear that we
# send more than the original stream.
if self._total_size is not None:
bytes_sent = min(self._total_bytes_processed, self._total_size)
else:
bytes_sent = self._total_bytes_processed
self._callback_func(bytes_sent, self._total_size)
self._bytes_processed_since_callback = 0
self._callbacks_made += 1
if self._callbacks_made > self._calls_per_exponent:
self._bytes_per_callback = min(self._bytes_per_callback * 2,
self._max_bytes_per_callback)
self._callbacks_made = 0
class FileProgressCallbackHandler(object):
"""Tracks progress info for large operations like file copy or hash.
Information is sent to the status_queue, which will print it in the
UI Thread.
"""
def __init__(self,
status_queue,
start_byte=0,
override_total_size=None,
src_url=None,
component_num=None,
dst_url=None,
operation_name=None):
"""Initializes the callback handler.
Args:
status_queue: Queue for posting status messages for UI display.
start_byte: The beginning of the file component, if one is being used.
override_total_size: The size of the file component, if one is being used.
src_url: FileUrl/CloudUrl representing the source file.
component_num: Indicates the component number, if any.
dst_url: FileUrl/CloudUrl representing the destination file, or None
for unary operations like hashing.
operation_name: String representing the operation name
"""
self._status_queue = status_queue
self._start_byte = start_byte
self._override_total_size = override_total_size
self._component_num = component_num
self._src_url = src_url
self._dst_url = dst_url
self._operation_name = operation_name
# Ensures final newline is written once even if we get multiple callbacks.
self._last_byte_written = False
# Function signature is in boto callback format, which cannot be changed.
def call(
self, # pylint: disable=invalid-name
last_byte_processed,
total_size):
"""Gathers information describing the operation progress.
Actual message is printed to stderr by UIThread.
Args:
last_byte_processed: The last byte processed in the file. For file
components, this number should be in the range
[start_byte:start_byte + override_total_size].
total_size: Total size of the ongoing operation.
"""
if self._last_byte_written:
return
if self._override_total_size:
total_size = self._override_total_size
parallelism_framework_util.PutToQueueWithTimeout(
self._status_queue,
ProgressMessage(total_size,
last_byte_processed - self._start_byte,
self._src_url,
time.time(),
component_num=self._component_num,
operation_name=self._operation_name,
dst_url=self._dst_url))
if total_size and last_byte_processed - self._start_byte == total_size:
self._last_byte_written = True
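# Usage sketch (assumed caller, for illustration only): the bound `call` method
# is what gets handed to lower layers as a boto-style progress callback, e.g.
#
#   handler = FileProgressCallbackHandler(status_queue, src_url=src_url,
#                                         operation_name='Hashing')
#   download_fn(..., progress_callback=handler.call)  # download_fn is hypothetical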
|
|
import os
import time
import random
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.operations.scenario_operations as sce_ops
import zstackwoodpecker.header.host as host_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Path = [[]]
index = 0
tag = "VM_TEST_REBOOT"
backup = None
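# `record` wraps vm_op_test so that every executed operation name is appended to
# the current segment of `Path`; each time the tagged operation (VM_TEST_REBOOT)
# occurs, a fresh segment is opened.  error_cleanup() dumps the recorded segments
# via print_path() so a failing operation sequence can be reproduced.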
def record(fun):
def recorder(vm, op):
global index
if op != tag:
Path[index].append(op)
elif op == tag:
Path.append([op])
Path[index].append(op)
index += 1
return fun(vm, op)
return recorder
VM_RUNNING_OPS = [
"VM_TEST_SNAPSHOT",
"VM_TEST_CREATE_IMG",
"VM_TEST_RESIZE_RVOL",
"VM_TEST_NONE"
]
VM_STOPPED_OPS = [
"VM_TEST_SNAPSHOT",
"VM_TEST_CREATE_IMG",
"VM_TEST_RESIZE_RVOL",
"VM_TEST_CHANGE_OS",
"VM_TEST_RESET",
"VM_TEST_NONE"
]
VM_STATE_OPS = [
"VM_TEST_STOP",
"VM_TEST_REBOOT",
"VM_TEST_NONE"
]
@record
def vm_op_test(vm, op):
test_util.test_logger(vm.vm.name + "-------" + op)
ops = {
"VM_TEST_STOP": stop,
"VM_TEST_REBOOT": reboot,
"VM_TEST_NONE": do_nothing,
"VM_TEST_MIGRATE": migrate,
"VM_TEST_SNAPSHOT": create_snapshot,
"VM_TEST_CREATE_IMG": create_image,
"VM_TEST_RESIZE_RVOL": resize_rvol,
"VM_TEST_CHANGE_OS": change_os,
"VM_TEST_RESET": reset,
"VM_TEST_BACKUP": back_up
}
ops[op](vm)
def stop(vm):
vm.stop()
def reboot(vm):
vm.reboot()
def do_nothing(vm):
pass
def reset(vm):
vm.reinit()
def migrate(vm_obj):
ps = test_lib.lib_get_primary_storage_by_vm(vm_obj.get_vm())
if ps.type in [inventory.CEPH_PRIMARY_STORAGE_TYPE, 'SharedMountPoint', inventory.NFS_PRIMARY_STORAGE_TYPE,
'SharedBlock']:
target_host = test_lib.lib_find_random_host(vm_obj.vm)
vm_obj.migrate(target_host.uuid)
elif ps.type in [inventory.LOCAL_STORAGE_TYPE]:
vm_obj.check()
target_host = test_lib.lib_find_random_host(vm_obj.vm)
vol_ops.migrate_volume(vm_obj.get_vm().allVolumes[0].uuid, target_host.uuid)
vm_obj.start()
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
else:
test_util.test_fail("FOUND NEW STORAGTE TYPE. FAILED")
def create_snapshot(vm_obj):
vol_obj = zstack_volume_header.ZstackTestVolume()
vol_obj.set_volume(test_lib.lib_get_root_volume(vm_obj.get_vm()))
snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
snapshots_root.set_utility_vm(vm_obj)
snapshots_root.set_target_volume(vol_obj)
snapshots_root.create_snapshot('create_data_snapshot1')
snapshots_root.check()
sp1 = snapshots_root.get_current_snapshot()
#vm_obj.stop()
#vm_obj.check()
#snapshots_root.use_snapshot(sp1)
#vm_obj.start()
#test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def create_image(vm_obj):
volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm_obj.vm)
image_option = test_util.ImageOption()
image_option.set_root_volume_uuid(volume_uuid)
image_option.set_name('image_resize_template')
image_option.set_backup_storage_uuid_list([bs_list[0].uuid])
image = img_ops.create_root_volume_template(image_option)
new_image = zstack_image_header.ZstackTestImage()
new_image.set_creation_option(image_option)
new_image.set_image(image)
new_image.check()
new_image.clean()
def resize_rvol(vm_obj):
vol_size = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
set_size = 1024 * 1024 * 1024 + int(vol_size)
vol_ops.resize_volume(volume_uuid, set_size)
vm_obj.update()
vol_size_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
# if set_size != vol_size_after:
# test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)
# vm_obj.check()
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def change_os(vm_obj):
vm_uuid = vm_obj.get_vm().uuid
last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
last_ps_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
cond = res_ops.gen_query_conditions("system", '=', "false")
cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
image_uuid = random.choice(res_ops.query_resource(res_ops.IMAGE, cond)).uuid
vm_ops.change_vm_image(vm_uuid, image_uuid)
vm_obj.start()
vm_obj.update()
# check whether the vm is running successfully
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
# check whether the network config has changed
l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
if l3network_uuid_after != last_l3network_uuid:
        test_util.test_fail('Change VM Image Failed. The network config has changed.')
# check whether primarystorage has changed
ps_uuid_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
if ps_uuid_after != last_ps_uuid:
        test_util.test_fail('Change VM Image Failed. The primary storage has changed.')
def back_up(vm_obj):
global backup
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
backup_option = test_util.BackupOption()
backup_option.set_name("test_compare")
backup_option.set_volume_uuid(test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid)
backup_option.set_backupStorage_uuid(bs.uuid)
backup = vol_ops.create_backup(backup_option)
def print_path(Path):
print("=" * 43 + "PATH" + "=" * 43)
for i in range(len(Path)):
path = ''
for j in range(len(Path[i])):
if j == len(Path[i]) - 1:
path += Path[i][j]
else:
path += (Path[i][j] + " --> ")
print(path)
print("=" * 90)
def test():
    global test_obj_dict, VM_RUNNING_OPS, VM_STOPPED_OPS, VM_STATE_OPS, backup
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
vm_name = "test_vm"
cond = res_ops.gen_query_conditions("system", '=', "false")
cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
cond = res_ops.gen_query_conditions("category", '=', "Private")
l3_name = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].name
vm = test_stub.create_vm(vm_name, img_name, l3_name)
path = "VM_TEST_REBOOT --> VM_TEST_MIGRATE --> VM_TEST_BACKUP --> VM_TEST_NONE --> VM_TEST_CREATE_IMG --> VM_TEST_BACKUP"
path_array = path.split(" --> ")
for i in path_array:
if i == "VM_TEST_MIGRATE" and ps.type == inventory.LOCAL_STORAGE_TYPE:
vm.stop()
vm_op_test(vm, i)
continue
if vm.state == "Stopped":
vm.start()
if i == "VM_TEST_BACKUP":
if test_lib.lib_is_vm_l3_has_vr(vm.vm):
test_lib.TestHarness = test_lib.TestHarnessVR
time.sleep(60)
cmd = "echo 111 > /root/" + str(int(time.time()))
test_lib.lib_execute_command_in_vm(vm.vm,cmd)
vm.suspend()
# create_snapshot/backup
vm_op_test(vm, "VM_TEST_BACKUP")
# compare vm & image created by backup
compare(ps, vm, backup)
vm.resume()
else:
vm_op_test(vm, i)
test_util.test_pass("path: " + path + " test pass")
def error_cleanup():
global test_obj_dict
print_path(Path)
def compare(ps, vm, backup):
test_util.test_logger("-----------------compare----------------")
# find vm_host
host = test_lib.lib_find_host_by_vm(vm.vm)
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
root_volume = test_lib.lib_get_root_volume(vm.get_vm())
vm_path = root_volume.installPath
if ps.type == "SharedBlock":
vm_path = "/dev/" + root_volume.installPath.split("/")[2] + "/" + root_volume.installPath.split("/")[3]
test_util.test_logger(vm_path)
name = backup.backupStorageRefs[0].installPath.split("/")[2]
id = backup.backupStorageRefs[0].installPath.split("/")[3]
# compare vm_root_volume & image
cmd = "mkdir /root/%s;" \
"/usr/local/zstack/imagestore/bin/zstcli " \
"-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
"-url=%s:8000 " \
"pull -installpath /root/%s/old.qcow2 %s:%s;" \
"qemu-img compare %s /root/%s/old.qcow2;" % (id, bs.hostname, id, name, id, vm_path, id)
# clean image
result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
if result != "Images are identical.\n":
test_util.test_fail("compare vm_root_volume & image created by backup")
|
|
import random
from ..config import query
from ..domains import ZZ
from .polyerrors import CoercionFailed, DomainError
from .rings import PolyElement, PolynomialRing
from .rootisolation import _FindRoot
class UnivarPolynomialRing(PolynomialRing, _FindRoot):
"""A class for representing univariate polynomial rings."""
def __call__(self, element):
if isinstance(element, list):
try:
return self.from_terms(element)
except (TypeError, ValueError):
return self.from_list(element)
return super().__call__(element)
def from_list(self, element):
return self.from_dict({(i,): c for i, c in enumerate(element)})
def _random(self, n, a, b, percent=None):
domain = self.domain
if percent is None:
percent = 100//(b - a)
percent = min(max(0, percent), 100)
nz = ((n + 1)*percent)//100
f = []
while len(f) < n + 1:
v = domain.convert(random.randint(a, b))
if v:
f.append(v)
if nz:
f[-nz:] = [domain.zero]*nz
lt = f.pop(0)
random.shuffle(f)
f.insert(0, lt)
return self.from_list(list(reversed(f)))
def _gf_random(self, n, irreducible=False):
domain = self.domain
assert domain.is_FiniteField
while True:
f = [domain(random.randint(0, domain.order - 1))
for i in range(n)] + [domain.one]
f = self.from_list(f)
if not irreducible or f.is_irreducible:
return f
def dispersionset(self, p, q=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
Note that the definition of the dispersion is not symmetric:
>>> R, x = ring('x', QQ)
>>> fp = x**4 - 3*x**2 + 1
>>> gp = fp.shift(-3)
>>> R.dispersionset(fp, gp)
{2, 3, 4}
>>> R.dispersionset(gp, fp)
set()
Computing the dispersion also works over field extensions:
>>> R, x = ring('x', QQ.algebraic_field(sqrt(5)))
>>> fp = x**2 + sqrt(5)*x - 1
>>> gp = x**2 + (2 + sqrt(5))*x + sqrt(5)
>>> R.dispersionset(fp, gp)
{2}
>>> R.dispersionset(gp, fp)
{1, 4}
We can even perform the computations for polynomials
having symbolic coefficients:
>>> D, a = ring('a', QQ)
>>> R, x = ring('x', D)
>>> fp = 4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x
>>> R.dispersionset(fp)
{0, 1}
References
==========
* :cite:`Man1994disp`
* :cite:`Koepf98`
* :cite:`Abramov71rat`
* :cite:`Man1993indefsum`
"""
# Check for valid input
same = q is None
if same:
q = p
if p.ring is not q.ring:
raise ValueError('Polynomials must have the same ring')
fdomain = self.domain.field
# We define the dispersion of constant polynomials to be zero
if p.degree() < 1 or q.degree() < 1:
return {0}
# Factor p and q over the rationals
fp = p.factor_list()
fq = q.factor_list() if not same else fp
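        # For each pair of irreducible factors (s, t) of equal degree n and equal
        # leading coefficient, gcd(s(x), t(x + a)) != 1 forces s(x) == t(x + a),
        # so the only candidate shift is a = (s_{n-1} - t_{n-1})/(n*lc(t)); it is
        # kept only if it is a non-negative integer and the shifted factor matches.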
# Iterate over all pairs of factors
J = set()
for s, _ in fp[1]:
for t, _ in fq[1]:
m = s.degree()
n = t.degree()
if n != m:
continue
an = s.LC
bn = t.LC
if an - bn:
continue
# Note that the roles of `s` and `t` below are switched
# w.r.t. the original paper. This is for consistency
# with the description in the book of W. Koepf.
anm1 = s[(m - 1,)]
bnm1 = t[(n - 1,)]
alpha = fdomain(anm1 - bnm1)/fdomain(n*bn)
if alpha not in ZZ:
continue
alpha = ZZ.convert(alpha)
if alpha < 0 or alpha in J:
continue
if n > 1 and s - t.shift(alpha):
continue
J.add(alpha)
return J
class UnivarPolyElement(PolyElement):
"""Element of univariate distributed polynomial ring."""
def all_coeffs(self):
if self:
return [self[(i,)] for i in range(self.degree() + 1)]
else:
return [self.parent.domain.zero]
def shift(self, a):
return self.compose(0, self.ring.gens[0] + a)
def half_gcdex(self, other):
"""
Half extended Euclidean algorithm in `F[x]`.
Returns ``(s, h)`` such that ``h = gcd(self, other)``
and ``s*self = h (mod other)``.
Examples
========
>>> _, x = ring('x', QQ)
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> f.half_gcdex(g)
(-1/5*x + 3/5, x + 1)
"""
ring = self.ring
domain = ring.domain
if not domain.is_Field:
raise DomainError(f"can't compute half extended GCD over {domain}")
a, b = ring.one, ring.zero
f, g = self, other
while g:
q, r = divmod(f, g)
f, g = g, r
a, b = b, a - q*b
a = a.quo_ground(f.LC)
f = f.monic()
return a, f
@property
def is_cyclotomic(self):
return self.ring._cyclotomic_p(self)
def _right_decompose(self, s):
ring = self.ring
domain = ring.domain
x = ring.gens[0]
n = self.degree()
lc = self.LC
f = self.copy()
g = x**s
r = n // s
for i in range(1, s):
coeff = domain.zero
for j in range(i):
if (n + j - i,) not in f:
continue
assert (s - j,) in g
fc, gc = f[(n + j - i,)], g[(s - j,)]
coeff += (i - r*j)*fc*gc
g[(s - i,)] = domain.quo(coeff, i*r*lc)
g._strip_zero()
return g
def _left_decompose(self, h):
ring = self.ring
g, i = ring.zero, 0
f = self.copy()
while f:
q, r = divmod(f, h)
if r.degree() > 0:
return
else:
g[(i,)] = r.LC
f, i = q, i + 1
g._strip_zero()
return g
def _decompose(self):
df = self.degree()
for s in range(2, df):
if df % s != 0:
continue
h = self._right_decompose(s)
g = self._left_decompose(h)
if g is not None:
return g, h
def decompose(self):
"""
Compute functional decomposition of ``f`` in ``K[x]``.
Given a univariate polynomial ``f`` with coefficients in a field of
characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. ``f o g = f(x + b) o (g - b)``
2. ``x**n o x**m = x**m o x**n``
3. ``T_n o T_m = T_m o T_n``
where ``T_n`` and ``T_m`` are Chebyshev polynomials.
Examples
========
>>> _, x = ring('x', ZZ)
>>> (x**4 - 2*x**3 + x**2).decompose()
[x**2, x**2 - x]
References
==========
* :cite:`Kozen1989decomposition`
"""
F = []
f = self.copy()
while True:
result = f._decompose()
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
def sturm(self):
"""
Computes the Sturm sequence of ``f`` in ``F[x]``.
Given a univariate, square-free polynomial ``f(x)`` returns the
associated Sturm sequence (see e.g. :cite:`Davenport1988systems`)
``f_0(x), ..., f_n(x)`` defined by::
f_0(x), f_1(x) = f(x), f'(x)
f_n = -rem(f_{n-2}(x), f_{n-1}(x))
Examples
========
>>> _, x = ring('x', QQ)
>>> (x**3 - 2*x**2 + x - 3).sturm()
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2/9*x + 25/9, -2079/4]
"""
return self.ring._sturm(self)
def __mul__(self, other):
ring = self.ring
try:
other = ring.convert(other)
except CoercionFailed:
return NotImplemented
if max(self.degree(), other.degree()) > query('KARATSUBA_CUTOFF'):
return self._mul_karatsuba(other)
return super().__mul__(other)
def _mul_karatsuba(self, other):
"""
Multiply dense polynomials in ``K[x]`` using Karatsuba's algorithm.
References
==========
* :cite:`Hoeven02`
"""
ring = self.ring
domain = ring.domain
df = self.degree()
dg = other.degree()
n = max(df, dg) + 1
n2 = n//2
fl = self.slice(0, n2)
gl = other.slice(0, n2)
fh = self.slice(n2, n).quo_term(((n2,), domain.one))
gh = other.slice(n2, n).quo_term(((n2,), domain.one))
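        # Karatsuba recombination: with f = fl + x**n2*fh and g = gl + x**n2*gh,
        # f*g = lo + ((fl + fh)*(gl + gh) - lo - hi)*x**n2 + hi*x**(2*n2),
        # i.e. three half-size multiplications instead of four.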
lo = fl*gl
hi = fh*gh
mid = (fl + fh)*(gl + gh)
mid -= (lo + hi)
return lo + mid.mul_monom((n2,)) + hi.mul_monom((2*n2,))
|
|
#FLM: Mark Feature Generator
###################################################
### THE VALUES BELOW CAN BE EDITED AS NEEDED ######
###################################################
kInstancesDataFileName = "instances"
kPrefsFileName = "MarkFeatureGenerator.prefs"
###################################################
__copyright__ = __license__ = """
Copyright (c) 2010-2013 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Mark Feature Generator v1.3.2 - Mar 10 2013
This script will generate a set of "features.mark" files from the mark data (anchors
and combining marks class) of a Multiple Master (MM) FontLab font, or one "features.mark"
file if the font is Single Master (SM). The "features.mark" file is a text file containing
the font's mark attachment data in features-file definition syntax.
The script can also generate "features.mkmk" file(s).
For information about how the "features.mark/mkmk" files are created, please read the
documentation in the WriteFeaturesMarkFDK.py module that can be found
in FontLab/Studio5/Macros/System/Modules/
For information on how to format the "instances" file, please read the documentation in the
InstanceGenerator.py script.
To access the script's options, hold down the CONTROL key while clicking on the play
button to run the script.
VERY IMPORTANT: For this script to work, all the combining mark glyphs
must be added to an OpenType class named 'COMBINING_MARKS'.
If the font has ligatures and the ligatures have anchors, the ligature glyphs have to be put in
OpenType classes named "LIGATURES_WITH_X_COMPONENTS", where 'X' should be replaced by a number
between 2 and 9 (inclusive). Additionally, the names of the anchors used on the ligature glyphs
need to have a tag (e.g. 1ST, 2ND) which is used for corresponding the anchors with the correct
ligature component. Keep in mind that in Right-To-Left scripts, the first component of the ligature
is the one on the right side of the glyph.
The script will first show a file selection dialog for choosing a folder. The "features.mark"
file(s) will be written to that folder, if the font is SM, or to a sub-directory path
<selected_folder>/<face_name>, if the font is MM. In this case, the face name is derived by
taking the part of the font's PostScript name after the hyphen, or "Regular" is there is no
hyphen. (e.g. if the font's PostScript name is MyFont-BoldItalic, the folder will be named
"BoldItalic")
If the font is MM, the script requires a file, named "instances", which contains all the
instance-specific values. The "instances" file must be a simple text file, located in the
same folder as the MM FontLab file.
==================================================
Versions:
v1.0 - Feb 15 2010 - Initial release
v1.1 - Feb 19 2010 - Added the option to generate 'mkmk' feature
v1.2 - Apr 21 2011 - Added the option of writing the mark classes in a separate file
v1.3 - Jun 15 2012 - Added the option to output the lookups in the format required for Indian scripts.
v1.3.1 - Jul 19 2012 - Changed the description of one of the options in the UI.
v1.3.2 - Mar 10 2013 - Minor improvements.
"""
import os, sys, re, copy, math, time
try:
from AdobeFontLabUtils import checkControlKeyPress, checkShiftKeyPress
import WriteFeaturesMarkFDK
except ImportError,e:
print "Failed to find the Adobe FDK support scripts."
print "Please run the script FDK/Tools/FontLab/installFontLabMacros.py script, and try again."
print "Current directory: ", os.path.abspath(os.getcwd())
print "Current list of search paths for modules: "
import pprint
pprint.pprint(sys.path)
raise e
kFieldsKey = "#KEYS:"
kFamilyName = "FamilyName"
kFontName = "FontName"
kFullName = "FullName"
kWeight = "Weight"
kCoordsKey = "Coords"
kIsBoldKey = "IsBold" # This is changed to kForceBold in the instanceDict when reading in the instance file.
kForceBold = "ForceBold"
kIsItalicKey = "IsItalic"
kExceptionSuffixes = "ExceptionSuffixes"
kExtraGlyphs = "ExtraGlyphs"
kFixedFieldKeys = {
# field index: key name
0:kFamilyName,
1:kFontName,
2:kFullName,
3:kWeight,
4:kCoordsKey,
5:kIsBoldKey,
}
kNumFixedFields = len(kFixedFieldKeys)
kBlueScale = "BlueScale"
kBlueShift = "BlueShift"
kBlueFuzz = "BlueFuzz"
kBlueValues = "BlueValues"
kOtherBlues = "OtherBlues"
kFamilyBlues = "FamilyBlues"
kFamilyOtherBlues = "FamilyOtherBlues"
kStdHW = "StdHW"
kStdVW = "StdVW"
kStemSnapH = "StemSnapH"
kStemSnapV = "StemSnapV"
kAlignmentZonesKeys = [kBlueValues, kOtherBlues, kFamilyBlues, kFamilyOtherBlues]
kTopAlignZonesKeys = [kBlueValues, kFamilyBlues]
kMaxTopZonesSize = 14 # 7 zones
kBotAlignZonesKeys = [kOtherBlues, kFamilyOtherBlues]
kMaxBotZonesSize = 10 # 5 zones
kStdStemsKeys = [kStdHW, kStdVW]
kMaxStdStemsSize = 1
kStemSnapKeys = [kStemSnapH, kStemSnapV]
kMaxStemSnapSize = 12 # including StdStem
class ParseError(ValueError):
pass
def validateArrayValues(arrayList, valuesMustBePositive):
for i in range(len(arrayList)):
try:
arrayList[i] = eval(arrayList[i])
except (NameError, SyntaxError):
return
if valuesMustBePositive:
if arrayList[i] < 0:
return
return arrayList
def readInstanceFile(instancesFilePath):
f = open(instancesFilePath, "rt")
data = f.read()
f.close()
lines = data.splitlines()
i = 0
parseError = 0
keyDict = copy.copy(kFixedFieldKeys)
numKeys = kNumFixedFields
numLines = len(lines)
instancesList = []
for i in range(numLines):
line = lines[i]
# Skip over blank lines
line2 = line.strip()
if not line2:
continue
# Get rid of all comments. If we find a key definition comment line, parse it.
commentIndex = line.find('#')
if commentIndex >= 0:
if line.startswith(kFieldsKey):
if instancesList:
print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
raise ParseError
# parse the line with the field names.
line = line[len(kFieldsKey):]
line = line.strip()
keys = line.split('\t')
keys = map(lambda name: name.strip(), keys)
numKeys = len(keys)
k = kNumFixedFields
while k < numKeys:
keyDict[k] = keys[k]
k +=1
continue
else:
line = line[:commentIndex]
continue
# Must be a data line.
fields = line.split('\t')
fields = map(lambda datum: datum.strip(), fields)
numFields = len(fields)
if (numFields != numKeys):
print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
parseError = 1
continue
instanceDict= {}
#Build a dict from key to value. Some kinds of values needs special processing.
for k in range(numFields):
key = keyDict[k]
field = fields[k]
if not field:
continue
if field in ["Default", "None", "FontBBox"]:
# FontBBox is no longer supported - I calculate the real
# instance fontBBox from the glyph metrics instead,
continue
if key == kFontName:
value = field
elif key in [kExtraGlyphs, kExceptionSuffixes]:
value = eval(field)
elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
try:
value = eval(field) # this works for all three fields.
if key == kIsBoldKey: # need to convert to Type 1 field key.
instanceDict[key] = value
# add kForceBold key.
key = kForceBold
if value == 1:
value = "true"
else:
value = "false"
elif key == kIsItalicKey:
if value == 1:
value = "true"
else:
value = "false"
elif key == kCoordsKey:
if type(value) == type(0):
value = (value,)
except (NameError, SyntaxError):
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
if key in kAlignmentZonesKeys:
if len(value) % 2 != 0:
print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
parseError = 1
continue
if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
if len(value) > kMaxTopZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
				if key in kBotAlignZonesKeys: # The Type 1 spec only allows 5 bottom zones (5 pairs of values)
if len(value) > kMaxBotZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxBotZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kStdStemsKeys:
if len(value) > kMaxStdStemsSize:
print "ERROR: In line %s, the %s field can only have %d value." % (i+1, key, kMaxStdStemsSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
if key in kStemSnapKeys: # The Type 1 spec only allows 12 stem widths, including 1 standard stem
if len(value) > kMaxStemSnapSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxStemSnapSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
else:
# either a single number or a string.
if re.match(r"^[-.\d]+$", field):
value = field #it is a Type 1 number. Pass as is, as a string.
else:
value = field
instanceDict[key] = value
if (kStdHW in instanceDict and kStemSnapH not in instanceDict) or (kStdHW not in instanceDict and kStemSnapH in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdHW, kStemSnapH)
parseError = 1
elif (kStdHW in instanceDict and kStemSnapH in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapH][0] != instanceDict[kStdHW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapH, kStdHW)
parseError = 1
if (kStdVW in instanceDict and kStemSnapV not in instanceDict) or (kStdVW not in instanceDict and kStemSnapV in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdVW, kStemSnapV)
parseError = 1
elif (kStdVW in instanceDict and kStemSnapV in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapV][0] != instanceDict[kStdVW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapV, kStdVW)
parseError = 1
instancesList.append(instanceDict)
if parseError or len(instancesList) == 0:
raise(ParseError)
return instancesList
def handleInstanceLight(f, fontInstanceDict, instanceInfo):
# Set names
f.font_name = fontInstanceDict[kFontName]
instValues = fontInstanceDict[kCoordsKey]
# This name does not go into the CFF font header. It's used in the 'features.kern' to have a record of the instance.
# Builds information about the source font and instance values
for x in range(len(instValues)):
instanceInfo += '_' + str(instValues[x])
f.menu_name = instanceInfo
return f
def makeFaceFolder(root, folder):
facePath = os.path.join(root, folder)
if not os.path.exists(facePath):
os.makedirs(facePath)
return facePath
def handleFontLight(folderPath, fontMM, fontInstanceDict, options):
try:
faceName = fontInstanceDict[kFontName].split('-')[1]
except IndexError:
faceName = 'Regular'
print
print faceName
fontName = fontInstanceDict[kFontName]
instValues = fontInstanceDict[kCoordsKey]
try:
fontInstance = Font(fontMM, instValues) # creates instance
except:
print "Error: Could not create instance <%s> (%s)" % (instValues, fontName)
return
instanceInfo = os.path.basename(fontMM.file_name) # The name of the source MM VFB is recorded as part of the info regarding the instance
fontInstance = handleInstanceLight(fontInstance, fontInstanceDict, instanceInfo)
instanceFolder = makeFaceFolder(folderPath, faceName)
WriteFeaturesMarkFDK.MarkDataClass(fontInstance, instanceFolder, options.trimCasingTags, options.genMkmkFeature, options.writeClassesFile, options.indianScriptsFormat)
def makeFeature(options):
try:
parentDir = os.path.dirname(os.path.abspath(fl.font.file_name))
except AttributeError:
print "The font has not been saved. Please save the font and try again."
return
if fl.font[0].layers_number == 1:
fontSM = fl.font # Single Master Font
else:
fontMM = fl.font # MM Font
		axisNum = int(math.log(fontMM[0].layers_number, 2)) # Number of axes in the font
instancesFilePath = os.path.join(parentDir, kInstancesDataFileName)
if not os.path.isfile(instancesFilePath):
print "Could not find the file named '%s' in the path below\n\t%s" % (kInstancesDataFileName, parentDir)
return
try:
print "Parsing instances file..."
instancesList = readInstanceFile(instancesFilePath)
except ParseError:
print "Error parsing file or file is empty."
return
# A few checks before proceeding...
if instancesList:
		# Make sure that the instance values are compatible with the number of axes in the MM font
for i in range(len(instancesList)):
instanceDict = instancesList[i]
axisVal = instanceDict[kCoordsKey] # Get AxisValues strings
if axisNum != len(axisVal):
				print 'ERROR: The %s value for the instance named %s in the %s file is not compatible with the number of axes in the MM source font.' % (kCoordsKey, instanceDict[kFontName], kInstancesDataFileName)
return
folderPath = fl.GetPathName("Select parent directory to output file(s)")
# Cancel was clicked or Esc key was pressed
if not folderPath:
return
t1 = time.time() # Initiates a timer of the whole process
if fl.font[0].layers_number == 1:
print fontSM.font_name
WriteFeaturesMarkFDK.MarkDataClass(fontSM, folderPath, options.trimCasingTags, options.genMkmkFeature, options.writeClassesFile, options.indianScriptsFormat)
else:
for fontInstance in instancesList:
handleFontLight(folderPath, fontMM, fontInstance, options)
t2 = time.time()
elapsedSeconds = t2-t1
if (elapsedSeconds/60) < 1:
print '\nCompleted in %.1f seconds.\n' % elapsedSeconds
else:
print '\nCompleted in %.1f minutes.\n' % (elapsedSeconds/60)
class MarkGenOptions:
# Holds the options for the module.
# The values of all member items NOT prefixed with "_" are written to/read from
# a preferences file.
# This also gets/sets the same member fields in the passed object.
def __init__(self):
self.trimCasingTags = 0
self.genMkmkFeature = 0
self.writeClassesFile = 0
self.indianScriptsFormat = 0
# items not written to prefs
self._prefsBaseName = kPrefsFileName
self._prefsPath = None
def _getPrefs(self, callerObject = None):
foundPrefsFile = 0
# We will put the prefs file in a directory "Preferences" at the same level as the Macros directory
dirPath = os.path.dirname(WriteFeaturesMarkFDK.__file__)
name = " "
while name and (name.lower() != "macros"):
name = os.path.basename(dirPath)
dirPath = os.path.dirname(dirPath)
if name.lower() != "macros" :
dirPath = None
if dirPath:
dirPath = os.path.join(dirPath, "Preferences")
if not os.path.exists(dirPath): # create it so we can save a prefs file there later.
try:
os.mkdir(dirPath)
except (IOError,OSError):
print("Failed to create prefs directory %s" % (dirPath))
return foundPrefsFile
else:
return foundPrefsFile
# the prefs directory exists. Try and open the file.
self._prefsPath = os.path.join(dirPath, self._prefsBaseName)
if os.path.exists(self._prefsPath):
try:
pf = file(self._prefsPath, "rt")
data = pf.read()
prefs = eval(data)
pf.close()
except (IOError, OSError):
print("Prefs file exists but cannot be read %s" % (self._prefsPath))
return foundPrefsFile
# We've successfully read the prefs file
foundPrefsFile = 1
kelList = prefs.keys()
for key in kelList:
exec("self.%s = prefs[\"%s\"]" % (key,key))
# Add/set the member fields of the calling object
if callerObject:
keyList = dir(self)
for key in keyList:
if key[0] == "_":
continue
exec("callerObject.%s = self.%s" % (key, key))
return foundPrefsFile
def _savePrefs(self, callerObject = None):
prefs = {}
if not self._prefsPath:
return
keyList = dir(self)
for key in keyList:
if key[0] == "_":
continue
if callerObject:
exec("self.%s = callerObject.%s" % (key, key))
exec("prefs[\"%s\"] = self.%s" % (key, key))
try:
pf = file(self._prefsPath, "wt")
pf.write(repr(prefs))
pf.close()
print("Saved prefs in %s." % self._prefsPath)
except (IOError, OSError):
print("Failed to write prefs file in %s." % self._prefsPath)
class MarkGenDialog:
def __init__(self):
""" NOTE: the Get and Save preferences class methods access the preference values as fields
of the dialog by name. If you want to change a preference value, the dialog control value must have
the same field name.
"""
dWidth = 350
dMargin = 25
xMax = dWidth - dMargin
# Mark Feature Options section
xC1 = dMargin + 20 # Left indent of the "Generate..." options
yC0 = 0
yC1 = yC0 + 30
yC2 = yC1 + 30
yC3 = yC2 + 30
yC4 = yC3 + 30
endYsection = yC4 + 30
dHeight = endYsection + 70 # Total height of dialog
self.d = Dialog(self)
self.d.size = Point(dWidth, dHeight)
self.d.Center()
self.d.title = "Mark Feature Generator Preferences"
self.options = MarkGenOptions()
self.options._getPrefs(self) # This both loads prefs and assigns the member fields of the dialog.
self.d.AddControl(CHECKBOXCONTROL, Rect(xC1, yC1, xMax, aAUTO), "genMkmkFeature", STYLE_CHECKBOX, " Write mark-to-mark lookups")
self.d.AddControl(CHECKBOXCONTROL, Rect(xC1, yC2, xMax, aAUTO), "trimCasingTags", STYLE_CHECKBOX, " Trim casing tags on anchor names")
self.d.AddControl(CHECKBOXCONTROL, Rect(xC1, yC3, xMax, aAUTO), "writeClassesFile", STYLE_CHECKBOX, " Write mark classes in separate file")
self.d.AddControl(CHECKBOXCONTROL, Rect(xC1, yC4, xMax, aAUTO), "indianScriptsFormat", STYLE_CHECKBOX, " Format the output for Indian scripts")
def on_genMkmkFeature(self, code):
self.d.GetValue("genMkmkFeature")
def on_trimCasingTags(self, code):
self.d.GetValue("trimCasingTags")
def on_writeClassesFile(self, code):
self.d.GetValue("writeClassesFile")
def on_indianScriptsFormat(self, code):
self.d.GetValue("indianScriptsFormat")
def on_ok(self,code):
self.result = 1
# update options
self.options._savePrefs(self) # update prefs file
def on_cancel(self, code):
self.result = 0
def Run(self):
self.d.Run()
return self.result
def run():
global debug
if fl.count == 0:
print 'No font opened.'
return
if len(fl.font) == 0:
print 'The font has no glyphs.'
return
else:
dontShowDialog = 1
result = 2
dontShowDialog = checkControlKeyPress()
debug = not checkShiftKeyPress()
if dontShowDialog:
print "Hold down CONTROL key while starting this script in order to set options.\n"
options = MarkGenOptions()
options._getPrefs() # load current settings from prefs
makeFeature(options)
else:
IGd = MarkGenDialog()
result = IGd.Run() # returns 0 for cancel, 1 for ok
if result == 1:
options = MarkGenOptions()
options._getPrefs() # load current settings from prefs
makeFeature(options)
if __name__ == "__main__":
run()
|
|
"""
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
import operator
import warnings
import numpy as np
__all__ = [
'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
#
# Warnings and Exceptions
#
class RankWarning(UserWarning):
"""Issued by chebfit when the design matrix is rank deficient."""
pass
class PolyError(Exception):
"""Base class for errors in this module."""
pass
class PolyDomainError(PolyError):
"""Issued by the generic Poly class when two domains don't match.
    This is raised when a binary operation is passed Poly objects with
different domains.
"""
pass
#
# Base class for all polynomial types
#
class PolyBase(object):
"""
Base class for all polynomial types.
Deprecated in numpy 1.9.0, use the abstract
ABCPolyBase class instead. Note that the latter
requires a number of virtual functions to be
implemented.
"""
pass
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq):
"""Remove small Poly series coefficients.
Parameters
----------
seq : sequence
        Sequence of Poly series coefficients. An empty sequence is
        returned unchanged.
Returns
-------
series : sequence
Subsequence with trailing zeros removed. If the resulting sequence
would be empty, return the first element. The returned sequence may
or may not be a view.
Notes
-----
Do not lose the type info if the sequence contains unknown objects.
"""
if len(seq) == 0:
return seq
else:
for i in range(len(seq) - 1, -1, -1):
if seq[i] != 0:
break
return seq[:i+1]
def as_series(alist, trim=True):
"""
Return argument as a list of 1-d arrays.
The returned list contains array(s) of dtype double, complex double, or
object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
array.
Parameters
----------
alist : array_like
A 1- or 2-d array_like
trim : boolean, optional
When True, trailing zeros are removed from the inputs.
When False, the inputs are passed through intact.
Returns
-------
[a1, a2,...] : list of 1-D arrays
A copy of the input data as a list of 1-d arrays.
Raises
------
ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> a = np.arange(4)
>>> pu.as_series(a)
[array([0.]), array([1.]), array([2.]), array([3.])]
>>> b = np.arange(6).reshape((2,3))
>>> pu.as_series(b)
[array([0., 1., 2.]), array([3., 4., 5.])]
>>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
[array([1.]), array([0., 1., 2.]), array([0., 1.])]
>>> pu.as_series([2, [1.1, 0.]])
[array([2.]), array([1.1])]
>>> pu.as_series([2, [1.1, 0.]], trim=False)
[array([2.]), array([1.1, 0. ])]
"""
arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
if min([a.size for a in arrays]) == 0:
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]):
raise ValueError("Coefficient array is not 1-d")
if trim:
arrays = [trimseq(a) for a in arrays]
if any([a.dtype == np.dtype(object) for a in arrays]):
ret = []
for a in arrays:
if a.dtype != np.dtype(object):
tmp = np.empty(len(a), dtype=np.dtype(object))
tmp[:] = a[:]
ret.append(tmp)
else:
ret.append(a.copy())
else:
try:
dtype = np.common_type(*arrays)
except Exception:
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
return ret
def trimcoef(c, tol=0):
"""
Remove "small" "trailing" coefficients from a polynomial.
"Small" means "small in absolute value" and is controlled by the
parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
both the 3-rd and 4-th order coefficients would be "trimmed."
Parameters
----------
c : array_like
1-d array of coefficients, ordered from lowest order to highest.
tol : number, optional
Trailing (i.e., highest order) elements with absolute value less
than or equal to `tol` (default value is zero) are removed.
Returns
-------
trimmed : ndarray
1-d array with trailing zeros removed. If the resulting series
would be empty, a series containing a single zero is returned.
Raises
------
ValueError
If `tol` < 0
See Also
--------
trimseq
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> pu.trimcoef((0,0,3,0,5,0,0))
array([0., 0., 3., 0., 5.])
>>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([0.])
>>> i = complex(0,1) # works for complex
>>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([0.0003+0.j , 0.001 -0.001j])
"""
if tol < 0:
raise ValueError("tol must be non-negative")
[c] = as_series([c])
[ind] = np.nonzero(np.abs(c) > tol)
if len(ind) == 0:
return c[:1]*0
else:
return c[:ind[-1] + 1].copy()
def getdomain(x):
"""
Return a domain suitable for given abscissae.
Find a domain suitable for a polynomial or Chebyshev series
defined at the values supplied.
Parameters
----------
x : array_like
1-d array of abscissae whose domain will be determined.
Returns
-------
domain : ndarray
1-d array containing two values. If the inputs are complex, then
the two returned points are the lower left and upper right corners
of the smallest rectangle (aligned with the axes) in the complex
plane containing the points `x`. If the inputs are real, then the
two points are the ends of the smallest interval containing the
points `x`.
See Also
--------
mapparms, mapdomain
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> points = np.arange(4)**2 - 5; points
array([-5, -4, -1, 4])
>>> pu.getdomain(points)
array([-5., 4.])
>>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle
>>> pu.getdomain(c)
array([-1.-1.j, 1.+1.j])
"""
[x] = as_series([x], trim=False)
if x.dtype.char in np.typecodes['Complex']:
rmin, rmax = x.real.min(), x.real.max()
imin, imax = x.imag.min(), x.imag.max()
return np.array((complex(rmin, imin), complex(rmax, imax)))
else:
return np.array((x.min(), x.max()))
def mapparms(old, new):
"""
Linear map parameters between domains.
Return the parameters of the linear map ``offset + scale*x`` that maps
`old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.
Parameters
----------
old, new : array_like
Domains. Each domain must (successfully) convert to a 1-d array
containing precisely two values.
Returns
-------
offset, scale : scalars
The map ``L(x) = offset + scale*x`` maps the first domain to the
second.
See Also
--------
getdomain, mapdomain
Notes
-----
Also works for complex numbers, and thus can be used to calculate the
parameters required to map any line in the complex plane to any other
line therein.
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> pu.mapparms((-1,1),(-1,1))
(0.0, 1.0)
>>> pu.mapparms((1,-1),(-1,1))
(-0.0, -1.0)
>>> i = complex(0,1)
>>> pu.mapparms((-i,-1),(1,i))
((1+1j), (1-0j))
"""
oldlen = old[1] - old[0]
newlen = new[1] - new[0]
off = (old[1]*new[0] - old[0]*new[1])/oldlen
scl = newlen/oldlen
return off, scl
def mapdomain(x, old, new):
"""
Apply linear map to input points.
The linear map ``offset + scale*x`` that maps the domain `old` to
the domain `new` is applied to the points `x`.
Parameters
----------
x : array_like
Points to be mapped. If `x` is a subtype of ndarray the subtype
will be preserved.
old, new : array_like
The two domains that determine the map. Each must (successfully)
convert to 1-d arrays containing precisely two values.
Returns
-------
x_out : ndarray
Array of points of the same shape as `x`, after application of the
linear map between the two domains.
See Also
--------
getdomain, mapparms
Notes
-----
Effectively, this implements:
.. math ::
x\\_out = new[0] + m(x - old[0])
where
.. math ::
m = \\frac{new[1]-new[0]}{old[1]-old[0]}
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> old_domain = (-1,1)
>>> new_domain = (0,2*np.pi)
>>> x = np.linspace(-1,1,6); x
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
>>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary
6.28318531])
>>> x - pu.mapdomain(x_out, new_domain, old_domain)
array([0., 0., 0., 0., 0., 0.])
Also works for complex numbers (and thus can be used to map any line in
the complex plane to any other line therein).
>>> i = complex(0,1)
>>> old = (-1 - i, 1 + i)
>>> new = (-1 + i, 1 - i)
>>> z = np.linspace(old[0], old[1], 6); z
array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ])
>>> new_z = pu.mapdomain(z, old, new); new_z
array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary
"""
x = np.asanyarray(x)
off, scl = mapparms(old, new)
return off + scl*x
def _vander2d(vander_f, x, y, deg):
"""
Helper function used to implement the ``<type>vander2d`` functions.
Parameters
----------
vander_f : function(array_like, int) -> ndarray
The 1d vander function, such as ``polyvander``
x, y, deg :
See the ``<type>vander2d`` functions for more detail
"""
degx, degy = [
_deprecate_as_int(d, "degrees")
for d in deg
]
x, y = np.array((x, y), copy=False) + 0.0
vx = vander_f(x, degx)
vy = vander_f(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def _vander3d(vander_f, x, y, z, deg):
"""
Helper function used to implement the ``<type>vander3d`` functions.
Parameters
----------
vander_f : function(array_like, int) -> ndarray
The 1d vander function, such as ``polyvander``
x, y, z, deg :
See the ``<type>vander3d`` functions for more detail
"""
degx, degy, degz = [
_deprecate_as_int(d, "degrees")
for d in deg
]
x, y, z = np.array((x, y, z), copy=False) + 0.0
vx = vander_f(x, degx)
vy = vander_f(y, degy)
vz = vander_f(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def _fromroots(line_f, mul_f, roots):
"""
Helper function used to implement the ``<type>fromroots`` functions.
Parameters
----------
line_f : function(float, float) -> ndarray
The ``<type>line`` function, such as ``polyline``
mul_f : function(array_like, array_like) -> ndarray
The ``<type>mul`` function, such as ``polymul``
roots :
See the ``<type>fromroots`` functions for more detail
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = as_series([roots], trim=False)
roots.sort()
p = [line_f(-r, 1) for r in roots]
n = len(p)
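        # Multiply the linear factors pairwise (p[i] with p[i+m]), halving the
        # number of factors on each pass until a single product remains.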
while n > 1:
m, r = divmod(n, 2)
tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = mul_f(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def _valnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>val<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>val<n>d`` functions for more detail
"""
try:
args = tuple(np.array(args, copy=False))
except Exception:
# preserve the old error message
if len(args) == 2:
            raise ValueError('x, y are incompatible')
elif len(args) == 3:
            raise ValueError('x, y, z are incompatible')
else:
raise ValueError('ordinates are incompatible')
it = iter(args)
x0 = next(it)
# use tensor on only the first
c = val_f(x0, c)
for xi in it:
c = val_f(xi, c, tensor=False)
return c
def _gridnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>grid<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>grid<n>d`` functions for more detail
"""
for xi in args:
c = val_f(xi, c)
return c
def _div(mul_f, c1, c2):
"""
Helper function used to implement the ``<type>div`` functions.
Implementation uses repeated subtraction of c2 multiplied by the nth basis.
For some polynomial types, a more efficient approach may be possible.
Parameters
----------
mul_f : function(array_like, array_like) -> array_like
The ``<type>mul`` function, such as ``polymul``
c1, c2 :
See the ``<type>div`` functions for more detail
"""
# c1, c2 are trimmed copies
[c1, c2] = as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = mul_f([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, trimseq(rem)
def _add(c1, c2):
""" Helper function used to implement the ``<type>add`` functions. """
# c1, c2 are trimmed copies
[c1, c2] = as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return trimseq(ret)
def _sub(c1, c2):
""" Helper function used to implement the ``<type>sub`` functions. """
# c1, c2 are trimmed copies
[c1, c2] = as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return trimseq(ret)
def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
"""
Helper function used to implement the ``<type>fit`` functions.
Parameters
----------
vander_f : function(array_like, int) -> ndarray
The 1d vander function, such as ``polyvander``
    x, y, deg, rcond, full, w :
See the ``<type>fit`` functions for more detail
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = vander_f(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = vander_f(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
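    # The columns of the design matrix are scaled to unit Euclidean norm before
    # solving, and the scaling is divided back out of the coefficients below;
    # this improves the conditioning of the least squares problem.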
# Solve the least squares problem.
c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def _pow(mul_f, c, pow, maxpower):
"""
Helper function used to implement the ``<type>pow`` functions.
Parameters
----------
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    c, pow, maxpower :
        See the ``<type>pow`` functions for more detail
"""
# c is a trimmed copy
[c] = as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = mul_f(prd, c)
return prd
def _deprecate_as_int(x, desc):
"""
Like `operator.index`, but emits a deprecation warning when passed a float
Parameters
----------
x : int-like, or float with integral value
Value to interpret as an integer
desc : str
description to include in any error message
Raises
------
TypeError : if x is a non-integral float or non-numeric
DeprecationWarning : if x is an integral float
"""
try:
return operator.index(x)
except TypeError:
# Numpy 1.17.0, 2019-03-11
try:
ix = int(x)
except TypeError:
pass
else:
if ix == x:
warnings.warn(
"In future, this will raise TypeError, as {} will need to "
"be an integer not just an integral float."
.format(desc),
DeprecationWarning,
stacklevel=3
)
return ix
raise TypeError("{} must be an integer".format(desc))
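# Illustrative behaviour (assumed, not a doctest): _deprecate_as_int(3, "degree")
# returns 3; _deprecate_as_int(3.0, "degree") returns 3 and emits a
# DeprecationWarning; _deprecate_as_int(3.5, "degree") raises
# TypeError("degree must be an integer").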
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import imp
import os
import StringIO
import sys
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.db import fakes as db_fakes
TOPDIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,
os.pardir))
NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage')
sys.dont_write_bytecode = True
nova_manage = imp.load_source('nova_manage', NOVA_MANAGE_PATH)
sys.dont_write_bytecode = False
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = nova_manage.FixedIpCommands()
def test_reserve(self):
self.commands.reserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], True)
def test_reserve_nonexistent_address(self):
self.assertRaises(SystemExit,
self.commands.reserve,
'55.55.55.55')
def test_unreserve(self):
self.commands.unreserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], False)
def test_unreserve_nonexistent_address(self):
self.assertRaises(SystemExit,
self.commands.unreserve,
'55.55.55.55')
def test_list(self):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list()
self.assertTrue(sys.stdout.getvalue().find('192.168.0.100') != -1)
def test_list_just_one_host(self):
def fake_fixed_ip_get_by_host(*args, **kwargs):
return [db_fakes.fixed_ip_fields]
self.useFixture(fixtures.MonkeyPatch(
'nova.db.fixed_ip_get_by_host',
fake_fixed_ip_get_by_host))
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list('banana')
self.assertTrue(sys.stdout.getvalue().find('192.168.0.100') != -1)
class FloatingIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = nova_manage.FloatingIpCommands()
def test_address_to_hosts(self):
def assert_loop(result, expected):
for ip in result:
self.assertTrue(str(ip) in expected)
address_to_hosts = self.commands.address_to_hosts
# /32 and /31
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/32')
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/31')
# /30
expected = ["192.168.100.%s" % i for i in range(1, 3)]
result = address_to_hosts('192.168.100.0/30')
self.assertTrue(len(list(result)) == 2)
assert_loop(result, expected)
# /29
expected = ["192.168.100.%s" % i for i in range(1, 7)]
result = address_to_hosts('192.168.100.0/29')
self.assertTrue(len(list(result)) == 6)
assert_loop(result, expected)
# /28
expected = ["192.168.100.%s" % i for i in range(1, 15)]
result = address_to_hosts('192.168.100.0/28')
self.assertTrue(len(list(result)) == 14)
assert_loop(result, expected)
# /16
result = address_to_hosts('192.168.100.0/16')
self.assertTrue(len(list(result)) == 65534)
# NOTE(dripton): I don't test /13 because it makes the test take 3s.
# /12 gives over a million IPs, which is ridiculous.
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/12')
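# Hedged sketch (not part of the original tests): the expected host counts above
# follow the usual "usable hosts = 2**(32 - prefix) - 2" rule (network and
# broadcast addresses excluded), which is also why /31 and /32 are rejected.
def _usable_host_count_sketch(prefix_len):
    # e.g. /30 -> 2, /29 -> 6, /28 -> 14, /16 -> 65534
    return 2 ** (32 - prefix_len) - 2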
class NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = nova_manage.NetworkCommands()
self.net = {'id': 0,
'label': 'fake',
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::/64',
'multi_host': False,
'gateway_v6': 'dead:beef::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '8.8.8.8',
'dns2': '8.8.4.4',
'vlan': 200,
'vpn_public_address': '10.0.0.2',
'vpn_public_port': '2222',
'vpn_private_address': '192.168.0.2',
'dhcp_start': '192.168.0.3',
'project_id': 'fake_project',
'host': 'fake_host',
'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
def fake_network_get_by_cidr(context, cidr):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(self.fake_net)
def fake_network_get_by_uuid(context, uuid):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(uuid, self.fake_net['uuid'])
return db_fakes.FakeModel(self.fake_net)
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def test_create(self):
def fake_create_networks(obj, context, **kwargs):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(kwargs['label'], 'Test')
self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
self.assertEqual(kwargs['multi_host'], False)
self.assertEqual(kwargs['num_networks'], 1)
self.assertEqual(kwargs['network_size'], 256)
self.assertEqual(kwargs['vlan_start'], 200)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
self.assertEqual(kwargs['dns1'], '8.8.8.8')
self.assertEqual(kwargs['dns2'], '8.8.4.4')
self.flags(network_manager='nova.network.manager.VlanManager')
from nova.network import manager as net_manager
self.stubs.Set(net_manager.VlanManager, 'create_networks',
fake_create_networks)
self.commands.create(
label='Test',
cidr='10.2.0.0/24',
num_networks=1,
network_size=256,
multi_host='F',
vlan_start=200,
vpn_start=2000,
cidr_v6='fd00:2::/120',
gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
dns2='8.8.4.4',
uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
self.stubs.Set(db, 'network_get_all', fake_network_get_all)
output = StringIO.StringIO()
sys.stdout = output
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
_fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
"%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
"%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
head = _fmt % {'id': _('id'),
'cidr': _('IPv4'),
'cidr_v6': _('IPv6'),
'dhcp_start': _('start address'),
'dns1': _('DNS1'),
'dns2': _('DNS2'),
'vlan': _('VlanID'),
'project_id': _('project'),
'uuid': _("uuid")}
body = _fmt % {'id': self.net['id'],
'cidr': self.net['cidr'],
'cidr_v6': self.net['cidr_v6'],
'dhcp_start': self.net['dhcp_start'],
'dns1': self.net['dns1'],
'dns2': self.net['dns2'],
'vlan': self.net['vlan'],
'project_id': self.net['project_id'],
'uuid': self.net['uuid']}
answer = '%s\n%s\n' % (head, body)
self.assertEqual(result, answer)
def test_delete(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_uuid',
self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
self.stubs.Set(db, 'network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
def test_modify_associate(self):
self._test_modify_base(update_value={'project_id': 'test_project',
'host': 'test_host'},
project='test_project', host='test_host')
def test_modify_unchanged(self):
self._test_modify_base(update_value={}, project=None, host=None)
def test_modify_disassociate(self):
self._test_modify_base(update_value={'project_id': None, 'host': None},
project=None, host=None, dis_project=True,
dis_host=True)
class InstanceTypeCommandsTestCase(test.TestCase):
def setUp(self):
super(InstanceTypeCommandsTestCase, self).setUp()
values = dict(name="test.small",
memory_mb=220,
vcpus=1,
root_gb=16,
ephemeral_gb=32,
flavorid=105)
ref = db.instance_type_create(context.get_admin_context(),
values)
self.instance_type_name = ref["name"]
self.instance_type_id = ref["id"]
self.instance_type_flavorid = ref["flavorid"]
self.set_key = nova_manage.InstanceTypeCommands().set_key
self.unset_key = nova_manage.InstanceTypeCommands().unset_key
def tearDown(self):
db.instance_type_destroy(context.get_admin_context(),
"test.small")
super(InstanceTypeCommandsTestCase, self).tearDown()
def _test_extra_specs_empty(self):
empty_specs = {}
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(empty_specs, actual_specs)
def test_extra_specs_set_unset(self):
expected_specs = {'k1': 'v1'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(expected_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self._test_extra_specs_empty()
def test_extra_specs_update(self):
expected_specs = {'k1': 'v1'}
updated_specs = {'k1': 'v2'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(expected_specs, actual_specs)
self.set_key(self.instance_type_name, "k1", "v2")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(updated_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
def test_extra_specs_multiple(self):
two_items_extra_specs = {'k1': 'v1',
'k3': 'v3'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
self.set_key(self.instance_type_name, "k3", "v3")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(two_items_extra_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self.unset_key(self.instance_type_name, "k3")
class ProjectCommandsTestCase(test.TestCase):
def setUp(self):
super(ProjectCommandsTestCase, self).setUp()
self.commands = nova_manage.ProjectCommands()
def test_quota(self):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
self.assertEquals(('instances: unlimited' in result), True)
def test_quota_update_invalid_key(self):
self.assertRaises(SystemExit,
self.commands.quota, 'admin', 'volumes1', '10'
)
class DBCommandsTestCase(test.TestCase):
def setUp(self):
super(DBCommandsTestCase, self).setUp()
self.commands = nova_manage.DbCommands()
def test_archive_deleted_rows_negative(self):
self.assertRaises(SystemExit,
self.commands.archive_deleted_rows, -1)
class ServiceCommandsTestCase(test.TestCase):
def setUp(self):
super(ServiceCommandsTestCase, self).setUp()
self.commands = nova_manage.ServiceCommands()
def test_service_enable_invalid_params(self):
self.assertRaises(SystemExit,
self.commands.enable, 'nohost', 'noservice')
def test_service_disable_invalid_params(self):
self.assertRaises(SystemExit,
self.commands.disable, 'nohost', 'noservice')
|
|
from __future__ import print_function
import os, sys, inspect
import h5py
import numpy as np
import matplotlib
import random
import math
import multiprocessing
from PIL import Image
from Crypto.Random.random import randint
from functools import partial
# Load the configuration file
import config
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.append(cmd_folder)
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],config.caffe_path+"/python")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path+"/python")
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
cpus = multiprocessing.cpu_count()
cwd = os.getcwd()
os.chdir(config.caffe_path)
result = os.system("make all -j %s" % cpus)
if result != 0:
sys.exit(result)
result = os.system("make pycaffe -j %s" % cpus)
if result != 0:
sys.exit(result)
os.chdir(cwd)
# Import pycaffe
import caffe
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import netconf
# General variables
# Size of a float variable
fsize = 4
def compute_memory_weights(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory += shape_arr[i][1]
return memory
def compute_memory_buffers(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory = max(memory, shape_arr[i][0])
return memory
def compute_memory_blobs(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
mem = fsize * shape_arr[i][2]
for j in range(0,len(shape_arr[i][4])):
mem *= shape_arr[i][4][j]
memory += mem
return memory
def update_shape(shape_arr, update):
last_shape = shape_arr[-1]
new_shape = [update[0](last_shape[0]), update[1](last_shape[1]), update[2](last_shape[2]),
[update[3][min(i,len(update[3])-1)](last_shape[3][i]) for i in range(0,len(last_shape[3]))],
[update[4][min(i,len(update[4])-1)](last_shape[4][i]) for i in range(0,len(last_shape[4]))]]
shape_arr += [new_shape]
print ("TEST B: %s" % [update[4][min(i,len(update[4])-1)]([1,1,1][i]) for i in range(0,3)])
return shape_arr
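# Hedged sketch (not part of the original script): each run_shape entry has the
# form [conv_buffer_bytes, weight_bytes, num_channels, [dilation per dim],
# [spatial extent per dim]]. The toy shapes below are illustrative only.
def _memory_bookkeeping_sketch():
    shape_arr = [[0, 0, 1, [1, 1], [572, 572]],
                 [4 * 572 * 572, 4 * 64 * 9, 64, [1, 1], [570, 570]]]
    # buffers: max over entries; weights: sum over entries;
    # blobs: fsize * channels * prod(spatial extents), summed over entries.
    return (compute_memory_buffers(shape_arr),
            compute_memory_weights(shape_arr),
            compute_memory_blobs(shape_arr))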
def data_layer(shape):
data, label = L.MemoryData(dim=shape, ntop=2)
return data, label
def conv_relu(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], kstride=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride, kstride=kstride,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return conv, L.ReLU(conv, in_place=True, negative_slope=0.005)
def convolution(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], kstride=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Convolution(bottom, kernel_size=kernel_size, stride=stride, kstride=kstride,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
def max_pool(run_shape, bottom, kernel_size=[2], stride=[2], pad=[0], kstride=[1]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: x]
update += [[lambda x, i=i: x * kstride[min(i,len(kstride)-1)] for i in range(0,len(run_shape[-1][4]))]]
# Strictly speaking this update rule is not complete, but should be sufficient for USK
if kstride[0] == 1 and kernel_size[0] == stride[0]:
update += [[lambda x, i=i: x / (kernel_size[min(i,len(kernel_size)-1)]) for i in range(0,len(run_shape[-1][4]))]]
else:
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=kernel_size, stride=stride, pad=pad, kstride=kstride)
def upconv(run_shape, bottom, num_output_dec, num_output_conv, weight_std=0.01, kernel_size=[2], stride=[2]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: num_output_dec]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: kernel_size[min(i,len(kernel_size)-1)] * x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
deconv = L.Deconvolution(bottom, convolution_param=dict(num_output=num_output_dec, kernel_size=kernel_size, stride=stride, pad=[0], kstride=[1], group=num_output_dec,
weight_filler=dict(type='constant', value=1), bias_term=False),
param=dict(lr_mult=0, decay_mult=0))
# The convolution buffer and weight memory
weight_mem = fsize * num_output_conv * num_output_dec
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= 2
conv_buff *= run_shape[-1][4][i]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output_conv]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(deconv, num_output=num_output_conv, kernel_size=[1], stride=[1], pad=[0], kstride=[1], group=1,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return deconv, conv
def mergecrop(run_shape, bottom_a, bottom_b):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: 2*x]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.MergeCrop(bottom_a, bottom_b, forward=[1,1], backward=[1,1])
def implement_usknet(net, run_shape, fmaps_start, fmaps_end):
# Chained blob list to construct the network (forward direction)
blobs = []
# All networks start with data
blobs = blobs + [net.data]
fmaps = fmaps_start
if netconf.unet_depth > 0:
# U-Net downsampling; 2*Convolution+Pooling
for i in range(0, netconf.unet_depth):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu] # This is the blob of interest for mergecrop (index 2 + 3 * i)
pool = max_pool(run_shape, blobs[-1], kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i])
blobs = blobs + [pool]
fmaps = netconf.unet_fmap_inc_rule(fmaps)
# If there is no SK-Net component, fill with 2 convolutions
if (netconf.unet_depth > 0 and netconf.sknet_conv_depth == 0):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
# Else use the SK-Net instead
else:
for i in range(0, netconf.sknet_conv_depth):
# TODO: Not implemented yet (fixme)
run_shape = run_shape
if netconf.unet_depth > 0:
# U-Net upsampling; Upconvolution+MergeCrop+2*Convolution
for i in range(0, netconf.unet_depth):
deconv, conv = upconv(run_shape, blobs[-1], fmaps, netconf.unet_fmap_dec_rule(fmaps), kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
fmaps = netconf.unet_fmap_dec_rule(fmaps)
# Here, layer (2 + 3 * i) with reversed i (high to low) is picked
mergec = mergecrop(run_shape, blobs[-1], blobs[-1 + 3 * (netconf.unet_depth - i)])
blobs = blobs + [mergec]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv = convolution(run_shape, blobs[-1], fmaps_end, kernel_size=[1], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
# Return the last blob of the network (goes to error objective)
return blobs[-1]
def caffenet(netmode):
# Start Caffe proto net
net = caffe.NetSpec()
# Specify input data structures
if netmode == caffe_pb2.TEST:
if netconf.loss_function == 'malis':
fmaps_end = 11
if netconf.loss_function == 'euclid':
fmaps_end = 11
if netconf.loss_function == 'softmax':
fmaps_end = 2
net.data, net.datai = data_layer([1,1,572,572])
net.silence = L.Silence(net.datai, ntop=0)
# Shape specs:
        # 00. Convolution buffer size
        # 01. Weight memory size
        # 02. Num. channels
        # 03. [d] parameter running value
        # 04. [w] parameter running value
run_shape_in = [[0,0,1,[1,1],[572,572]]]
run_shape_out = run_shape_in
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
# Implement the prediction layer
if netconf.loss_function == 'malis':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'euclid':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'softmax':
net.prob = L.Softmax(last_blob, ntop=1)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
else:
if netconf.loss_function == 'malis':
net.data, net.datai = data_layer([1,1,572,572])
net.label, net.labeli = data_layer([1,1,388,388])
net.label_affinity, net.label_affinityi = data_layer([1,11,16,388,388])
net.affinity_edges, net.affinity_edgesi = data_layer([1,1,11,3])
net.silence = L.Silence(net.datai, net.labeli, net.label_affinityi, net.affinity_edgesi, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'euclid':
net.data, net.datai = data_layer([1,1,572,572])
net.label, net.labeli = data_layer([1,3,388,388])
net.scale, net.scalei = data_layer([1,3,388,388])
net.silence = L.Silence(net.datai, net.labeli, net.scalei, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'softmax':
net.data, net.datai = data_layer([1,1,572,572])
# Currently only supports binary classification
net.label, net.labeli = data_layer([1,1,388,388])
net.silence = L.Silence(net.datai, net.labeli, ntop=0)
fmaps_end = 2
        run_shape_in = [[0,0,1,[1,1],[572,572]]]
run_shape_out = run_shape_in
# Start the actual network
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+2*compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
# Implement the loss
if netconf.loss_function == 'malis':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.MalisLoss(last_blob, net.label_affinity, net.label, net.affinity_edges, ntop=0)
if netconf.loss_function == 'euclid':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.EuclideanLoss(last_blob, net.label, net.scale, ntop=0)
if netconf.loss_function == 'softmax':
net.loss = L.SoftmaxWithLoss(last_blob, net.label, ntop=0)
# Return the protocol buffer of the generated network
return net.to_proto()
def make_net():
with open('net/net_train.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TRAIN), file=f)
with open('net/net_test.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TEST), file=f)
def make_solver():
with open('net/solver.prototxt', 'w') as f:
print('train_net: \"net/net_train.prototxt\"', file=f)
print('base_lr: 0.00001', file=f)
print('momentum: 0.99', file=f)
print('weight_decay: 0.000005', file=f)
print('lr_policy: \"inv\"', file=f)
print('gamma: 0.0001', file=f)
print('power: 0.75', file=f)
print('max_iter: 100000', file=f)
print('snapshot: 2000', file=f)
print('snapshot_prefix: \"net\"', file=f)
print('display: 50', file=f)
make_net()
make_solver()
|
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import queue as Queue
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ovs.db import idl
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import commands as cmd
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import helpers
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
OPTS = [
cfg.StrOpt('ovsdb_connection',
default='tcp:127.0.0.1:6640',
help=_('The connection string for the native OVSDB backend')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
# TODO(twilson) DEFAULT.ovs_vsctl_timeout should be OVS.vsctl_timeout
cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib')
LOG = logging.getLogger(__name__)
class Transaction(api.Transaction):
def __init__(self, api, ovsdb_connection, timeout,
check_error=False, log_errors=False):
self.api = api
self.check_error = check_error
self.log_errors = log_errors
self.commands = []
self.results = Queue.Queue(1)
self.ovsdb_connection = ovsdb_connection
self.timeout = timeout
def add(self, command):
"""Add a command to the transaction
returns The command passed as a convenience
"""
self.commands.append(command)
return command
def commit(self):
self.ovsdb_connection.queue_txn(self)
result = self.results.get()
if self.check_error:
if isinstance(result, idlutils.ExceptionResult):
if self.log_errors:
LOG.error(result.tb)
raise result.ex
return result
def do_commit(self):
start_time = time.time()
attempts = 0
while True:
elapsed_time = time.time() - start_time
if attempts > 0 and elapsed_time > self.timeout:
raise RuntimeError("OVS transaction timed out")
attempts += 1
# TODO(twilson) Make sure we don't loop longer than vsctl_timeout
txn = idl.Transaction(self.api.idl)
for i, command in enumerate(self.commands):
LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s",
{'idx': i, 'cmd': command})
try:
command.run_idl(txn)
except Exception:
with excutils.save_and_reraise_exception() as ctx:
txn.abort()
if not self.check_error:
ctx.reraise = False
seqno = self.api.idl.change_seqno
status = txn.commit_block()
if status == txn.TRY_AGAIN:
LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying")
idlutils.wait_for_change(
self.api.idl, self.timeout - elapsed_time,
seqno)
continue
elif status == txn.ERROR:
msg = _LE("OVSDB Error: %s") % txn.get_error()
if self.log_errors:
LOG.error(msg)
if self.check_error:
# For now, raise similar error to vsctl/utils.execute()
raise RuntimeError(msg)
return
elif status == txn.ABORTED:
LOG.debug("Transaction aborted")
return
elif status == txn.UNCHANGED:
LOG.debug("Transaction caused no change")
return [cmd.result for cmd in self.commands]
class OvsdbIdl(api.API):
ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection,
cfg.CONF.ovs_vsctl_timeout,
'Open_vSwitch')
def __init__(self, context):
super(OvsdbIdl, self).__init__(context)
# it's a chicken and egg problem: by default, the manager that
# corresponds to the connection URI is in most cases not enabled in
# local ovsdb, so we still need ovs-vsctl to set it to allow
# connections
helpers.enable_connection_uri(self.ovsdb_connection.connection)
OvsdbIdl.ovsdb_connection.start()
self.idl = OvsdbIdl.ovsdb_connection.idl
@property
def _tables(self):
return self.idl.tables
@property
def _ovs(self):
return list(self._tables['Open_vSwitch'].rows.values())[0]
def transaction(self, check_error=False, log_errors=True, **kwargs):
return Transaction(self, OvsdbIdl.ovsdb_connection,
self.context.vsctl_timeout,
check_error, log_errors)
def add_br(self, name, may_exist=True):
return cmd.AddBridgeCommand(self, name, may_exist)
def del_br(self, name, if_exists=True):
return cmd.DelBridgeCommand(self, name, if_exists)
def br_exists(self, name):
return cmd.BridgeExistsCommand(self, name)
def port_to_br(self, name):
return cmd.PortToBridgeCommand(self, name)
def iface_to_br(self, name):
# For our purposes, ports and interfaces always have the same name
return cmd.PortToBridgeCommand(self, name)
def list_br(self):
return cmd.ListBridgesCommand(self)
def br_get_external_id(self, name, field):
return cmd.BrGetExternalIdCommand(self, name, field)
def br_set_external_id(self, name, field, value):
return cmd.BrSetExternalIdCommand(self, name, field, value)
def db_set(self, table, record, *col_values):
return cmd.DbSetCommand(self, table, record, *col_values)
def db_clear(self, table, record, column):
return cmd.DbClearCommand(self, table, record, column)
def db_get(self, table, record, column):
return cmd.DbGetCommand(self, table, record, column)
def db_list(self, table, records=None, columns=None, if_exists=False):
return cmd.DbListCommand(self, table, records, columns, if_exists)
def db_find(self, table, *conditions, **kwargs):
return cmd.DbFindCommand(self, table, *conditions, **kwargs)
def set_controller(self, bridge, controllers):
return cmd.SetControllerCommand(self, bridge, controllers)
def del_controller(self, bridge):
return cmd.DelControllerCommand(self, bridge)
def get_controller(self, bridge):
return cmd.GetControllerCommand(self, bridge)
def set_fail_mode(self, bridge, mode):
return cmd.SetFailModeCommand(self, bridge, mode)
def add_port(self, bridge, port, may_exist=True):
return cmd.AddPortCommand(self, bridge, port, may_exist)
def del_port(self, port, bridge=None, if_exists=True):
return cmd.DelPortCommand(self, port, bridge, if_exists)
def list_ports(self, bridge):
return cmd.ListPortsCommand(self, bridge)
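# Hedged usage sketch (not part of the original module): a typical caller builds
# a transaction, queues commands, and commits; 'br-demo' and 'port-demo' are
# illustrative names only.
def _transaction_usage_sketch(ovsdb_api):
    txn = ovsdb_api.transaction(check_error=True)
    txn.add(ovsdb_api.add_br('br-demo'))
    txn.add(ovsdb_api.add_port('br-demo', 'port-demo'))
    # commit() queues the transaction on the connection thread and blocks on the
    # result queue; with check_error=True a failed transaction re-raises here.
    return txn.commit()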
|
|
#!/usr/bin/env python
import sys
import pysam
import argparse
import random
import subprocess
import os
import bamsurgeon.replacereads as rr
import bamsurgeon.aligners as aligners
import bamsurgeon.mutation as mutation
import traceback
from bamsurgeon.common import *
from uuid import uuid4
from re import sub
from shutil import move
from multiprocessing import Pool
from collections import defaultdict as dd
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
def countReadCoverage(bam,chrom,start,end):
""" calculate coverage of aligned reads over region
"""
coverage = []
start = int(start)
end = int(end)
for i in range(end-start+1):
coverage.append(0.0)
i = 0
if chrom in bam.references:
for pcol in bam.pileup(chrom,start,end):
n = 0
if pcol.pos >= start and pcol.pos <= end:
for read in pcol.pileups:
if read.alignment.mapq >= 0 and not read.alignment.is_duplicate:
n += 1
coverage[i] = n
i += 1
return coverage
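# Hedged usage sketch (not part of the original script): mean coverage over a
# window via countReadCoverage; the BAM path and region are placeholders.
def _mean_coverage_sketch(bam_path, chrom, start, end):
    bam = pysam.Samfile(bam_path, 'rb')
    cov = countReadCoverage(bam, chrom, start, end)
    bam.close()
    return float(sum(cov)) / float(len(cov))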
def replace(origbamfile, mutbamfile, outbamfile, seed=None):
''' open .bam file and call replacereads
'''
origbam = pysam.Samfile(origbamfile, 'rb')
mutbam = pysam.Samfile(mutbamfile, 'rb')
outbam = pysam.Samfile(outbamfile, 'wb', template=origbam)
rr.replaceReads(origbam, mutbam, outbam, keepqual=True, seed=seed)
origbam.close()
mutbam.close()
outbam.close()
def get_mutstr(chrom, start, end, ins, ref):
return 'FIX get_mutstr'
def dictlist(fn):
d = {}
with open(fn, 'r') as inlist:
for name in inlist:
d[name.strip()] = True
return d
def makemut(args, chrom, start, end, vaf, ins, avoid, alignopts):
    ''' if ins is a sequence, it is inserted at start; otherwise the region from start to end is deleted '''
if args.seed is not None: random.seed(int(args.seed) + int(start))
mutid = chrom + '_' + str(start) + '_' + str(end) + '_' + str(vaf)
if ins is None:
mutid += ':DEL'
else:
mutid += ':INS:' + ins
try:
bamfile = pysam.Samfile(args.bamFileName, 'rb')
bammate = pysam.Samfile(args.bamFileName, 'rb') # use for mates to avoid iterator problems
reffile = pysam.Fastafile(args.refFasta)
tmpbams = []
is_insertion = ins is not None
is_deletion = ins is None
snvfrac = float(args.snvfrac)
mutstr = get_mutstr(chrom, start, end, ins, reffile)
del_ln = 0
if is_deletion:
del_ln = end-start
mutpos = start
mutpos_list = [start]
# optional CNV file
cnv = None
if (args.cnvfile):
cnv = pysam.Tabixfile(args.cnvfile, 'r')
log = open('addindel_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + "." + "_".join((chrom,str(start),str(end))) + ".log",'w')
tmpoutbamname = args.tmpdir + "/" + mutid + ".tmpbam." + str(uuid4()) + ".bam"
print "INFO\t" + now() + "\t" + mutid + "\tcreating tmp bam: ",tmpoutbamname #DEBUG
outbam_muts = pysam.Samfile(tmpoutbamname, 'wb', template=bamfile)
mutfail, hasSNP, maxfrac, outreads, mutreads, mutmates = mutation.mutate(args, log, bamfile, bammate, chrom, mutpos, mutpos+del_ln+1, mutpos_list, avoid=avoid, mutid_list=[mutid], is_insertion=is_insertion, is_deletion=is_deletion, ins_seq=ins, reffile=reffile, indel_start=start, indel_end=end)
if mutfail:
outbam_muts.close()
os.remove(tmpoutbamname)
return None
# pick reads to change
readlist = []
for extqname,read in outreads.iteritems():
if read.seq != mutreads[extqname]:
readlist.append(extqname)
print "len(readlist):",str(len(readlist))
readlist.sort()
random.shuffle(readlist)
if len(readlist) < int(args.mindepth):
sys.stderr.write("WARN\t" + now() + "\t" + mutid + "\tskipped, too few reads in region: " + str(len(readlist)) + "\n")
outbam_muts.close()
os.remove(tmpoutbamname)
return None
if vaf is None:
vaf = float(args.mutfrac) # default minor allele freq if not otherwise specified
if cnv: # cnv file is present
if chrom in cnv.contigs:
for cnregion in cnv.fetch(chrom,start,end):
cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\t" + ' '.join(("copy number in snp region:",chrom,str(start),str(end),"=",str(cn))) + "\n")
if float(cn) > 0.0:
vaf = 1.0/float(cn)
else:
vaf = 0.0
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tadjusted VAF: " + str(vaf) + "\n")
else:
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tselected VAF: " + str(vaf) + "\n")
lastread = int(len(readlist)*vaf)
# pick at least args.minmutreads if possible
if lastread < int(args.minmutreads):
if len(readlist) > int(args.minmutreads):
lastread = int(args.minmutreads)
sys.stdout.write("WARN\t" + now() + "\t" + mutid + "\tforced " + str(lastread) + " reads.\n")
else:
print "WARN\t" + now() + "\t" + mutid + "\tdropped site with fewer reads than --minmutreads"
os.remove(tmpoutbamname)
return None
readtrack = dd(list)
for readname in readlist:
orig_name, readpos, pairend = readname.split(',')
readtrack[orig_name].append('%s,%s' % (readpos, pairend))
usedreads = 0
newreadlist = []
for orig_name in readtrack:
for read_instance in readtrack[orig_name]:
newreadlist.append(orig_name + ',' + read_instance)
usedreads += 1
if usedreads >= lastread:
break
readlist = newreadlist
print "INFO\t" + now() + "\t" + mutid + "\tpicked: " + str(len(readlist)) + " reads"
wrote = 0
nmut = 0
mut_out = {}
# change reads from .bam to mutated sequences
for extqname,read in outreads.iteritems():
if read.seq != mutreads[extqname]:
if not args.nomut and extqname in readlist:
qual = read.qual # changing seq resets qual (see pysam API docs)
read.seq = mutreads[extqname] # make mutation
read.qual = qual
nmut += 1
if not hasSNP or args.force:
wrote += 1
mut_out[extqname] = read
muts_written = {}
for extqname in mut_out:
if extqname not in muts_written:
outbam_muts.write(mut_out[extqname])
muts_written[extqname] = True
if mutmates[extqname] is not None:
# is mate also in mutated list?
mate_read = mutmates[extqname]
pairname = 'F' # read is first in pair
if mate_read.is_read2:
pairname = 'S' # read is second in pair
if not mate_read.is_paired:
pairname = 'U' # read is unpaired
mateqname = ','.join((mate_read.qname,str(mate_read.pos),pairname))
if mateqname in mut_out:
# yes: output mutated mate
outbam_muts.write(mut_out[mateqname])
muts_written[mateqname] = True
else:
# no: output original mate
outbam_muts.write(mate_read)
print "INFO\t" + now() + "\t" + mutid + "\twrote: " + str(wrote) + " reads, mutated: " + str(nmut) + " reads"
if not hasSNP or args.force:
outbam_muts.close()
aligners.remap_bam(args.aligner, tmpoutbamname, args.refFasta, alignopts, mutid=mutid, paired=(not args.single), picardjar=args.picardjar)
outbam_muts = pysam.Samfile(tmpoutbamname,'rb')
coverwindow = 1
incover = countReadCoverage(bamfile,chrom,mutpos-coverwindow,mutpos+del_ln+coverwindow)
outcover = countReadCoverage(outbam_muts,chrom,mutpos-coverwindow,mutpos+del_ln+coverwindow)
avgincover = float(sum(incover))/float(len(incover))
avgoutcover = float(sum(outcover))/float(len(outcover))
spikein_frac = 0.0
if wrote > 0:
spikein_frac = float(nmut)/float(wrote)
# qc cutoff for final snv depth
if (avgoutcover > 0 and avgincover > 0 and avgoutcover/avgincover >= float(args.coverdiff)) or args.force:
tmpbams.append(tmpoutbamname)
indelstr = ''
if is_insertion:
indelstr = ':'.join(('INS', chrom, str(start), ins))
else:
indelstr = ':'.join(('DEL', chrom, str(start), str(end)))
snvstr = chrom + ":" + str(start) + "-" + str(end) + " (VAF=" + str(vaf) + ")"
log.write("\t".join(("indel",indelstr,str(mutpos),mutstr,str(avgincover),str(avgoutcover),str(spikein_frac),str(maxfrac)))+"\n")
else:
outbam_muts.close()
os.remove(tmpoutbamname)
if os.path.exists(tmpoutbamname + '.bai'):
os.remove(tmpoutbamname + '.bai')
print "WARN\t" + now() + "\t" + mutid + "\tdropped for outcover/incover < " + str(args.coverdiff)
return None
outbam_muts.close()
bamfile.close()
bammate.close()
log.close()
return sorted(tmpbams)
except Exception, e:
sys.stderr.write("*"*60 + "\nencountered error in mutation spikein: " + mutid + "\n")
traceback.print_exc(file=sys.stdout)
sys.stderr.write("*"*60 + "\n")
if os.path.exists(tmpoutbamname):
os.remove(tmpoutbamname)
if os.path.exists(tmpoutbamname + '.bai'):
os.remove(tmpoutbamname + '.bai')
return None
def main(args):
print "INFO\t" + now() + "\tstarting " + sys.argv[0] + " called with args: " + ' '.join(sys.argv) + "\n"
bedfile = open(args.varFileName, 'r')
reffile = pysam.Fastafile(args.refFasta)
if not os.path.exists(args.bamFileName + '.bai'):
sys.stderr.write("ERROR\t" + now() + "\tinput bam must be indexed, not .bai file found for " + args.bamFileName + " \n")
sys.exit(1)
alignopts = {}
if args.alignopts is not None:
alignopts = dict([o.split(':') for o in args.alignopts.split(',')])
aligners.checkoptions(args.aligner, alignopts, args.picardjar)
# load readlist to avoid, if specified
avoid = None
if args.avoidreads is not None:
avoid = dictlist(args.avoidreads)
# make a temporary file to hold mutated reads
outbam_mutsfile = "addindel." + str(uuid4()) + ".muts.bam"
bamfile = pysam.Samfile(args.bamFileName, 'rb')
outbam_muts = pysam.Samfile(outbam_mutsfile, 'wb', template=bamfile)
outbam_muts.close()
bamfile.close()
tmpbams = []
if not os.path.exists(args.tmpdir):
os.mkdir(args.tmpdir)
print "INFO\t" + now() + "\tcreated tmp directory: " + args.tmpdir
if not os.path.exists('addindel_logs_' + os.path.basename(args.outBamFile)):
os.mkdir('addindel_logs_' + os.path.basename(args.outBamFile))
print "created directory: addindel_logs_" + os.path.basename(args.outBamFile)
assert os.path.exists('addindel_logs_' + os.path.basename(args.outBamFile)), "could not create output directory!"
assert os.path.exists(args.tmpdir), "could not create temporary directory!"
pool = Pool(processes=int(args.procs))
results = []
ntried = 0
for bedline in bedfile:
if ntried < int(args.numsnvs) or int(args.numsnvs) == 0:
c = bedline.strip().split()
chrom = c[0]
start = int(c[1])
end = int(c[2])
vaf = float(c[3])
type = c[4]
ins = None
assert type in ('INS', 'DEL')
if type == 'INS':
ins = c[5]
# make mutation (submit job to thread pool)
result = pool.apply_async(makemut, [args, chrom, start, end, vaf, ins, avoid, alignopts])
results.append(result)
ntried += 1
for result in results:
try:
tmpbamlist = result.get()
if tmpbamlist is not None:
for tmpbam in tmpbamlist:
if os.path.exists(tmpbam):
tmpbams.append(tmpbam)
except AssertionError:
print "****************************************************"
print "* WARNING: assertion failed somewhere, check logs. *"
print "****************************************************"
if len(tmpbams) == 0:
print "INFO\t" + now() + "\tno succesful mutations"
sys.exit()
tmpbams.sort()
# merge tmp bams
if len(tmpbams) == 1:
os.rename(tmpbams[0],outbam_mutsfile)
elif len(tmpbams) > 1:
mergebams(tmpbams,outbam_mutsfile,maxopen=int(args.maxopen))
bedfile.close()
# cleanup
for bam in tmpbams:
if os.path.exists(bam):
os.remove(bam)
if os.path.exists(bam + '.bai'):
os.remove(bam + '.bai')
if args.skipmerge:
print "INFO\t" + now() + "\tskipping merge, plase merge reads from", outbam_mutsfile, "manually."
else:
if args.tagreads:
from bamsurgeon.markreads import markreads
tmp_tag_bam = 'tag.%s.bam' % str(uuid4())
markreads(outbam_mutsfile, tmp_tag_bam)
move(tmp_tag_bam, outbam_mutsfile)
print "INFO\t" + now() + "\ttagged reads."
print "INFO\t" + now() + "\tdone making mutations, merging mutations into", args.bamFileName, "-->", args.outBamFile
replace(args.bamFileName, outbam_mutsfile, args.outBamFile, seed=args.seed)
#cleanup
os.remove(outbam_mutsfile)
def run():
# run this script
parser = argparse.ArgumentParser(description='adds INDELs to reads, outputs modified reads as .bam along with mates')
    parser.add_argument('-v', '--varfile', dest='varFileName', required=True, help='Target regions to try and add an indel, as BED')
parser.add_argument('-f', '--bamfile', dest='bamFileName', required=True, help='sam/bam file from which to obtain reads')
parser.add_argument('-r', '--reference', dest='refFasta', required=True, help='reference genome, fasta indexed with bwa index -a stdsw _and_ samtools faidx')
parser.add_argument('-o', '--outbam', dest='outBamFile', required=True, help='.bam file name for output')
parser.add_argument('-s', '--snvfrac', dest='snvfrac', default=1, help='maximum allowable linked SNP MAF (for avoiding haplotypes) (default = 1)')
parser.add_argument('-m', '--mutfrac', dest='mutfrac', default=0.5, help='allelic fraction at which to make SNVs (default = 0.5)')
parser.add_argument('-n', '--numsnvs', dest='numsnvs', default=0, help="maximum number of mutations to try (default: entire input)")
parser.add_argument('-c', '--cnvfile', dest='cnvfile', default=None, help="tabix-indexed list of genome-wide absolute copy number values (e.g. 2 alleles = no change)")
parser.add_argument('-d', '--coverdiff', dest='coverdiff', default=0.1, help="allow difference in input and output coverage (default=0.1)")
parser.add_argument('-p', '--procs', dest='procs', default=1, help="split into multiple processes (default=1)")
parser.add_argument('--picardjar', default=None, help='path to picard.jar')
parser.add_argument('--mindepth', default=10, help='minimum read depth to make mutation (default = 10)')
parser.add_argument('--maxdepth', default=2000, help='maximum read depth to make mutation (default = 2000)')
parser.add_argument('--minmutreads', default=3, help='minimum number of mutated reads to output per site')
parser.add_argument('--avoidreads', default=None, help='file of read names to avoid (mutations will be skipped if overlap)')
parser.add_argument('--nomut', action='store_true', default=False, help="dry run")
parser.add_argument('--det', action='store_true', default=False, help="deterministic base changes: make transitions only")
parser.add_argument('--force', action='store_true', default=False, help="force mutation to happen regardless of nearby SNP or low coverage")
    parser.add_argument('--single', action='store_true', default=False, help="input BAM is single-ended (default is paired-end)")
parser.add_argument('--maxopen', dest='maxopen', default=1000, help="maximum number of open files during merge (default 1000)")
parser.add_argument('--requirepaired', action='store_true', default=False, help='skip mutations if unpaired reads are present')
parser.add_argument('--aligner', default='backtrack', help='supported aligners: ' + ','.join(aligners.supported_aligners_bam))
parser.add_argument('--alignopts', default=None, help='aligner-specific options as comma delimited list of option1:value1,option2:value2,...')
parser.add_argument('--tagreads', action='store_true', default=False, help='add BS tag to altered reads')
parser.add_argument('--skipmerge', action='store_true', default=False, help="final output is tmp file to be merged")
parser.add_argument('--ignorepileup', action='store_true', default=False, help="do not check pileup depth in mutation regions")
parser.add_argument('--tmpdir', default='addindel.tmp', help='temporary directory (default=addindel.tmp)')
parser.add_argument('--seed', default=None, help='seed random number generation')
args = parser.parse_args()
main(args)
if __name__ == '__main__':
run()
|
|
import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
parts = app_module.__name__.split('.')
prefix, last = parts[:-1], parts[-1]
try:
test_module = import_module('.'.join(prefix + [TEST_MODULE]))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
# app_module either points to a models.py file, or models/__init__.py
# Tests are therefore either in same directory, or one level up
if last == 'models':
app_root = import_module('.'.join(prefix))
else:
app_root = app_module
if not module_has_submodule(app_root, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def build_suite(app_module):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
test_module))
try:
suite.addTest(doctest.DocTestSuite(
test_module, checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
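# Hedged sketch (not part of Django): reorder_suite puts tests whose class
# appears in `classes` first and everything else last. _PriorityCase and
# _OtherCase below are illustrative test classes.
def _reorder_suite_sketch():
    class _PriorityCase(unittest.TestCase):
        def test_noop(self):
            pass
    class _OtherCase(unittest.TestCase):
        def test_noop(self):
            pass
    suite = unittest.TestSuite([_OtherCase('test_noop'),
                                _PriorityCase('test_noop')])
    # _PriorityCase lands in bins[0]; _OtherCase falls through to bins[-1].
    return reorder_suite(suite, (_PriorityCase,))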
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
    dependencies_map = {}
    # sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
        # Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
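# Hedged sketch (not part of Django): two test databases where the 'other'
# alias declares a TEST_DEPENDENCIES on 'default'; all names are illustrative.
def _dependency_ordered_sketch():
    test_databases = [
        ('sig_other', ('other_db', set(['other']))),
        ('sig_default', ('default_db', set(['default']))),
    ]
    dependencies = {'other': ['default'], 'default': []}
    # 'default' has no unmet dependencies, so it is resolved first and
    # 'other' follows once 'default' is in the resolved set.
    return dependency_ordered(test_databases, dependencies)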
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = (
connection.settings_dict['TEST_MIRROR'])
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = (
connection.settings_dict['TEST_DEPENDENCIES'])
else:
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get(
'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, True))
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
self.verbosity, autoclobber=not self.interactive)
else:
connection.settings_dict['NAME'] = test_db_name
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = (
connections[mirror_alias].settings_dict['NAME'])
connections[alias].features = connections[mirror_alias].features
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Alex Grigorevskiy
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Testing state space related functions.
"""
import unittest
import numpy as np
import GPy
import GPy.models.state_space_model as SS_model
from .state_space_main_tests import generate_x_points, generate_sine_data, \
generate_linear_data, generate_brownian_data, generate_linear_plus_sin
from nose import SkipTest
#from state_space_main_tests import generate_x_points, generate_sine_data, \
# generate_linear_data, generate_brownian_data, generate_linear_plus_sin
class StateSpaceKernelsTests(np.testing.TestCase):
def setUp(self):
pass
def run_for_model(self, X, Y, ss_kernel, kalman_filter_type = 'regular',
use_cython=False, check_gradients=True,
optimize=True, optimize_max_iters=250, predict_X=None,
compare_with_GP=True, gp_kernel=None,
mean_compare_decimal=10, var_compare_decimal=7):
m1 = SS_model.StateSpace(X,Y, ss_kernel,
kalman_filter_type=kalman_filter_type,
use_cython=use_cython)
m1.likelihood[:] = Y.var()/100.
if check_gradients:
self.assertTrue(m1.checkgrad())
        if optimize:
            m1.optimize(optimizer='lbfgsb', max_iters=optimize_max_iters)
if compare_with_GP and (predict_X is None):
predict_X = X
self.assertTrue(compare_with_GP)
if compare_with_GP:
m2 = GPy.models.GPRegression(X,Y, gp_kernel)
m2[:] = m1[:]
if (predict_X is not None):
x_pred_reg_1 = m1.predict(predict_X)
x_quant_reg_1 = m1.predict_quantiles(predict_X)
x_pred_reg_2 = m2.predict(predict_X)
x_quant_reg_2 = m2.predict_quantiles(predict_X)
np.testing.assert_array_almost_equal(x_pred_reg_1[0], x_pred_reg_2[0], mean_compare_decimal)
np.testing.assert_array_almost_equal(x_pred_reg_1[1], x_pred_reg_2[1], var_compare_decimal)
np.testing.assert_array_almost_equal(x_quant_reg_1[0], x_quant_reg_2[0], mean_compare_decimal)
np.testing.assert_array_almost_equal(x_quant_reg_1[1], x_quant_reg_2[1], mean_compare_decimal)
np.testing.assert_array_almost_equal(m1.gradient, m2.gradient, var_compare_decimal)
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), var_compare_decimal)
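    def _example_direct_comparison(self):
        # Illustrative sketch (added for clarity, not part of the original
        # tests): the essence of run_for_model() above -- fit the same data as
        # a state-space model and as a plain GP, share the parameters, and
        # compare predictions. The kernel choice and default filter settings
        # here are examples only, not prescribed by this file.
        X, Y = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0,
                                  noise_var=2.0, plot=False, points_num=50,
                                  x_interval=(0, 20), random=True)
        X.shape = (X.shape[0], 1); Y.shape = (Y.shape[0], 1)
        m_ss = SS_model.StateSpace(X, Y, GPy.kern.sde_Matern32(1))
        m_gp = GPy.models.GPRegression(X, Y, GPy.kern.Matern32(1))
        m_gp[:] = m_ss[:]  # copy (kernel + noise) parameters across
        np.testing.assert_array_almost_equal(
            m_ss.predict(X)[0], m_gp.predict(X)[0], 5)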
def test_Matern32_kernel(self,):
np.random.seed(234) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Matern32(1,active_dims=[0,])
gp_kernel = GPy.kern.Matern32(1,active_dims=[0,])
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
compare_with_GP=True,
gp_kernel=gp_kernel,
mean_compare_decimal=5, var_compare_decimal=5)
def test_Matern52_kernel(self,):
np.random.seed(234) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Matern52(1,active_dims=[0,])
gp_kernel = GPy.kern.Matern52(1,active_dims=[0,])
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
optimize = True, predict_X=X,
compare_with_GP=True, gp_kernel=gp_kernel,
mean_compare_decimal=5, var_compare_decimal=5)
def test_RBF_kernel(self,):
#import pdb;pdb.set_trace()
np.random.seed(234) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_RBF(1, 110., 1.5, active_dims=[0,], balance=True, approx_order=10)
gp_kernel = GPy.kern.RBF(1, 110., 1.5, active_dims=[0,])
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
optimize_max_iters=1000,
mean_compare_decimal=2, var_compare_decimal=1)
def test_periodic_kernel(self,):
np.random.seed(322) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
ss_kernel.lengthscale.constrain_bounded(0.27, 1000)
ss_kernel.period.constrain_bounded(0.17, 100)
gp_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
gp_kernel.lengthscale.constrain_bounded(0.27, 1000)
gp_kernel.period.constrain_bounded(0.17, 100)
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=3, var_compare_decimal=3)
def test_quasi_periodic_kernel(self,):
np.random.seed(329) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Matern32(1)*GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
ss_kernel.std_periodic.period.constrain_bounded(0.15, 100)
gp_kernel = GPy.kern.Matern32(1)*GPy.kern.StdPeriodic(1,active_dims=[0,])
gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
gp_kernel.std_periodic.period.constrain_bounded(0.15, 100)
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=1, var_compare_decimal=2)
def test_linear_kernel(self,):
np.random.seed(234) # seed the random number generator
(X,Y) = generate_linear_data(x_points=None, tangent=2.0, add_term=20.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + GPy.kern.sde_Bias(1, active_dims=[0,])
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,])
self.run_for_model(X, Y, ss_kernel, check_gradients= False,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=5, var_compare_decimal=5)
def test_brownian_kernel(self,):
np.random.seed(234) # seed the random number generator
(X,Y) = generate_brownian_data(x_points=None, kernel_var=2.0, noise_var = 0.1,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Brownian()
gp_kernel = GPy.kern.Brownian()
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=4, var_compare_decimal=4)
def test_exponential_kernel(self,):
np.random.seed(12345) # seed the random number generator
(X,Y) = generate_linear_data(x_points=None, tangent=1.0, add_term=20.0, noise_var=2.0,
plot = False, points_num=10, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
ss_kernel = GPy.kern.sde_Exponential(1, Y.var(), X.ptp()/2., active_dims=[0,])
gp_kernel = GPy.kern.Exponential(1, Y.var(), X.ptp()/2., active_dims=[0,])
Y -= Y.mean()
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
optimize_max_iters=1000,
mean_compare_decimal=2, var_compare_decimal=2)
def test_kernel_addition_svd(self,):
#np.random.seed(329) # seed the random number generator
np.random.seed(42)
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
# Sine data <-
Y = Y + Y1
Y -= Y.mean()
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
def get_new_kernels():
ss_kernel = GPy.kern.sde_Linear(1, X, variances=1) + GPy.kern.sde_StdPeriodic(1, period=5.0, variance=300, lengthscale=3, active_dims=[0,])
#ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
#ss_kernel.std_periodic.period.constrain_bounded(3, 8)
gp_kernel = GPy.kern.Linear(1, variances=1) + GPy.kern.StdPeriodic(1, period=5.0, variance=300, lengthscale=3, active_dims=[0,])
#gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
#gp_kernel.std_periodic.period.constrain_bounded(3, 8)
return ss_kernel, gp_kernel
# Cython is available only with svd.
ss_kernel, gp_kernel = get_new_kernels()
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
use_cython=True, optimize_max_iters=10, check_gradients=False,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=3, var_compare_decimal=3)
ss_kernel, gp_kernel = get_new_kernels()
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
use_cython=False, optimize_max_iters=10, check_gradients=False,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=3, var_compare_decimal=3)
def test_kernel_addition_regular(self,):
#np.random.seed(329) # seed the random number generator
np.random.seed(42)
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
# Sine data <-
Y = Y + Y1
Y -= Y.mean()
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
def get_new_kernels():
ss_kernel = GPy.kern.sde_Linear(1, X, variances=1) + GPy.kern.sde_StdPeriodic(1, period=5.0, variance=300, lengthscale=3, active_dims=[0,])
#ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
#ss_kernel.std_periodic.period.constrain_bounded(3, 8)
gp_kernel = GPy.kern.Linear(1, variances=1) + GPy.kern.StdPeriodic(1, period=5.0, variance=300, lengthscale=3, active_dims=[0,])
#gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
#gp_kernel.std_periodic.period.constrain_bounded(3, 8)
return ss_kernel, gp_kernel
ss_kernel, gp_kernel = get_new_kernels()
try:
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'regular',
use_cython=False, optimize_max_iters=10, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
except AssertionError:
raise SkipTest("Skipping Regular kalman filter for kernel addition, because it is not stable (normal situation) for this data.")
def test_kernel_multiplication(self,):
np.random.seed(329) # seed the random number generator
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=50, x_interval = (0, 20), random=True)
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
def get_new_kernels():
ss_kernel = GPy.kern.sde_Matern32(1)*GPy.kern.sde_Matern52(1)
            gp_kernel = GPy.kern.Matern32(1)*GPy.kern.Matern52(1)
return ss_kernel, gp_kernel
ss_kernel, gp_kernel = get_new_kernels()
#import ipdb;ipdb.set_trace()
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
use_cython=True, optimize_max_iters=10, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
ss_kernel, gp_kernel = get_new_kernels()
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'regular',
use_cython=False, optimize_max_iters=10, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
ss_kernel, gp_kernel = get_new_kernels()
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
use_cython=False, optimize_max_iters=10, check_gradients=True,
predict_X=X,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
def test_forecast_regular(self,):
# Generate data ->
np.random.seed(339) # seed the random number generator
#import pdb; pdb.set_trace()
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
Y = Y + Y1
X_train = X[X <= 20]
Y_train = Y[X <= 20]
X_test = X[X > 20]
Y_test = Y[X > 20]
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
X_train.shape = (X_train.shape[0],1); Y_train.shape = (Y_train.shape[0],1)
X_test.shape = (X_test.shape[0],1); Y_test.shape = (Y_test.shape[0],1)
# Generate data <-
#import pdb; pdb.set_trace()
periodic_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,]) + periodic_kernel
gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
gp_kernel.std_periodic.period.constrain_bounded(0.15, 100)
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + \
GPy.kern.sde_Bias(1, active_dims=[0,]) + periodic_kernel
ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
ss_kernel.std_periodic.period.constrain_bounded(0.15, 100)
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'regular',
use_cython=False, optimize_max_iters=30, check_gradients=True,
predict_X=X_test,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
def test_forecast_svd(self,):
# Generate data ->
np.random.seed(339) # seed the random number generator
#import pdb; pdb.set_trace()
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
Y = Y + Y1
X_train = X[X <= 20]
Y_train = Y[X <= 20]
X_test = X[X > 20]
Y_test = Y[X > 20]
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
X_train.shape = (X_train.shape[0],1); Y_train.shape = (Y_train.shape[0],1)
X_test.shape = (X_test.shape[0],1); Y_test.shape = (Y_test.shape[0],1)
# Generate data <-
#import pdb; pdb.set_trace()
periodic_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,]) + periodic_kernel
gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
gp_kernel.std_periodic.period.constrain_bounded(0.15, 100)
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + \
GPy.kern.sde_Bias(1, active_dims=[0,]) + periodic_kernel
ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
ss_kernel.std_periodic.period.constrain_bounded(0.15, 100)
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'svd',
use_cython=False, optimize_max_iters=30, check_gradients=False,
predict_X=X_test,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
def test_forecast_svd_cython(self,):
# Generate data ->
np.random.seed(339) # seed the random number generator
#import pdb; pdb.set_trace()
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
plot = False, points_num=100, x_interval = (0, 40), random=True)
Y = Y + Y1
X_train = X[X <= 20]
Y_train = Y[X <= 20]
X_test = X[X > 20]
Y_test = Y[X > 20]
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
X_train.shape = (X_train.shape[0],1); Y_train.shape = (Y_train.shape[0],1)
X_test.shape = (X_test.shape[0],1); Y_test.shape = (Y_test.shape[0],1)
# Generate data <-
#import pdb; pdb.set_trace()
periodic_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,]) + periodic_kernel
gp_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
gp_kernel.std_periodic.period.constrain_bounded(0.15, 100)
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + \
GPy.kern.sde_Bias(1, active_dims=[0,]) + periodic_kernel
ss_kernel.std_periodic.lengthscale.constrain_bounded(0.25, 1000)
ss_kernel.std_periodic.period.constrain_bounded(0.15, 100)
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'svd',
use_cython=True, optimize_max_iters=30, check_gradients=False,
predict_X=X_test,
gp_kernel=gp_kernel,
mean_compare_decimal=2, var_compare_decimal=2)
if __name__ == "__main__":
print("Running state-space inference tests...")
unittest.main()
#tt = StateSpaceKernelsTests('test_RBF_kernel')
#import pdb; pdb.set_trace()
#tt.test_Matern32_kernel()
#tt.test_Matern52_kernel()
#tt.test_RBF_kernel()
#tt.test_periodic_kernel()
#tt.test_quasi_periodic_kernel()
#tt.test_linear_kernel()
#tt.test_brownian_kernel()
#tt.test_exponential_kernel()
#tt.test_kernel_addition()
#tt.test_kernel_multiplication()
#tt.test_forecast()
|
|
import inspect
import sys
from kafka.errors import (
KafkaError,
IllegalStateError,
IllegalArgumentError,
NoBrokersAvailable,
NodeNotReadyError,
KafkaProtocolError,
CorrelationIdError,
Cancelled,
TooManyInFlightRequests,
StaleMetadata,
UnrecognizedBrokerVersion,
CommitFailedError,
AuthenticationMethodNotSupported,
AuthenticationFailedError,
BrokerResponseError,
# Numbered errors
NoError, # 0
UnknownError, # -1
OffsetOutOfRangeError, # 1
CorruptRecordException, # 2
UnknownTopicOrPartitionError, # 3
InvalidFetchRequestError, # 4
LeaderNotAvailableError, # 5
NotLeaderForPartitionError, # 6
RequestTimedOutError, # 7
BrokerNotAvailableError, # 8
ReplicaNotAvailableError, # 9
MessageSizeTooLargeError, # 10
StaleControllerEpochError, # 11
OffsetMetadataTooLargeError, # 12
StaleLeaderEpochCodeError, # 13
GroupLoadInProgressError, # 14
GroupCoordinatorNotAvailableError, # 15
NotCoordinatorForGroupError, # 16
InvalidTopicError, # 17
RecordListTooLargeError, # 18
NotEnoughReplicasError, # 19
NotEnoughReplicasAfterAppendError, # 20
InvalidRequiredAcksError, # 21
IllegalGenerationError, # 22
InconsistentGroupProtocolError, # 23
InvalidGroupIdError, # 24
UnknownMemberIdError, # 25
InvalidSessionTimeoutError, # 26
RebalanceInProgressError, # 27
InvalidCommitOffsetSizeError, # 28
TopicAuthorizationFailedError, # 29
GroupAuthorizationFailedError, # 30
ClusterAuthorizationFailedError, # 31
InvalidTimestampError, # 32
UnsupportedSaslMechanismError, # 33
IllegalSaslStateError, # 34
UnsupportedVersionError, # 35
TopicAlreadyExistsError, # 36
InvalidPartitionsError, # 37
InvalidReplicationFactorError, # 38
InvalidReplicationAssignmentError, # 39
InvalidConfigurationError, # 40
NotControllerError, # 41
InvalidRequestError, # 42
UnsupportedForMessageFormatError, # 43
PolicyViolationError, # 44
KafkaUnavailableError,
KafkaTimeoutError,
KafkaConnectionError,
UnsupportedCodecError,
)
__all__ = [
# aiokafka custom errors
"ConsumerStoppedError", "NoOffsetForPartitionError", "RecordTooLargeError",
"ProducerClosed",
# Kafka Python errors
"KafkaError",
"IllegalStateError",
"IllegalArgumentError",
"NoBrokersAvailable",
"NodeNotReadyError",
"KafkaProtocolError",
"CorrelationIdError",
"Cancelled",
"TooManyInFlightRequests",
"StaleMetadata",
"UnrecognizedBrokerVersion",
"CommitFailedError",
"AuthenticationMethodNotSupported",
"AuthenticationFailedError",
"BrokerResponseError",
# Numbered errors
"NoError", # 0
"UnknownError", # -1
"OffsetOutOfRangeError", # 1
"CorruptRecordException", # 2
"UnknownTopicOrPartitionError", # 3
"InvalidFetchRequestError", # 4
"LeaderNotAvailableError", # 5
"NotLeaderForPartitionError", # 6
"RequestTimedOutError", # 7
"BrokerNotAvailableError", # 8
"ReplicaNotAvailableError", # 9
"MessageSizeTooLargeError", # 10
"StaleControllerEpochError", # 11
"OffsetMetadataTooLargeError", # 12
"StaleLeaderEpochCodeError", # 13
"GroupLoadInProgressError", # 14
"GroupCoordinatorNotAvailableError", # 15
"NotCoordinatorForGroupError", # 16
"InvalidTopicError", # 17
"RecordListTooLargeError", # 18
"NotEnoughReplicasError", # 19
"NotEnoughReplicasAfterAppendError", # 20
"InvalidRequiredAcksError", # 21
"IllegalGenerationError", # 22
"InconsistentGroupProtocolError", # 23
"InvalidGroupIdError", # 24
"UnknownMemberIdError", # 25
"InvalidSessionTimeoutError", # 26
"RebalanceInProgressError", # 27
"InvalidCommitOffsetSizeError", # 28
"TopicAuthorizationFailedError", # 29
"GroupAuthorizationFailedError", # 30
"ClusterAuthorizationFailedError", # 31
"InvalidTimestampError", # 32
"UnsupportedSaslMechanismError", # 33
"IllegalSaslStateError", # 34
"UnsupportedVersionError", # 35
"TopicAlreadyExistsError", # 36
"InvalidPartitionsError", # 37
"InvalidReplicationFactorError", # 38
"InvalidReplicationAssignmentError", # 39
"InvalidConfigurationError", # 40
"NotControllerError", # 41
"InvalidRequestError", # 42
"UnsupportedForMessageFormatError", # 43
"PolicyViolationError", # 44
"KafkaUnavailableError",
"KafkaTimeoutError",
"KafkaConnectionError",
"UnsupportedCodecError",
]
class CoordinatorNotAvailableError(GroupCoordinatorNotAvailableError):
message = "COORDINATOR_NOT_AVAILABLE"
class NotCoordinatorError(NotCoordinatorForGroupError):
message = "NOT_COORDINATOR"
class CoordinatorLoadInProgressError(GroupLoadInProgressError):
message = "COORDINATOR_LOAD_IN_PROGRESS"
InvalidMessageError = CorruptRecordException
GroupCoordinatorNotAvailableError = CoordinatorNotAvailableError
NotCoordinatorForGroupError = NotCoordinatorError
GroupLoadInProgressError = CoordinatorLoadInProgressError
class ConsumerStoppedError(Exception):
""" Raised on `get*` methods of Consumer if it's cancelled, even pending
ones.
"""
class IllegalOperation(Exception):
""" Raised if you try to execute an operation, that is not available with
current configuration. For example trying to commit if no group_id was
given.
"""
class NoOffsetForPartitionError(KafkaError):
pass
class RecordTooLargeError(KafkaError):
pass
class ProducerClosed(KafkaError):
pass
class ProducerFenced(KafkaError):
"""Another producer with the same transactional ID went online.
    NOTE: The broker also appears to raise this error when the transaction
    timeout has elapsed.
"""
def __init__(
self,
msg="There is a newer producer using the same transactional_id or"
"transaction timeout occurred (check that processing time is "
"below transaction_timeout_ms)"
):
super().__init__(msg)
class OutOfOrderSequenceNumber(BrokerResponseError):
errno = 45
message = 'OUT_OF_ORDER_SEQUENCE_NUMBER'
description = 'The broker received an out of order sequence number'
class DuplicateSequenceNumber(BrokerResponseError):
errno = 46
message = 'DUPLICATE_SEQUENCE_NUMBER'
description = 'The broker received a duplicate sequence number'
class InvalidProducerEpoch(BrokerResponseError):
errno = 47
message = 'INVALID_PRODUCER_EPOCH'
description = (
'Producer attempted an operation with an old epoch. Either '
'there is a newer producer with the same transactionalId, or the '
'producer\'s transaction has been expired by the broker.'
)
class InvalidTxnState(BrokerResponseError):
errno = 48
message = 'INVALID_TXN_STATE'
description = (
'The producer attempted a transactional operation in an invalid state'
)
class InvalidProducerIdMapping(BrokerResponseError):
errno = 49
message = 'INVALID_PRODUCER_ID_MAPPING'
description = (
'The producer attempted to use a producer id which is not currently '
'assigned to its transactional id'
)
class InvalidTransactionTimeout(BrokerResponseError):
errno = 50
message = 'INVALID_TRANSACTION_TIMEOUT'
description = (
'The transaction timeout is larger than the maximum value allowed by'
' the broker (as configured by transaction.max.timeout.ms).'
)
class ConcurrentTransactions(BrokerResponseError):
errno = 51
message = 'CONCURRENT_TRANSACTIONS'
description = (
'The producer attempted to update a transaction while another '
'concurrent operation on the same transaction was ongoing'
)
class TransactionCoordinatorFenced(BrokerResponseError):
errno = 52
message = 'TRANSACTION_COORDINATOR_FENCED'
description = (
'Indicates that the transaction coordinator sending a WriteTxnMarker'
' is no longer the current coordinator for a given producer'
)
class TransactionalIdAuthorizationFailed(BrokerResponseError):
errno = 53
message = 'TRANSACTIONAL_ID_AUTHORIZATION_FAILED'
description = 'Transactional Id authorization failed'
class SecurityDisabled(BrokerResponseError):
errno = 54
message = 'SECURITY_DISABLED'
description = 'Security features are disabled'
class OperationNotAttempted(BrokerResponseError):
errno = 55
message = 'OPERATION_NOT_ATTEMPTED'
description = (
'The broker did not attempt to execute this operation. This may happen'
' for batched RPCs where some operations in the batch failed, causing '
'the broker to respond without trying the rest.'
)
class KafkaStorageError(BrokerResponseError):
errno = 56
message = 'KAFKA_STORAGE_ERROR'
description = (
        'Disk error when trying to access a log file on the disk.'
)
class LogDirNotFound(BrokerResponseError):
errno = 57
message = 'LOG_DIR_NOT_FOUND'
description = (
'The user-specified log directory is not found in the broker config.'
)
class SaslAuthenticationFailed(BrokerResponseError):
errno = 58
message = 'SASL_AUTHENTICATION_FAILED'
description = 'SASL Authentication failed.'
class UnknownProducerId(BrokerResponseError):
errno = 59
message = 'UNKNOWN_PRODUCER_ID'
description = (
'This exception is raised by the broker if it could not locate the '
'producer metadata associated with the producerId in question. This '
'could happen if, for instance, the producer\'s records were deleted '
'because their retention time had elapsed. Once the last records of '
'the producerId are removed, the producer\'s metadata is removed from'
' the broker, and future appends by the producer will return this '
'exception.'
)
class ReassignmentInProgress(BrokerResponseError):
errno = 60
message = 'REASSIGNMENT_IN_PROGRESS'
description = 'A partition reassignment is in progress'
class DelegationTokenAuthDisabled(BrokerResponseError):
errno = 61
message = 'DELEGATION_TOKEN_AUTH_DISABLED'
description = 'Delegation Token feature is not enabled'
class DelegationTokenNotFound(BrokerResponseError):
errno = 62
message = 'DELEGATION_TOKEN_NOT_FOUND'
description = 'Delegation Token is not found on server.'
class DelegationTokenOwnerMismatch(BrokerResponseError):
errno = 63
message = 'DELEGATION_TOKEN_OWNER_MISMATCH'
description = 'Specified Principal is not valid Owner/Renewer.'
class DelegationTokenRequestNotAllowed(BrokerResponseError):
errno = 64
message = 'DELEGATION_TOKEN_REQUEST_NOT_ALLOWED'
description = (
'Delegation Token requests are not allowed on PLAINTEXT/1-way SSL '
'channels and on delegation token authenticated channels.'
)
class DelegationTokenAuthorizationFailed(BrokerResponseError):
errno = 65
message = 'DELEGATION_TOKEN_AUTHORIZATION_FAILED'
description = 'Delegation Token authorization failed.'
class DelegationTokenExpired(BrokerResponseError):
errno = 66
message = 'DELEGATION_TOKEN_EXPIRED'
description = 'Delegation Token is expired.'
class InvalidPrincipalType(BrokerResponseError):
errno = 67
message = 'INVALID_PRINCIPAL_TYPE'
description = 'Supplied principalType is not supported'
class NonEmptyGroup(BrokerResponseError):
errno = 68
message = 'NON_EMPTY_GROUP'
description = 'The group is not empty'
class GroupIdNotFound(BrokerResponseError):
errno = 69
message = 'GROUP_ID_NOT_FOUND'
description = 'The group id does not exist'
class FetchSessionIdNotFound(BrokerResponseError):
errno = 70
message = 'FETCH_SESSION_ID_NOT_FOUND'
description = 'The fetch session ID was not found'
class InvalidFetchSessionEpoch(BrokerResponseError):
errno = 71
message = 'INVALID_FETCH_SESSION_EPOCH'
description = 'The fetch session epoch is invalid'
class ListenerNotFound(BrokerResponseError):
errno = 72
message = 'LISTENER_NOT_FOUND'
description = (
'There is no listener on the leader broker that matches the'
' listener on which metadata request was processed'
)
def _iter_broker_errors():
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and issubclass(obj, BrokerResponseError) and \
obj != BrokerResponseError:
yield obj
kafka_errors = {x.errno: x for x in _iter_broker_errors()}
def for_code(error_code):
return kafka_errors.get(error_code, UnknownError)
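# Illustrative usage sketch (not part of the original module): resolve the
# numeric error code from a broker response into its exception class and raise
# it unless the code means "no error". `for_code` falls back to UnknownError
# for codes this module does not know about.
def _example_raise_for_error_code(error_code):
    error_class = for_code(error_code)
    if error_class is not NoError:
        raise error_class()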
|
|
import json
import click
import six
from odlclient import client
from odlclient import cmdutils
@click.group()
@click.option('--debug/--no-debug', default=False,
help='Displays http request/response')
@click.option('--verbose/--no-verbose', default=False,
help='Displays http response body')
@click.pass_context
def cmd(ctx, debug, verbose):
ctx.obj = {'debug': debug, 'verbose': verbose}
def _get_odl_client():
odl_client = client.ODL.get_client_with_env()
ctx = click.get_current_context()
odl_client.debug = ctx.obj['debug']
odl_client.verbose = ctx.obj['verbose']
return odl_client
@cmd.group(help='Nodes')
def node():
pass
_node_formatter = {
'table_count': lambda x: len([t for t in x.tables if len(t.flows)]),
'connector_count': lambda x: len(x.connectors),
}
@node.command(help='Displays node list') # noqa
def list():
odl = _get_odl_client()
nodes = odl.nodes.list_all()
columns = ['id', 'ip_address', 'connector_count', 'table_count',
'hardware', 'software']
cmdutils.print_list(columns, nodes, formatter=_node_formatter)
@node.command(help='Displays Node details')
@click.argument('node-id')
def show(node_id):
odl = _get_odl_client()
node = odl.nodes.get(node_id)
columns = ['id', 'ip_address', 'connector_count', 'table_count',
'hardware', 'software', 'switch_features', 'description',
'meter_features', 'manufacturer', 'serial_number']
cmdutils.print_desc(columns, node, formatter=_node_formatter)
@cmd.group(help='Node Connectors')
def connector():
pass
@connector.command(help='Displays Node connector list') # noqa
@click.argument('node-id')
def list(node_id):
odl = _get_odl_client()
connectors = odl.nodes.get(node_id).connectors
cmdutils.print_list(['port_number', 'name', 'id', 'state'], connectors)
@connector.command(help='Displays Node connector details') # noqa
@click.argument('node-id')
@click.argument('port-number')
def show(node_id, port_number):
odl = _get_odl_client()
connectors = odl.nodes.get(node_id).connectors
connector = [c for c in connectors if c.port_number == port_number][0]
columns = [
"id", "port_number", "name", "current_speed",
"flow_capable_node_connector_statistics", "advertised_features",
"configuration", "hardware_address", "maximum_speed", "state",
"supported", "current_feature", "peer_features"]
cmdutils.print_desc(columns, connector)
@cmd.group(help='Table')
def table():
pass
_flow_formatter = {
'flow_count': lambda x: len(x.flows)
}
@table.command(help='Displays Table list') # noqa
@click.argument('node-id')
def list(node_id):
odl = _get_odl_client()
node = odl.nodes.get(node_id)
cmdutils.print_list(
['id', 'flow_count'],
sorted([t for t in node.tables if len(t.flows)], key=lambda x: x.id),
formatter=_flow_formatter)
@table.command(help='Displays Table details') # noqa
@click.argument('node-id')
@click.argument('table-id', type=int)
def show(node_id, table_id):
odl = _get_odl_client()
node = odl.nodes.get(node_id)
t = [t for t in node.tables if t.id == table_id][0]
columns = ['id', 'flow_count', 'flow_hash_id_map',
'aggregate_flow_statistics', 'flow_table_statistics']
cmdutils.print_desc(columns, t, formatter=_flow_formatter)
@cmd.group(help='Flows')
def flow():
pass
def format_dict(d):
return ', '.join(
[': '.join([k, json.dumps(v)]) for k, v in six.iteritems(d)])
def match_formatter(x):
return format_dict(x.match)
def instruction_formatter(x):
actions = []
if not x.instructions:
return 'Drop'
for instruction in sorted(
x.instructions['instruction'], key=lambda i: i['order']):
del instruction['order']
if 'apply-actions' in instruction:
for action in sorted(
instruction['apply-actions']['action'],
key=lambda a: a['order']):
del action['order']
actions.append(format_dict(action))
else:
actions.append(format_dict(instruction))
return ', '.join(actions)
@flow.command(help='Displays Flow list') # noqa
@click.argument('node-id')
def list(node_id):
odl = _get_odl_client()
node = odl.nodes.get(node_id)
flows = []
for table in node.tables:
flows += table.flows
cmdutils.print_list(
['id', 'table_id', 'priority', 'match', 'instructions'],
sorted(flows, key=lambda f: (f.table_id, -f.priority)),
formatter={'match': match_formatter,
'instructions': instruction_formatter})
class InstructionKeys(object):
def __init__(self):
self.actions = {}
def __call__(self, key):
def deco(func):
self.actions[key] = func
return func
return deco
def gen(self, key, value, order):
return self.actions[key](value, order)
instruction_keys = InstructionKeys()
@instruction_keys('output')
def _output(value, order):
return {'output-action': {
'output-node-connector': int(value)}, 'order': order}
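# Illustrative sketch: further instruction keywords can be registered the same
# way as 'output' above. The 'drop-action' payload used here is an assumption
# about the ODL flow model, not something defined in this file.
@instruction_keys('drop')
def _drop(value, order):
    # `value` is ignored; a drop action carries no parameters.
    return {'drop-action': {}, 'order': order}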
def _parse_instructions(instructions):
if not instructions:
return None
actions = []
order = 0
for instruction in instructions.split(','):
key, v = instruction.strip().split(':', 1)
actions.append(instruction_keys.gen(key, v, order))
order += 1
return {
'instruction': [
{'apply-actions': {'action': actions}, 'order': 0}]
}
def _parse_mac(mac_addr):
if '/' in mac_addr:
addr, mask = mac_addr.split('/')
else:
addr = mac_addr
mask = 'ff:ff:ff:ff:ff:ff'
return {
'address': addr,
'mask': mask,
}
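# Illustrative sketch (not part of the original CLI) of what the two parsers
# above produce for typical command-line input:
#
#   _parse_mac('aa:bb:cc:dd:ee:ff')
#       -> {'address': 'aa:bb:cc:dd:ee:ff', 'mask': 'ff:ff:ff:ff:ff:ff'}
#   _parse_instructions('output:1,output:2')
#       -> {'instruction': [{'order': 0, 'apply-actions': {'action': [
#              {'output-action': {'output-node-connector': 1}, 'order': 0},
#              {'output-action': {'output-node-connector': 2}, 'order': 1}]}}]}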
@flow.command(help='Creates a Flow')
@click.argument('node-id')
@click.argument('table-id')
@click.argument('flow-id')
@click.option('--priority', type=int)
@click.option('--in-port', help='Input Port Number(Match)')
@click.option('--dl-src',
help='Ethernet Source Address(Match). xx:xx:xx:xx:xx:xx/xx:xx:xx:xx:xx:xx or xx:xx:xx:xx:xx:xx') # noqa
@click.option('--dl-dst',
help='Ethernet Destination Address(Match). e.g. xx:xx:xx:xx:xx:xx/xx:xx:xx:xx:xx:xx or xx:xx:xx:xx:xx:xx') # noqa
@click.option('--tun-id', help='tunnel ID')
@click.option('--instructions',
              help='Instructions. e.g. output:1,output:2')
def create(node_id, table_id, flow_id, priority, in_port, dl_src, dl_dst,
tun_id, instructions):
odl = _get_odl_client()
    # Match rule
match = {}
if in_port:
match['in-port'] = in_port
if dl_src or dl_dst:
dl = {}
if dl_src:
dl['ethernet-source'] = _parse_mac(dl_src)
if dl_dst:
dl['ethernet-destination'] = _parse_mac(dl_dst)
match['ethernet-match'] = dl
if tun_id:
match['tunnel'] = {'tunnel-id': tun_id}
# Instructions
parsed_instructions = _parse_instructions(instructions)
# Create a Flow
data = odl.flows.create(
node_id, table_id, flow_id, priority, match=match,
instructions=parsed_instructions
)
    print(data)
def main():
cmd()
if __name__ == '__main__':
main()
|
|
import logging
import reversion
import json
from django.db import models
from django.conf import settings
from rest_framework.exceptions import ValidationError
from ..fields import SequenceField
from .base import AbstractBase, SequenceMixin
LOGGER = logging.getLogger(__file__)
@reversion.register
class ContactType(AbstractBase):
"""
Captures the different types of contacts that we have in the real world.
The most common contacts are email, phone numbers, landline etc.
"""
name = models.CharField(
max_length=100, unique=True,
help_text="A short name, preferrably 6 characters long, representing a"
"certain type of contact e.g EMAIL")
description = models.TextField(
null=True, blank=True,
help_text='A brief description of the contact type.')
def __unicode__(self):
return self.name
@reversion.register
class Contact(AbstractBase):
"""
Holds ways in which entities can communicate.
    The communication channels are not restricted, provided that all parties
    that wish to communicate are able to do so. These channels may include
    emails, phone numbers, landlines etc.
"""
contact = models.CharField(
max_length=100,
help_text="The actual contact of the person e.g test@mail.com,"
" 07XXYYYZZZ")
contact_type = models.ForeignKey(
ContactType,
help_text="The type of contact that the given contact is e.g email"
" or phone number",
on_delete=models.PROTECT)
def __unicode__(self):
return "{}::{}".format(self.contact_type.name, self.contact)
class Town(AbstractBase):
name = models.CharField(
max_length=100, unique=True, null=True, blank=True,
help_text="Name of the town")
def __unicode__(self):
return self.name
@reversion.register
class PhysicalAddress(AbstractBase):
"""
The physical properties of a facility.
These are physical properties of the facility and included is the
plot number and nearest landmark. This information in conjunction with
GPS codes is useful in locating the facility.
"""
town = models.ForeignKey(
Town, null=True, blank=True,
help_text="The town where the entity is located e.g Nakuru")
postal_code = models.CharField(
null=True, blank=True,
max_length=100,
help_text="The 5 digit number for the post office address. e.g 00900")
address = models.TextField(
null=True, blank=True,
help_text="This is the actual post office number of the entity"
"e.g 6790")
nearest_landmark = models.TextField(
null=True, blank=True,
help_text="well-known physical features /structure that can be used to"
" simplify directions to a given place. e.g town market or village ")
plot_number = models.CharField(
max_length=100, null=True, blank=True,
help_text="This is the same number found on the title deeds of the"
"piece of land on which this facility is located")
def __unicode__(self):
return "{}: {}".format(self.postal_code, self.address)
class Meta(AbstractBase.Meta):
verbose_name_plural = 'physical addresses'
class AdministrativeUnitBase(SequenceMixin, AbstractBase):
"""Base class for County, Constituency and Ward"""
name = models.CharField(
max_length=100,
help_text="Name of the administrative unit e.g Nairobi")
code = SequenceField(
unique=True,
help_text="A unique_code 4 digit number representing the region.")
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.code:
self.code = self.generate_next_code_sequence()
super(AdministrativeUnitBase, self).save(*args, **kwargs)
class Meta(AbstractBase.Meta):
abstract = True
def _lookup_facility_coordinates(area_boundary):
"""A helper used by the County, Constituency and Ward classes"""
from mfl_gis.models import FacilityCoordinates
facility_coordinates = FacilityCoordinates.objects.filter(
coordinates__contained=area_boundary.mpoly
) if area_boundary and area_boundary.mpoly else []
return {
facility_coordinate.facility.name:
json.loads(facility_coordinate.coordinates.geojson)
for facility_coordinate in facility_coordinates
}
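# Illustrative sketch (not part of the original module): the mapping returned
# by _lookup_facility_coordinates() is keyed by facility name and valued by a
# parsed GeoJSON dict, so a typical consumer looks like:
#
#     county = County.objects.first()
#     for name, geojson_point in county.facility_coordinates.items():
#         print(name, geojson_point["coordinates"])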
@reversion.register
class County(AdministrativeUnitBase):
"""
This is the largest administrative/political division in Kenya.
    Kenya is divided into 47 different counties.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
@property
def facility_coordinates(self):
"""Look up the facilities that are in this unit's boundaries"""
try:
return _lookup_facility_coordinates(self.countyboundary)
except: # Handling RelatedObjectDoesNotExist is a little funky
LOGGER.info('No boundaries found for {}'.format(self))
return _lookup_facility_coordinates(None)
class Meta(AdministrativeUnitBase.Meta):
verbose_name_plural = 'counties'
@reversion.register
class Constituency(AdministrativeUnitBase):
"""
Counties in Kenya are divided into constituencies.
    A Constituency is a political sub-division of a county.
    There are 290 constituencies in total.
    In most cases they coincide with sub-counties.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
county = models.ForeignKey(
County,
help_text="Name of the county where the constituency is located",
on_delete=models.PROTECT)
@property
def facility_coordinates(self):
"""Look up the facilities that are in this unit's boundaries"""
try:
return _lookup_facility_coordinates(self.constituencyboundary)
except: # Handling RelatedObjectDoesNotExist is a little funky
LOGGER.info('No boundaries found for {}'.format(self))
return _lookup_facility_coordinates(None)
class Meta(AdministrativeUnitBase.Meta):
verbose_name_plural = 'constituencies'
unique_together = ('name', 'county')
@reversion.register
class Ward(AdministrativeUnitBase):
"""
    The Kenyan counties are subdivided into wards.
    This is an administrative sub-division of the counties.
    A constituency can have one or more wards.
    In most cases the sub-county is also the constituency.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
constituency = models.ForeignKey(
Constituency,
help_text="The constituency where the ward is located.",
on_delete=models.PROTECT)
@property
def county(self):
return self.constituency.county
@property
def facility_coordinates(self):
"""Look up the facilities that are in this unit's boundaries"""
try:
return _lookup_facility_coordinates(self.wardboundary)
except: # Handling RelatedObjectDoesNotExist is a little funky
LOGGER.info('No boundaries found for {}'.format(self))
return _lookup_facility_coordinates(None)
@reversion.register
class UserCounty(AbstractBase):
"""
    Stores a record of the counties that a user has been in charge of.
    A user can be in charge of only one county at a time.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='user_counties',
on_delete=models.PROTECT)
county = models.ForeignKey(County, on_delete=models.PROTECT)
def __unicode__(self):
return "{}: {}".format(self.user.email, self.county.name)
def validate_only_one_county_active(self):
"""
        A user can be in charge of only one county at a time.
"""
counties = self.__class__.objects.filter(
user=self.user, active=True, deleted=False)
if counties.count() > 0 and not self.deleted:
raise ValidationError(
"A user can only be active in one county at a time")
def save(self, *args, **kwargs):
self.validate_only_one_county_active()
super(UserCounty, self).save(*args, **kwargs)
class Meta(AbstractBase.Meta):
verbose_name_plural = 'user_counties'
@reversion.register
class UserContact(AbstractBase):
"""
Stores a user's contacts.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='user_contacts', on_delete=models.PROTECT)
contact = models.ForeignKey(Contact)
def __unicode__(self):
return "{}: {}".format(self.user.get_full_name, self.contact.contact)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.pywrap_tensorflow import StatusNotOK
def check_op_order(graph):
"""Sanity check on the ordering of op id."""
for op in graph.get_operations():
for v in op.inputs:
assert v.op._id < op._id or op.type == "Merge", (
"The id of %s must be less than the id of %s" % (v.op.name, op.name))
return True
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def isum(s):
i = tf.constant(0, name="i")
c = lambda i, s: tf.less(i, 10)
b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
_, r_s = control_flow_ops.While(c, b, [i, s])
return r_s
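# Note (added for clarity, not part of the original file): isum(s) runs its
# body for i = 0..9, so it returns s + (0 + 1 + ... + 9) = s + 45; with
# s = tf.constant(0) this is the 45 asserted in testWhile_2 below.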
class ControlFlowTest(tf.test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = tf.Variable(7)
v = control_flow_ops._Identity(v)
op = tf.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
self.assertTrue(isinstance(v2, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = tf.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = tf.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = tf.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
tf.initialize_all_variables().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = tf.Variable(7)
p = tf.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v.ref(), p)
v2 = tf.assign(v1[1], 9)
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = tf.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = tf.mul(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = tf.constant([1, 2, 3, 4, 5, 6])
indices = tf.constant([0, 2, 4, 6, 8, 10])
data = tf.IndexedSlices(values, indices)
pred = tf.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = tf.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
StatusNotOK, lambda e: "The tensor returned for" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
zero = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
less_op = tf.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
id_op = tf.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
five = tf.constant(5)
mul_op = tf.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = tf.convert_to_tensor(False)
n = tf.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
with tf.device("/gpu:0"):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
with tf.device("/gpu:0"):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testCondBool(self):
values = tf.constant(10)
fn1 = lambda: tf.add(values, 1)
fn2 = lambda: tf.sub(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = tf.cond(False, fn1, fn2)
def testCondIndexedSlices(self):
with self.test_session():
values = tf.constant(10)
indices = tf.constant(0)
x = tf.IndexedSlices(values, indices)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = tf.constant(10)
i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
x = tf.IndexedSlices(values, i_32)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with tf.device("/cpu:0"):
v = tf.Variable(7.0)
x = tf.constant(10.0)
pred = tf.less(1.0, 2.0)
fn1 = lambda: tf.add(v, 1.0)
fn2 = lambda: tf.sub(x, 1.0)
r = tf.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
r = tf.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = tf.constant(10)
r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
fn3 = lambda: tf.add(tf.cond(pred, fn1, fn2), 1)
r = tf.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = tf.Variable(7)
v2 = tf.Variable(7)
v3 = tf.Variable(7)
age = tf.constant(3)
max_age = tf.constant(2)
pred = tf.greater(age, max_age)
fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = tf.constant(True, name="alive")
count = tf.constant(0, name="count")
def body(i):
return tf.cond(
alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = tf.Variable([7])
age = tf.constant(3)
pred = tf.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = tf.constant(10)
y = tf.constant(200)
pred = tf.less(1, 2)
fn1 = lambda: [tf.add(x, 1), tf.add(x, 2)]
fn2 = lambda: [y, y]
r = tf.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondGrad_1(self):
with self.test_session():
x = tf.constant(10.0, name="x")
pred = tf.less(1, 2)
fn1 = lambda: tf.identity(x)
fn2 = lambda: tf.identity(x)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
x = tf.constant(10.0)
pred = tf.less(c, 2)
fn1 = lambda: tf.mul(x, 42.0)
fn2 = lambda: tf.mul(x, 3.0)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = tf.Variable([1.0, 42.0])
c = tf.placeholder(tf.int32, shape=[])
pred = tf.less(c, 2)
fn1 = lambda: tf.identity(v1)
fn2 = lambda: tf.gather(v1, [1, 1])
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [v1])[0]
tf.initialize_all_variables().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
      dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i])
                  for i in range(2)]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
      dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i])
                  for i in range(2)]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 10,000 iterations took 0.21s.
def testWhile_1(self):
with self.test_session():
n = tf.constant(0)
c = lambda x: tf.less(x, 10000)
b = lambda x: tf.add(x, 1)
r = control_flow_ops.While(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 100)
self.assertEqual(x.dtype, tf.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return (i+1, gen_array_ops._ref_identity(x))
r = control_flow_ops.While(c, b, [i, x], parallel_iterations=5)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = tf.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.add(m, 1), tf.add(c, 1)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
d = tf.convert_to_tensor(100)
r = control_flow_ops.While(
lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.gather(x, i), tf.gather(x, i)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = control_flow_ops.While(
lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = tf.slice(x, tf.expand_dims(i, 0), [1])
o = tf.concat(0, [o, c])
i = tf.add(i, 1)
return [i, c, o]
i = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor([0])
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = control_flow_ops.While(
lambda i, c, o: tf.less(i, s), compute, [i, c, o])
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
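# o starts as [0] and each iteration concatenates the next element of x, giving [0, 1, 2, 3, 4, 5, 6].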
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
b = lambda x: tf.add(x, 1.0)
r = control_flow_ops.While(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
def b(x):
with tf.device("/cpu:0"):
return tf.add(x, 1.0)
r = control_flow_ops.While(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
self._testWhile_Gpu_2(use_gpu=True)
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(0)
def cpu_sum(s):
c = lambda i, s: tf.less(i, 10)
def b(i, s):
i1 = tf.add(i, 1)
with tf.device("/cpu:0"):
s1 = tf.add(i, s)
return i1, s1
_, r_s = control_flow_ops.While(c, b, [n, s])
return r_s
c = lambda x: tf.less(x, 200)
b = lambda x: tf.add(x, cpu_sum(n))
r = control_flow_ops.While(c, b, [n])
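# cpu_sum(0) is 0 + 1 + ... + 9 = 45, so the outer loop adds 45 per step: 0, 45, 90, 135, 180, 225 (first value >= 200).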
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = tf.constant(0)
r = tf.constant(0)
condition = lambda n_, r_: tf.less(n_, 10)
def body(n_, r_):
n_ = tf.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [n_, r_]
res = control_flow_ops.While(condition,
body,
[n, r],
parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = tf.constant(0)
condition = lambda r_: tf.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [r_]
res = control_flow_ops.While(condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testCondWhile_1(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(0, 1),
lambda: control_flow_ops.While(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(1, 0), lambda: tf.add(n, 1),
lambda: control_flow_ops.While(c, b, [n]))
self.assertAllEqual(10, r.eval())
def testWhileCond_1(self):
with self.test_session():
i = tf.convert_to_tensor(0, name="i")
n = tf.convert_to_tensor(10, name="n")
one = tf.convert_to_tensor(1, name="one")
c = lambda x: tf.less(x, n)
b = lambda x: tf.cond(
tf.constant(True), lambda: tf.add(x, one), lambda: tf.sub(x, one))
r = control_flow_ops.While(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.cond(tf.constant(True), lambda: tf.add(x, 1), lambda: n)
r = control_flow_ops.While(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: tf.cond(tf.less(0, 1), lambda: tf.add(x, 1),
lambda: tf.sub(x, 1))
r = control_flow_ops.While(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.While(loop_iterator,
loop_body,
[n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = tf.Variable([3.0, 4.0, 5.0])
select2 = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns1 = tf.scatter_update(select1, j, 10.0)
ns2 = tf.scatter_update(select2, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.While(loop_iterator,
loop_body,
[n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j, _):
return tf.less(j, 3)
def loop_body(j, _):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
return [nj, ns]
r = control_flow_ops.While(loop_iterator,
loop_body,
[n, tf.identity(select)],
parallel_iterations=1)
tf.initialize_all_variables().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
c = tf.constant(0, name="c")
asn1 = tf.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn2 = tf.assign_add(var_b, asn1, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.While(pred, loop_body, [c],
parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
# Change condition to check var_b
def pred(_):
return tf.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
with tf.control_dependencies([asn1, asn2]):
inc_b = tf.identity(var_b)
return inc_b
lpa = control_flow_ops.While(pred, loop_body, [var_b], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
c = tf.constant(0)
tf.initialize_all_variables().run()
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn1 = tf.assign_add(var_a, 1, name="a_add")
with tf.control_dependencies([asn1]):
asn2 = tf.assign_add(var_b, var_a, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.While(pred, loop_body, [c], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
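# var_a is incremented to k before the k-th add, so var_b accumulates 1 + 2 + ... + 10 = 55.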
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = tf.FIFOQueue(-1, tf.int32)
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.While(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops._stack_push(s, i)], ni)
return ni
r = control_flow_ops.While(c, b, [i], parallel_iterations=1)
x = tf.constant(0)
def c1(i, _):
return tf.greater(i, 0)
def b1(i, x):
ni = tf.sub(i, 1)
nx = x + gen_data_flow_ops._stack_pop(s, tf.int32)
return [ni, nx]
_, rx = control_flow_ops.While(c1, b1, [r, x], parallel_iterations=1)
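# The first loop pushes 0..9 onto the stack; the second loop pops them all and sums to 45.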
self.assertEqual(45, rx.eval())
def testWhileGrad_Square(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(tf.less(1, 2), lambda: r, lambda: v)
r = tf.gradients(r, v)[0]
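# The loop squares v three times (2 -> 4 -> 16 -> 256), so r = v**8 and dr/dv = 8 * v**7 = 1024 at v = 2.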
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = tf.placeholder(tf.float32, shape=[None])
v = tf.constant(2.0, name="v")
n = tf.constant(0, name="n")
c = lambda i, v: tf.less(i, 5)
b = lambda i, v: [i + 1, tf.mul(x, v)]
r = control_flow_ops.While(c, b, [n, v], parallel_iterations=1)
r = tf.gradients(r[1], x)[0]
self.assertEqual(r.get_shape().as_list(), [None])
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.mul(r, r)
r = tf.gradients(r, v)[0]
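# r * r = v**16, so the gradient is 16 * v**15 = 524288 at v = 2.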
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.add(r, r)
r = tf.gradients(r, v)[0]
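# r + r = 2 * v**8, so the gradient is 16 * v**7 = 2048 at v = 2.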
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = tf.constant(3.0, name="a")
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = control_flow_ops.While(c, b, [v],
parallel_iterations=p_iters)
grad_a, grad_v = tf.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
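# The loop multiplies v by a four times (2 -> 6 -> 18 -> 54 -> 162), so r = v * a**4: dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.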
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def testWhileGrad_Variable(self):
with self.test_session():
a = tf.Variable(3.0)
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, a)
tf.initialize_all_variables().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGrad_ys_xs(self):
with self.test_session():
x = tf.constant(3.0, name="x")
y = tf.constant(2.0, name="y")
c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
y1 = tf.add(x, y)
x1 = tf.mul(x, y1)
return x1, y1
rx, ry = control_flow_ops.While(c, b, [x, y], parallel_iterations=1)
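# Two iterations map (x, y) = (3, 2) -> (15, 5) -> (300, 20). With rx = x * (x + y)**2 * (x + 1) and
# ry = (x + y) * (x + 1), the partial derivatives give the values asserted below.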
r = tf.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = tf.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = tf.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 10)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
ri, rx = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
r = tf.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], back_prop=False)
r = tf.add(r, v)
r = tf.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, rx = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.While(c, b, [i, rx], parallel_iterations=1)
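# The two serial loops double x five times each, so rx = x * 2**10 and d(rx)/dx = 1024.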
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, r1 = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
rx = tf.add(r1, r2)
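# Each loop independently computes x * 2**5, so d(r1 + r2)/dx = 32 + 32 = 64.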
r = tf.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.constant(1.0)
def inner_loop(s):
c = lambda x: tf.less(x, 4.0)
b = lambda x: tf.mul(x, 2.0)
return control_flow_ops.While(c, b, [s])
c = lambda x: tf.less(x, 2.0)
b = lambda x: tf.mul(inner_loop(x), 2.0)
r = control_flow_ops.While(c, b, [v])
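# inner_loop doubles its input until it reaches 4 (1 -> 2 -> 4), so a single outer iteration yields r = 4 * 2 = 8 and dr/dv = 8.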
r = tf.gradients(r, v)[0]
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return control_flow_ops.While(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return control_flow_ops.While(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.While(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return control_flow_ops.While(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return control_flow_ops.While(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: tf.mul(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.While(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.convert_to_tensor(2.0, name="v")
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
r = control_flow_ops.While(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = tf.placeholder(tf.float32)
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
r = control_flow_ops.While(c, b, [v])
r = tf.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 10)
self.assertEqual(x.dtype, tf.int32_ref)
# pylint: disable=protected-access
def body(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return (i+1, gen_array_ops._ref_identity(x))
# pylint: enable=protected-access
r = control_flow_ops.While(c, body, [i, x], parallel_iterations=5)
grad_ys = [tf.Variable(73).ref()]
grad = tf.gradients([r[1]], [x], grad_ys=grad_ys)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testFoldl_Simple(self):
with self.test_session():
elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
r = control_flow_ops.foldl(
lambda a, x: tf.mul(tf.add(a, x), 2), elems)
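# With no initializer the first element seeds the accumulator: (((((1+2)*2+3)*2+4)*2+5)*2+6)*2 = 208.
# With initializer=10, all six elements are folded, giving 880.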
self.assertAllEqual(208, r.eval())
r = control_flow_ops.foldl(
lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
self.assertAllEqual(880, r.eval())
def testFoldr_Simple(self):
with self.test_session():
elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
r = control_flow_ops.foldr(
lambda a, x: tf.mul(tf.add(a, x), 2), elems)
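# foldr folds from the right, seeding with the last element: (((((6+5)*2+4)*2+3)*2+2)*2+1)*2 = 450.
# With initializer=10, the result is 1282.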
self.assertAllEqual(450, r.eval())
r = control_flow_ops.foldr(
lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
self.assertAllEqual(1282, r.eval())
def testFold_Grad(self):
with self.test_session():
elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = tf.constant(2.0, name="v")
r = control_flow_ops.foldl(
lambda a, x: tf.mul(a, x), elems, initializer=v)
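# Both folds compute r = v * (1 * 2 * 3 * 4 * 5 * 6) = 720 * v, so the gradient w.r.t. v is 720 in each case.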
r = tf.gradients(r, v)[0]
self.assertAllEqual(720.0, r.eval())
r = control_flow_ops.foldr(
lambda a, x: tf.mul(a, x), elems, initializer=v)
r = tf.gradients(r, v)[0]
self.assertAllEqual(720.0, r.eval())
def testMap_Simple(self):
with self.test_session():
nums = [1, 2, 3, 4, 5, 6]
elems = tf.constant(nums, name="data")
r = control_flow_ops.map_fn(
lambda x: tf.mul(tf.add(x, 3), 2), elems)
self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())
def testOneValueCond(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
one = tf.convert_to_tensor(1, name="one")
two = tf.convert_to_tensor(2, name="two")
p = tf.greater_equal(c, 1)
i = tf.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, tf.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.test_session():
x = tf.convert_to_tensor([-2.0, 2.0], name="x")
d = tf.placeholder(tf.int32, shape=[])
def l2():
return tf.sqrt(tf.reduce_sum(tf.square(x)))
def l1():
return tf.reduce_sum(tf.abs(x))
i = tf.cond(tf.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
x = tf.constant(1)
y = tf.constant(2)
z = tf.constant(3)
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
f3 = lambda: tf.constant(-1)
r1 = tf.case({x < y: f1, x > z: f2}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = tf.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
# Duplicate events can happen, first one is selected
r3 = tf.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
r4 = tf.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError(
"More than one condition evaluated as True but exclusive=True."):
r4.eval()
# Check that the default is called if none of the others are
r5 = tf.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
assert not ran_once[ix]
ran_once[ix] = True
return tf.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
r6 = tf.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=break_run_twice(2))
self.assertAllEqual(r6.eval(), 0)
def testOneOpCond(self):
with self.test_session():
v = tf.Variable(0)
c = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
two = tf.convert_to_tensor(2)
p = tf.greater_equal(c, 1)
def a():
return tf.assign(v, one)
def b():
return tf.assign(v, two)
i = tf.cond(p, a, b)
self.assertTrue(isinstance(i, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = tf.Variable(0.0)
c = tf.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v.ref(),
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
# Ensure the fetched value of 'c' is unchanged
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = tf.Variable(0.0)
c1 = tf.constant(10)
c2 = tf.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v",
output_tensor=c1,
dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = tf.gather(
v_at_1_after_init.values, v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with tf.Graph().as_default():
# device set on tensor => same device on dep.
with tf.device("/job:ps"):
vd = tf.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = tf.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = tf.Variable([0.0])
with tf.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
self.assertDeviceEqual("/job:worker/gpu:1", with_vdef_dep.device)
def testGroup(self):
with self.test_session() as sess:
v1 = tf.Variable([0.0])
v2 = tf.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testMergeShapes(self):
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, None])
p2 = tf.placeholder(tf.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = tf.placeholder(tf.int32)
# All inputs unknown.
p1 = tf.placeholder(tf.float32_ref)
p2 = tf.placeholder(tf.float32_ref)
p3 = tf.placeholder(tf.float32_ref)
s = control_flow_ops.ref_select(index, [p1, p2, p3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
p2 = tf.placeholder(tf.float32_ref, shape=[2, 1])
s = control_flow_ops.ref_select(index, [p1, p2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but same.
p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
p2 = tf.placeholder(tf.float32_ref, shape=[1, 2])
s = control_flow_ops.ref_select(index, [p1, p2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
p2 = tf.placeholder(tf.float32_ref)
p2.set_shape([None, 2])
s = control_flow_ops.ref_select(index, [p1, p2])
self.assertEqual(None, s.get_shape())
class TupleTest(tf.test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable([1.0])
add1 = tf.add(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
2.0)
v2 = tf.Variable([10.0])
add2 = tf.add(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
tf.constant([1]))
v2 = tf.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
tf.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = tf.gather(st1.values, st1.indices)
g2 = tf.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = tf.Variable(0)
assign = tf.assign(var, 1)
t, = tf.tuple([tf.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
self.assertEquals(1, var.eval())
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The testing Environment class."""
import logging
import shutil
import time
import traceback
from xml.etree import ElementTree
from xml.sax.saxutils import escape
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options
# Message strings to look for in chrome://password-manager-internals
MESSAGE_ASK = "Message: Decision: ASK the user"
MESSAGE_SAVE = "Message: Decision: SAVE the password"
class TestResult:
"""Stores the information related to a test result. """
def __init__(self, name, test_type, successful, message):
"""Creates a new TestResult.
Args:
name: The tested website name.
test_type: The test type.
successful: Whether or not the test was successful.
message: The error message of the test.
"""
self.name = name
self.test_type = test_type
self.successful = successful
self.message = message
class Environment:
"""Sets up the testing Environment. """
def __init__(self, chrome_path, chromedriver_path, profile_path,
passwords_path, enable_automatic_password_saving,
numeric_level=None, log_to_console=False, log_file=""):
"""Creates a new testing Environment.
Args:
chrome_path: The chrome binary file.
chromedriver_path: The chromedriver binary file.
profile_path: The chrome testing profile folder.
passwords_path: The usernames and passwords file.
enable_automatic_password_saving: If True, the passwords are going to be
saved without showing the prompt.
numeric_level: The log verbosity.
log_to_console: If True, the debug logs will be shown on the console.
log_file: The file where to store the log. If it's empty, the log will
not be stored.
Raises:
Exception: An exception is raised if |profile_path| folder could not be
removed.
"""
# Setting up the logging.
if numeric_level is not None:
if log_file:
# Set up logging to file.
logging.basicConfig(level=numeric_level,
filename=log_file,
filemode='w')
if log_to_console:
console = logging.StreamHandler()
console.setLevel(numeric_level)
# Add the handler to the root logger.
logging.getLogger('').addHandler(console)
elif log_to_console:
logging.basicConfig(level=numeric_level)
# Cleaning the chrome testing profile folder.
try:
shutil.rmtree(profile_path)
except Exception as e:
# The test execution can continue, but this makes the tests less stable.
logging.error("Error: Could not wipe the chrome profile directory (%s). \
This affects the stability of the tests. Continuing to run tests."
% e)
# If |chrome_path| is not defined, it means that we are in the dashboard
# website, and we just need to get the list of all websites. In this case,
# we don't need to initialize the webdriver.
if chrome_path:
options = Options()
self.enable_automatic_password_saving = enable_automatic_password_saving
if enable_automatic_password_saving:
options.add_argument("enable-automatic-password-saving")
# Chrome path.
options.binary_location = chrome_path
# Chrome testing profile path.
options.add_argument("user-data-dir=%s" % profile_path)
# The webdriver. It's possible to choose the port the service is going to
# run on. If it's left to 0, a free port will be found.
self.driver = webdriver.Chrome(chromedriver_path, 0, options)
# The password internals window.
self.internals_window = self.driver.current_window_handle
if passwords_path:
# An xml tree filled with logins and passwords.
self.passwords_tree = ElementTree.parse(passwords_path).getroot()
else:
raise Exception("Error: |passwords_path| needs to be provided if"
"|chrome_path| is provided, otherwise the tests could not be run")
# Password internals page.
self.internals_page = "chrome://password-manager-internals/"
# The Website window.
self.website_window = None
# The WebsiteTests list.
self.websitetests = []
# The enabled WebsiteTests list.
self.working_tests = []
# The disabled WebsiteTests list.
self.disabled_tests = []
# Map messages to the number of their appearance in the log.
self.message_count = dict()
self.message_count[MESSAGE_ASK] = 0
self.message_count[MESSAGE_SAVE] = 0
# The tests need two tabs to work. A new tab is opened by the first
# GoTo, which is why we store here whether or not GoTo has been
# executed yet.
self.first_go_to = True
# List of all tests results.
self.tests_results = []
def AddWebsiteTest(self, websitetest, disabled=False):
"""Adds a WebsiteTest to the testing Environment.
Args:
websitetest: The WebsiteTest instance to be added.
disabled: Whether test is disabled.
"""
websitetest.environment = self
if hasattr(self, "driver"):
websitetest.driver = self.driver
if hasattr(self, "passwords_tree") and self.passwords_tree is not None:
if not websitetest.username:
username_tag = (
self.passwords_tree.find(
".//*[@name='%s']/username" % websitetest.name))
if username_tag.text:
websitetest.username = username_tag.text
if not websitetest.password:
password_tag = (
self.passwords_tree.find(
".//*[@name='%s']/password" % websitetest.name))
if password_tag.text:
websitetest.password = password_tag.text
self.websitetests.append(websitetest)
if disabled:
self.disabled_tests.append(websitetest.name)
else:
self.working_tests.append(websitetest.name)
def RemoveAllPasswords(self):
"""Removes all the stored passwords."""
logging.info("\nRemoveAllPasswords\n")
self.driver.get("chrome://settings/passwords")
self.driver.switch_to_frame("settings")
while True:
try:
self.driver.execute_script("document.querySelector('"
"#saved-passwords-list .row-delete-button').click()")
time.sleep(1)
except NoSuchElementException:
break
except WebDriverException:
break
def OpenTabAndGoToInternals(self, url):
"""If there is no |self.website_window|, opens a new tab and navigates to
|url| in the new tab. Navigates to the passwords internals page in the
first tab. Raises an exception otherwise.
Args:
url: Url to go to in the new tab.
Raises:
Exception: An exception is raised if |self.website_window| already
exists.
"""
if self.website_window:
raise Exception("Error: The window was already opened.")
self.driver.get("chrome://newtab")
# There is no straightforward way to open a new tab with chromedriver.
# One work-around is to go to a website, insert a link that will be
# opened in a new tab, and click on it.
a = self.driver.execute_script(
"var a = document.createElement('a');"
"a.target = '_blank';"
"a.href = arguments[0];"
"a.innerHTML = '.';"
"document.body.appendChild(a);"
"return a;",
url)
a.click()
time.sleep(1)
self.website_window = self.driver.window_handles[-1]
self.driver.get(self.internals_page)
self.driver.switch_to_window(self.website_window)
def SwitchToInternals(self):
"""Switches from the Website window to internals tab."""
self.driver.switch_to_window(self.internals_window)
def SwitchFromInternals(self):
"""Switches from internals tab to the Website window."""
self.driver.switch_to_window(self.website_window)
def _DidMessageAppearUntilTimeout(self, log_message, timeout):
"""Checks whether the save password prompt is shown.
Args:
log_message: Log message to look for in the password internals.
timeout: There is some delay between the login and the password
internals update. The method checks periodically during the first
|timeout| seconds if the internals page reports the prompt being
shown. If the prompt is not reported shown within the first
|timeout| seconds, it is considered not shown at all.
Returns:
True if the save password prompt is shown.
False otherwise.
"""
log = self.driver.find_element_by_css_selector("#log-entries")
count = log.text.count(log_message)
if count > self.message_count[log_message]:
self.message_count[log_message] = count
return True
elif timeout > 0:
time.sleep(1)
return self._DidMessageAppearUntilTimeout(log_message, timeout - 1)
else:
return False
def CheckForNewMessage(self, log_message, message_should_show_up,
error_message, timeout=3):
"""Detects whether the save password prompt is shown.
Args:
log_message: Log message to look for in the password internals. The
only valid values are the constants MESSAGE_* defined at the
beginning of this file.
message_should_show_up: Whether or not the message is expected to be
shown.
error_message: Error message for the exception.
timeout: There is some delay between the login and the password
internals update. The method checks periodically during the first
|timeout| seconds if the internals page reports the prompt being
shown. If the prompt is not reported shown within the first
|timeout| seconds, it is considered not shown at all.
Raises:
Exception: An exception is raised in case the result does not match the
expectation
"""
if (self._DidMessageAppearUntilTimeout(log_message, timeout) !=
message_should_show_up):
raise Exception(error_message)
def AllTests(self, prompt_test):
"""Runs the tests on all the WebsiteTests.
Args:
prompt_test: If True, tests caring about showing the save-password
prompt are going to be run, otherwise tests which don't care about
the prompt are going to be run.
Raises:
Exception: An exception is raised if the tests fail.
"""
if prompt_test:
self.PromptTestList(self.websitetests)
else:
self.TestList(self.websitetests)
def DisabledTests(self, prompt_test):
"""Runs the tests on all the disabled WebsiteTests.
Args:
prompt_test: If True, tests caring about showing the save-password
prompt are going to be run, otherwise tests which don't care about
the prompt are going to be executed.
Raises:
Exception: An exception is raised if the tests fail.
"""
self.Test(self.disabled_tests, prompt_test)
def WorkingTests(self, prompt_test):
"""Runs the tests on all the enabled WebsiteTests.
Args:
prompt_test: If True, tests caring about showing the save-password
prompt are going to be run, otherwise tests which don't care about
the prompt are going to be executed.
Raises:
Exception: An exception is raised if the tests fail.
"""
self.Test(self.working_tests, prompt_test)
def Test(self, tests, prompt_test):
"""Runs the tests on websites named in |tests|.
Args:
tests: A list of the names of the WebsiteTests that are going to be
tested.
prompt_test: If True, tests caring about showing the save-password
prompt are going to be run, otherwise tests which don't care about
the prompt are going to be executed.
Raises:
Exception: An exception is raised if the tests fail.
"""
websitetests = []
for websitetest in self.websitetests:
if websitetest.name in tests:
websitetests.append(websitetest)
if prompt_test:
self.PromptTestList(websitetests)
else:
self.TestList(websitetests)
def TestList(self, websitetests):
"""Runs the tests on the websites in |websitetests|.
Args:
websitetests: A list of WebsiteTests that are going to be tested.
Raises:
Exception: An exception is raised if the tests fail.
"""
self.RemoveAllPasswords()
for websitetest in websitetests:
successful = True
error = ""
try:
websitetest.was_run = True
websitetest.WrongLoginTest()
websitetest.SuccessfulLoginTest()
websitetest.SuccessfulLoginWithAutofilledPasswordTest()
self.RemoveAllPasswords()
websitetest.SuccessfulLoginTest()
except Exception:
successful = False
error = traceback.format_exc()
self.tests_results.append(TestResult(websitetest.name, "normal",
successful, escape(error)))
def PromptTestList(self, websitetests):
"""Runs the prompt tests on the websites in |websitetests|.
Args:
websitetests: A list of WebsiteTests that are going to be tested.
Raises:
Exception: An exception is raised if the tests fail.
"""
self.RemoveAllPasswords()
for websitetest in websitetests:
successful = True
error = ""
try:
websitetest.was_run = True
websitetest.PromptTest()
except Exception:
successful = False
error = traceback.format_exc()
self.tests_results.append(TestResult(websitetest.name, "prompt",
successful, escape(error)))
def Quit(self):
"""Closes the tests."""
# Close the webdriver.
self.driver.quit()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import time
import json
import sys
from collections import OrderedDict
from random import random, randrange, uniform
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.human_behaviour import sleep, action_delay
from pokemongo_bot.inventory import Pokemon
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.base_dir import _base_dir
from datetime import datetime, timedelta
from .utils import getSeconds
CATCH_STATUS_SUCCESS = 1
CATCH_STATUS_FAILED = 2
CATCH_STATUS_VANISHED = 3
CATCH_STATUS_MISSED = 4
ENCOUNTER_STATUS_SUCCESS = 1
ENCOUNTER_STATUS_NOT_IN_RANGE = 5
ENCOUNTER_STATUS_POKEMON_INVENTORY_FULL = 7
INCENSE_ENCOUNTER_AVAILABLE = 1
INCENSE_ENCOUNTER_NOT_AVAILABLE = 2
ITEM_POKEBALL = 1
ITEM_GREATBALL = 2
ITEM_ULTRABALL = 3
ITEM_RAZZBERRY = 701
ITEM_PINAPBERRY = 705
DEFAULT_UNSEEN_AS_VIP = True
LOGIC_TO_FUNCTION = {
'or': lambda x, y, z: x or y or z,
'and': lambda x, y, z: x and y and z,
'orand': lambda x, y, z: x or y and z,
'andor': lambda x, y, z: x and y or z
}
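# Python's operator precedence ('and' binds tighter than 'or') makes 'orand' evaluate as
# x or (y and z) and 'andor' as (x and y) or z. For example, with (x, y, z) = (True, False, False),
# 'orand' yields True while 'andor' yields False.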
DEBUG_ON = False
class PokemonCatchWorker(BaseTask):
def __init__(self, pokemon, bot, config={}):
self.pokemon = pokemon
# Load CatchPokemon config if no config supplied
if not config:
for value in bot.workers:
if hasattr(value, 'catch_pokemon'):
config = value.config
self.config = config
super(PokemonCatchWorker, self).__init__(bot, config)
if self.config.get('debug', False): global DEBUG_ON; DEBUG_ON = True  # update the module-level flag, not a new local
def initialize(self):
self.position = self.bot.position
self.pokemon_list = self.bot.pokemon_list
self.inventory = inventory.items()
self.pokedex = inventory.pokedex()
self.spawn_point_guid = ''
self.response_key = ''
self.response_status_key = ''
self.rest_completed = False
self.caught_last_24 = 0
#Config
self.min_ultraball_to_keep = self.config.get('min_ultraball_to_keep', 10)
self.berry_threshold = self.config.get('berry_threshold', 0.35)
self.vip_berry_threshold = self.config.get('vip_berry_threshold', 0.9)
self.treat_unseen_as_vip = self.config.get('treat_unseen_as_vip', DEFAULT_UNSEEN_AS_VIP)
self.daily_catch_limit = self.config.get('daily_catch_limit', 800)
self.use_pinap_on_vip = self.config.get('use_pinap_on_vip', False)
self.pinap_on_level_below = self.config.get('pinap_on_level_below', 0)
self.pinap_operator = self.config.get('pinap_operator', "or")
self.pinap_ignore_threshold = self.config.get('pinap_ignore_threshold', False)
self.vanish_settings = self.config.get('vanish_settings', {})
self.consecutive_vanish_limit = self.vanish_settings.get('consecutive_vanish_limit', 10)
self.rest_duration_min = getSeconds(self.vanish_settings.get('rest_duration_min', "02:00:00"))
self.rest_duration_max = getSeconds(self.vanish_settings.get('rest_duration_max', "04:00:00"))
self.catch_throw_parameters = self.config.get('catch_throw_parameters', {})
self.catch_throw_parameters_spin_success_rate = self.catch_throw_parameters.get('spin_success_rate', 0.6)
self.catch_throw_parameters_excellent_rate = self.catch_throw_parameters.get('excellent_rate', 0.1)
self.catch_throw_parameters_great_rate = self.catch_throw_parameters.get('great_rate', 0.5)
self.catch_throw_parameters_nice_rate = self.catch_throw_parameters.get('nice_rate', 0.3)
self.catch_throw_parameters_normal_rate = self.catch_throw_parameters.get('normal_rate', 0.1)
self.catch_throw_parameters_hit_rate = self.catch_throw_parameters.get('hit_rate', 0.8)
self.catchsim_config = self.config.get('catch_simulation', {})
self.catchsim_catch_wait_min = self.catchsim_config.get('catch_wait_min', 2)
self.catchsim_catch_wait_max = self.catchsim_config.get('catch_wait_max', 6)
self.catchsim_flee_count = int(self.catchsim_config.get('flee_count', 3))
self.catchsim_flee_duration = self.catchsim_config.get('flee_duration', 2)
self.catchsim_berry_wait_min = self.catchsim_config.get('berry_wait_min', 2)
self.catchsim_berry_wait_max = self.catchsim_config.get('berry_wait_max', 3)
self.catchsim_changeball_wait_min = self.catchsim_config.get('changeball_wait_min', 2)
self.catchsim_changeball_wait_max = self.catchsim_config.get('changeball_wait_max', 3)
self.catchsim_newtodex_wait_min = self.catchsim_config.get('newtodex_wait_min', 20)
self.catchsim_newtodex_wait_max = self.catchsim_config.get('newtodex_wait_max', 30)
############################################################################
# public methods
############################################################################
def work(self, response_dict=None):
response_dict = response_dict or self.create_encounter_api_call()
# validate response
if not response_dict:
return WorkerResult.ERROR
responses = response_dict['responses']
response = responses[self.response_key]
if response[self.response_status_key] != ENCOUNTER_STATUS_SUCCESS and response[self.response_status_key] != INCENSE_ENCOUNTER_AVAILABLE:
if response[self.response_status_key] == ENCOUNTER_STATUS_NOT_IN_RANGE:
self.emit_event('pokemon_not_in_range', formatted='Pokemon went out of range!')
elif response[self.response_status_key] == INCENSE_ENCOUNTER_NOT_AVAILABLE:
self.emit_event('pokemon_not_in_range', formatted='Incensed Pokemon went out of range!')
elif response[self.response_status_key] == ENCOUNTER_STATUS_POKEMON_INVENTORY_FULL:
self.emit_event('pokemon_inventory_full', formatted='Your Pokemon inventory is full! Could not catch!')
return WorkerResult.ERROR
# get pokemon data
pokemon_data = response['wild_pokemon']['pokemon_data'] if 'wild_pokemon' in response else response['pokemon_data']
pokemon = Pokemon(pokemon_data)
# check if vip pokemon
is_vip = self._is_vip_pokemon(pokemon)
# skip ignored pokemon
if (not self._should_catch_pokemon(pokemon) and not is_vip) or self.bot.catch_disabled:
if not hasattr(self.bot,'skipped_pokemon'):
self.bot.skipped_pokemon = []
# Check if pokemon already skipped and suppress alert if so
for skipped_pokemon in self.bot.skipped_pokemon:
if pokemon.pokemon_id == skipped_pokemon.pokemon_id and \
pokemon.cp_exact == skipped_pokemon.cp_exact and \
pokemon.ivcp == skipped_pokemon.ivcp:
return WorkerResult.SUCCESS
if self.bot.catch_disabled:
self.logger.info("Not catching {}. All catching tasks are currently disabled until {}.".format(pokemon,self.bot.catch_resume_at.strftime("%H:%M:%S")))
# Add the encounter_id to the Pokemon
pokemon.encounter_id = self.pokemon['encounter_id']
self.bot.skipped_pokemon.append(pokemon)
self.emit_event(
'pokemon_appeared',
formatted='Skip ignored {pokemon}! (CP: {cp} IV: {iv} A/D/S: {iv_display} Shiny: {shiny})',
data={
'pokemon': pokemon.name,
'cp': str(int(pokemon.cp)),
'iv': str(pokemon.iv),
'iv_display': str(pokemon.iv_display),
'shiny': pokemon.shiny,
}
)
return WorkerResult.SUCCESS
if inventory.items().get(ITEM_POKEBALL).count < 1:
if inventory.items().get(ITEM_GREATBALL).count < 1:
if inventory.items().get(ITEM_ULTRABALL).count < 1:
return WorkerResult.ERROR
elif (not is_vip) and inventory.items().get(ITEM_ULTRABALL).count <= self.min_ultraball_to_keep:
return WorkerResult.ERROR
# log encounter
self.emit_event(
'pokemon_appeared',
formatted='A wild {} appeared! (CP: {} IV: {} A/D/S: {} NCP: {} Shiny: {})'.format(pokemon.name, pokemon.cp, pokemon.iv, pokemon.iv_display, round(pokemon.cp_percent, 2),pokemon.shiny, ),
data={
'pokemon': pokemon.name,
'ncp': round(pokemon.cp_percent, 2),
'cp': pokemon.cp,
'iv': pokemon.iv,
'iv_display': pokemon.iv_display,
'encounter_id': self.pokemon['encounter_id'],
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude'],
'pokemon_id': pokemon.pokemon_id,
'shiny': pokemon.shiny,
}
)
# simulate app
time.sleep(3)
# check for VIP pokemon
if is_vip:
self.emit_event('vip_pokemon', formatted='This is a VIP pokemon. Catch!!!')
# check catch limits before catch
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
result = c.fetchone()
while True:
if result[0] < self.daily_catch_limit:
# catch that pokemon!
encounter_id = self.pokemon['encounter_id']
catch_rate_by_ball = [0] + response['capture_probability']['capture_probability'] # offset so item ids match indices
self._do_catch(pokemon, encounter_id, catch_rate_by_ball, is_vip=is_vip)
break
else:
self.emit_event('catch_limit', formatted='WARNING! You have reached your daily catch limit')
sys.exit(2)
break
# simulate app
time.sleep(5)
def create_encounter_api_call(self):
encounter_id = self.pokemon['encounter_id']
player_latitude = self.pokemon['latitude']
player_longitude = self.pokemon['longitude']
request = self.bot.api.create_request()
if 'spawn_point_id' in self.pokemon:
spawn_point_id = self.pokemon['spawn_point_id']
self.spawn_point_guid = spawn_point_id
self.response_key = 'ENCOUNTER'
self.response_status_key = 'status'
request.encounter(
encounter_id=encounter_id,
spawn_point_id=spawn_point_id,
player_latitude=player_latitude,
player_longitude=player_longitude
)
elif 'fort_id' in self.pokemon:
fort_id = self.pokemon['fort_id']
self.spawn_point_guid = fort_id
self.response_key = 'DISK_ENCOUNTER'
self.response_status_key = 'result'
request.disk_encounter(
encounter_id=encounter_id,
fort_id=fort_id,
player_latitude=player_latitude,
player_longitude=player_longitude
)
else:
# This must be an incensed mon
self.response_key = 'INCENSE_ENCOUNTER'
self.response_status_key = 'result'
request.incense_encounter(
encounter_id=encounter_id,
encounter_location=self.pokemon['encounter_location']
)
return request.call()
############################################################################
# helpers
############################################################################
def _pokemon_matches_config(self, config, pokemon, default_logic='and'):
pokemon_config = config.get(pokemon.name, config.get('any'))
if not pokemon_config:
return False
catch_results = {
'ncp': False,
'cp': False,
'iv': False,
'fa': True,
'ca': True
}
catch_logic = pokemon_config.get('logic', default_logic)
candies = inventory.candies().get(pokemon.pokemon_id).quantity
threshold = pokemon_config.get('candy_threshold', -1)
if threshold > 0 and candies >= threshold: # Got enough candies
return False
if pokemon_config.get('never_catch', False):
return False
if pokemon_config.get('always_catch', False):
return True
if pokemon_config.get('catch_above_ncp',-1) >= 0:
if pokemon.cp_percent >= pokemon_config.get('catch_above_ncp'):
catch_results['ncp'] = True
if pokemon_config.get('catch_above_cp',-1) >= 0:
if pokemon.cp >= pokemon_config.get('catch_above_cp'):
catch_results['cp'] = True
if pokemon_config.get('catch_below_cp',-1) >= 0:
if pokemon.cp <= pokemon_config.get('catch_below_cp'):
catch_results['cp'] = True
if pokemon_config.get('catch_above_iv',-1) >= 0:
if pokemon.iv > pokemon_config.get('catch_above_iv', pokemon.iv):
catch_results['iv'] = True
catch_results['fa'] = ( len(pokemon_config.get('fast_attack', [])) == 0 or unicode(pokemon.fast_attack) in map(lambda x: unicode(x), pokemon_config.get('fast_attack', [])))
catch_results['ca'] = ( len(pokemon_config.get('charged_attack', [])) == 0 or unicode(pokemon.charged_attack) in map(lambda x: unicode(x), pokemon_config.get('charged_attack', [])))
self.bot.logger.debug("Our comparison results: FA: {}, CA: {}, CP: {}, NCP: {}, IV: {}".format(catch_results['fa'], catch_results['ca'], catch_results['cp'], catch_results['ncp'], catch_results['iv']))
# check if encountered pokemon is our locked pokemon
if self.bot.capture_locked and self.bot.capture_locked != pokemon.pokemon_id:
self.bot.logger.debug("Pokemon locked!")
return False
# build catch results
cr = {
'ncp': False,
'cp': False,
'iv': False
}
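# Seed defaults for the chosen logic; any criteria that are actually configured below
# overwrite these entries with the real comparison results.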
if catch_logic == 'and':
cr['ncp'] = True
cr['cp'] = True
cr['iv'] = True
elif catch_logic == 'andor':
cr['ncp'] = True
cr['cp'] = True
elif catch_logic == 'orand':
cr['cp'] = True
cr['iv'] = True
if pokemon_config.get('catch_above_ncp',-1) >= 0: cr['ncp'] = catch_results['ncp']
if pokemon_config.get('catch_above_cp',-1) >= 0: cr['cp'] = catch_results['cp']
if pokemon_config.get('catch_below_cp',-1) >= 0: cr['cp'] = catch_results['cp']
if pokemon_config.get('catch_above_iv',-1) >= 0: cr['iv'] = catch_results['iv']
if DEBUG_ON:
print "Debug information for match rules..."
print "catch_results ncp = {}".format(catch_results['ncp'])
print "catch_results cp = {}".format(catch_results['cp'])
print "catch_results iv = {}".format(catch_results['iv'])
print "cr = {}".format(cr)
print "catch_above_ncp = {}".format(pokemon_config.get('catch_above_ncp'))
print "catch_above_cp iv = {}".format(pokemon_config.get('catch_above_cp'))
print "catch_below_cp iv = {}".format(pokemon_config.get('catch_below_cp'))
print "catch_above_iv iv = {}".format(pokemon_config.get('catch_above_iv'))
print "Pokemon {}".format(pokemon.name)
print "pokemon ncp = {}".format(pokemon.cp_percent)
print "pokemon cp = {}".format(pokemon.cp)
print "pokemon iv = {}".format(pokemon.iv)
print "catch logic = {}".format(catch_logic)
if LOGIC_TO_FUNCTION[catch_logic](*cr.values()):
return catch_results['fa'] and catch_results['ca']
else:
return False
def _should_catch_pokemon(self, pokemon):
return self._pokemon_matches_config(self.bot.config.catch, pokemon)
def _is_vip_pokemon(self, pokemon):
# Having just the name present in the VIP list makes a pokemon VIP.
# Pokemon that have not been seen yet also become VIP unless this is disabled in config.
if self.bot.config.vips.get(pokemon.name) == {} or (self.treat_unseen_as_vip and not self.pokedex.seen(pokemon.pokemon_id)):
return True
# Treat all shiny pokemon as VIP!
if pokemon.shiny:
return True
return self._pokemon_matches_config(self.bot.config.vips, pokemon, default_logic='or')
def _pct(self, rate_by_ball):
return '{0:.2f}'.format(rate_by_ball * 100)
def _use_berry(self, berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball):
# Delay to simulate selecting berry
action_delay(self.catchsim_berry_wait_min, self.catchsim_berry_wait_max)
new_catch_rate_by_ball = []
self.emit_event(
'pokemon_catch_rate',
level='debug',
formatted='Catch rate of {catch_rate} with {ball_name} is low. Throwing {berry_name} (have {berry_count})',
data={
'catch_rate': self._pct(catch_rate_by_ball[current_ball]),
'ball_name': self.inventory.get(current_ball).name,
'berry_name': self.inventory.get(berry_id).name,
'berry_count': berry_count
}
)
response_dict = self.bot.api.use_item_encounter(
item=berry_id,
encounter_id=encounter_id,
spawn_point_guid=self.spawn_point_guid
)
responses = response_dict['responses']
if response_dict['status_code'] == 1:
# update catch rates using multiplier
if 'capture_probability' in responses['USE_ITEM_ENCOUNTER']:
for rate in catch_rate_by_ball:
new_catch_rate_by_ball.append(float(responses['USE_ITEM_ENCOUNTER']['capture_probability']['capture_probability'][current_ball-1]))
self.emit_event(
'threw_berry',
formatted="Threw a {berry_name}! Catch rate with {ball_name} is now: {new_catch_rate}",
data={
'berry_name': self.inventory.get(berry_id).name,
'ball_name': self.inventory.get(current_ball).name,
'new_catch_rate': self._pct(new_catch_rate_by_ball[current_ball])
}
)
# softban?
else:
new_catch_rate_by_ball = catch_rate_by_ball
self.bot.softban = True
self.emit_event(
'softban',
level='warning',
formatted='Failed to use berry. You may be softbanned.'
)
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='softban_log'")
result = c.fetchone()
while True:
if result[0] == 1:
source = str("PokemonCatchWorker")
status = str("Possible Softban")
conn.execute('''INSERT INTO softban_log (status, source) VALUES (?, ?)''', (status, source))
break
else:
self.emit_event(
'softban_log',
sender=self,
level='info',
formatted="softban_log table not found, skipping log"
)
break
# unknown status code
else:
new_catch_rate_by_ball = catch_rate_by_ball
self.emit_event(
'threw_berry_failed',
formatted='Unknown response when throwing berry: {status_code}.',
data={
'status_code': response_dict['status_code']
}
)
return new_catch_rate_by_ball
def _do_catch(self, pokemon, encounter_id, catch_rate_by_ball, is_vip=False):
# settings that may be exposed at some point
"""
:type pokemon: Pokemon
"""
if self.use_pinap_on_vip and is_vip and pokemon.level <= self.pinap_on_level_below and self.pinap_operator == "and":
berry_id = ITEM_PINAPBERRY
else:
berry_id = ITEM_RAZZBERRY
if self.pinap_operator == "or":
if (self.use_pinap_on_vip and is_vip) or (pokemon.level <= self.pinap_on_level_below):
berry_id = ITEM_PINAPBERRY
berry_count = self.inventory.get(berry_id).count
ball_count = {}
for ball_id in [ITEM_POKEBALL, ITEM_GREATBALL, ITEM_ULTRABALL]:
ball_count[ball_id] = self.inventory.get(ball_id).count
# use `min_ultraball_to_keep` from config if it is not None
min_ultraball_to_keep = ball_count[ITEM_ULTRABALL]
if self.min_ultraball_to_keep is not None and self.min_ultraball_to_keep >= 0:
min_ultraball_to_keep = self.min_ultraball_to_keep
maximum_ball = ITEM_GREATBALL if ball_count[ITEM_ULTRABALL] < min_ultraball_to_keep else ITEM_ULTRABALL
ideal_catch_rate_before_throw = self.vip_berry_threshold if is_vip else self.berry_threshold
ideal_catch_rate_before_throw = 1 if self.pinap_ignore_threshold and berry_id == ITEM_PINAPBERRY else ideal_catch_rate_before_throw
used_berry = False
original_catch_rate_by_ball = catch_rate_by_ball
if DEBUG_ON:
print "Pokemon Level: " + str(pokemon.level) + " Berries count: " + str(berry_count) + " Berries ID: " + str(berry_id) + " Catch rate: " + str(ideal_catch_rate_before_throw)
while True:
# find lowest available ball
current_ball = ITEM_POKEBALL
while ball_count[current_ball] == 0 and current_ball < maximum_ball:
current_ball += 1
if ball_count[current_ball] == 0:
self.emit_event('no_pokeballs', formatted='No pokeballs left! Fleeing...')
return WorkerResult.ERROR
# check future ball count
num_next_balls = 0
next_ball = current_ball
while next_ball < maximum_ball:
next_ball += 1
num_next_balls += ball_count[next_ball]
# If we are out of pinap berries, fall back to razz berries
if berry_count == 0 and berry_id == ITEM_PINAPBERRY:
berry_id = ITEM_RAZZBERRY
ideal_catch_rate_before_throw = self.vip_berry_threshold if is_vip else self.berry_threshold
berry_count = self.inventory.get(berry_id).count
# check if we've got berries to spare
berries_to_spare = berry_count > 0 if is_vip else berry_count > num_next_balls + 30
changed_ball = False
# use a berry if we are under our ideal rate and have berries to spare
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berries_to_spare and not used_berry:
new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
if new_catch_rate_by_ball != catch_rate_by_ball:
catch_rate_by_ball = new_catch_rate_by_ball
self.inventory.get(berry_id).remove(1)
berry_count -= 1
used_berry = True
# pick the best ball to catch with
best_ball = current_ball
while best_ball < maximum_ball:
best_ball += 1
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and ball_count[best_ball] > 0:
# if current ball chance to catch is under our ideal rate, and player has better ball - then use it
current_ball = best_ball
changed_ball = True
# if the rate is still low and we didn't throw a berry before, throw one
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berry_count > 0 and not used_berry:
new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
if new_catch_rate_by_ball != catch_rate_by_ball:
catch_rate_by_ball = new_catch_rate_by_ball
self.inventory.get(berry_id).remove(1)
berry_count -= 1
used_berry = True
# If we change ball then wait to simulate user selecting it
if changed_ball:
action_delay(self.catchsim_changeball_wait_min, self.catchsim_changeball_wait_max)
# Randomize the quality of the throw
# Default structure
throw_parameters = {'normalized_reticle_size': 1.950,
'spin_modifier': 1.0,
'normalized_hit_position': 1.0,
'throw_type_label': 'Excellent'}
self.generate_spin_parameter(throw_parameters)
self.generate_throw_quality_parameters(throw_parameters)
# try to catch pokemon!
ball_count[current_ball] -= 1
self.inventory.get(current_ball).remove(1)
# Take some time to throw the ball from config options
action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
self.emit_event(
'threw_pokeball',
formatted='{throw_type}{spin_label} throw! Used {ball_name}, with chance {success_percentage} ({count_left} left)',
data={
'throw_type': throw_parameters['throw_type_label'],
'spin_label': throw_parameters['spin_label'],
'ball_name': self.inventory.get(current_ball).name,
'success_percentage': self._pct(catch_rate_by_ball[current_ball]),
'count_left': ball_count[current_ball]
}
)
hit_pokemon = 1
if random() >= self.catch_throw_parameters_hit_rate and not is_vip:
hit_pokemon = 0
response_dict = self.bot.api.catch_pokemon(
encounter_id=encounter_id,
pokeball=current_ball,
normalized_reticle_size=throw_parameters['normalized_reticle_size'],
spawn_point_id=self.spawn_point_guid,
hit_pokemon=hit_pokemon,
spin_modifier=throw_parameters['spin_modifier'],
normalized_hit_position=throw_parameters['normalized_hit_position']
)
try:
catch_pokemon_status = response_dict['responses']['CATCH_POKEMON']['status']
except KeyError:
break
# retry failed pokemon
if catch_pokemon_status == CATCH_STATUS_FAILED:
self.emit_event(
'pokemon_capture_failed',
formatted='{pokemon} capture failed.. trying again!',
data={'pokemon': pokemon.name}
)
used_berry = False
catch_rate_by_ball = original_catch_rate_by_ball
# sleep according to flee_count and flee_duration config settings
# randomly chooses a number of times to 'show' wobble animation between 1 and flee_count
# multiplies this by flee_duration to get total sleep
if self.catchsim_flee_count:
sleep((randrange(self.catchsim_flee_count)+1) * self.catchsim_flee_duration)
continue
# abandon if pokemon vanished
elif catch_pokemon_status == CATCH_STATUS_VANISHED:
#insert into DB
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='vanish_log'")
result = c.fetchone()
while True:
if result[0] == 1:
conn.execute('''INSERT INTO vanish_log (pokemon, cp, iv, encounter_id, pokemon_id) VALUES (?, ?, ?, ?, ?)''', (pokemon.name, pokemon.cp, pokemon.iv, str(encounter_id), pokemon.pokemon_id))
break
else:
self.emit_event(
'vanish_log',
sender=self,
level='info',
formatted="vanish_log table not found, skipping log"
)
break
self.emit_event(
'pokemon_vanished',
formatted='{} vanished!'.format(pokemon.name),
data={
'pokemon': pokemon.name,
'encounter_id': self.pokemon['encounter_id'],
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude'],
'pokemon_id': pokemon.pokemon_id
}
)
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT DISTINCT COUNT(encounter_id) FROM vanish_log WHERE dated > (SELECT dated FROM catch_log WHERE dated IN (SELECT MAX(dated) FROM catch_log))")
result = c.fetchone()
self.consecutive_vanishes_so_far = result[0]
if self.rest_completed == False and self.consecutive_vanishes_so_far >= self.consecutive_vanish_limit:
self.start_rest()
if float(self._pct(catch_rate_by_ball[current_ball])) == 100:
self.bot.softban = True
# pokemon caught!
elif catch_pokemon_status == CATCH_STATUS_SUCCESS:
if self.rest_completed == True:
self.rest_completed = False
pokemon.unique_id = response_dict['responses']['CATCH_POKEMON']['captured_pokemon_id']
self.bot.metrics.captured_pokemon(pokemon.name, pokemon.cp, pokemon.iv_display, pokemon.iv)
awards = response_dict['responses']['CATCH_POKEMON']['capture_award']
exp_gain, candy_gain, stardust_gain = self.extract_award(awards)
with self.bot.database as conn:
c = conn.cursor()
c.execute(
"SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
result = c.fetchone()
if is_vip:
self.emit_event(
'pokemon_vip_caught',
formatted='Vip Captured {pokemon}! (CP: {cp} IV: {iv} {iv_display} NCP: {ncp} Shiny: {shiny}) Catch Limit: ({caught_last_24_hour}/{daily_catch_limit}) +{exp} exp +{stardust} stardust',
data={
'pokemon': pokemon.name,
'ncp': str(round(pokemon.cp_percent, 2)),
'cp': str(int(pokemon.cp)),
'iv': str(pokemon.iv),
'iv_display': str(pokemon.iv_display),
'shiny': pokemon.shiny,
'exp': str(exp_gain),
'stardust': stardust_gain,
'encounter_id': str(self.pokemon['encounter_id']),
'latitude': str(self.pokemon['latitude']),
'longitude': str(self.pokemon['longitude']),
'pokemon_id': str(pokemon.pokemon_id),
'caught_last_24_hour': str(result[0]),
'daily_catch_limit': str(self.daily_catch_limit)
}
)
else:
self.emit_event(
'pokemon_caught',
formatted='Captured {pokemon}! (CP: {cp} IV: {iv} {iv_display} NCP: {ncp} Shiny: {shiny}) Catch Limit: ({caught_last_24_hour}/{daily_catch_limit}) +{exp} exp +{stardust} stardust',
data={
'pokemon': pokemon.name,
'ncp': str(round(pokemon.cp_percent, 2)),
'cp': str(int(pokemon.cp)),
'iv': str(pokemon.iv),
'iv_display': str(pokemon.iv_display),
'shiny': pokemon.shiny,
'exp': str(exp_gain),
'stardust': stardust_gain,
'encounter_id': str(self.pokemon['encounter_id']),
'latitude': str(self.pokemon['latitude']),
'longitude': str(self.pokemon['longitude']),
'pokemon_id': str(pokemon.pokemon_id),
'caught_last_24_hour': str(result[0]),
'daily_catch_limit': str(self.daily_catch_limit)
}
)
inventory.pokemons().add(pokemon)
inventory.player().exp += exp_gain
self.bot.stardust += stardust_gain
candy = inventory.candies().get(pokemon.pokemon_id)
candy.add(candy_gain)
self.emit_event(
'gained_candy',
formatted='Candy gained: {gained_candy}. You now have {quantity} {type} candy!',
data = {
'gained_candy': str(candy_gain),
'quantity': candy.quantity,
'type': candy.type
},
)
self.bot.softban = False
try:
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='catch_log'")
result = c.fetchone()
while True:
if result[0] == 1:
conn.execute('''INSERT INTO catch_log (pokemon, cp, iv, encounter_id, pokemon_id) VALUES (?, ?, ?, ?, ?)''', (pokemon.name, pokemon.cp, pokemon.iv, str(encounter_id), pokemon.pokemon_id))
break
else:
self.emit_event(
'catch_log',
sender=self,
level='info',
formatted="catch_log table not found, skipping log"
)
break
user_data_caught = os.path.join(_base_dir, 'data', 'caught-%s.json' % self.bot.config.username)
with open(user_data_caught, 'ab') as outfile:
json.dump(OrderedDict({
'datetime': str(datetime.now()),
'pokemon': pokemon.name,
'cp': pokemon.cp,
'iv': pokemon.iv,
'encounter_id': self.pokemon['encounter_id'],
'pokemon_id': pokemon.pokemon_id,
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude']
}), outfile)
outfile.write('\n')
# if it is a new pokemon to our dex, simulate app animation delay
if exp_gain >= 500:
sleep(randrange(self.catchsim_newtodex_wait_min, self.catchsim_newtodex_wait_max))
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
elif catch_pokemon_status == CATCH_STATUS_MISSED:
self.emit_event(
'pokemon_capture_failed',
formatted='Pokeball thrown at {pokemon} missed... trying again!',
data={'pokemon': pokemon.name}
)
# Take some time to throw the ball from config options
action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
continue
break
def extract_award(self, awards):
return sum(awards['xp']), sum(awards['candy']), sum(awards['stardust'])
def generate_spin_parameter(self, throw_parameters):
spin_success_rate = self.catch_throw_parameters_spin_success_rate
if random() <= spin_success_rate:
throw_parameters['spin_modifier'] = 0.5 + 0.5 * random()
throw_parameters['spin_label'] = ' Curveball'
else:
throw_parameters['spin_modifier'] = 0.499 * random()
throw_parameters['spin_label'] = ''
def generate_throw_quality_parameters(self, throw_parameters):
throw_excellent_chance = self.catch_throw_parameters_excellent_rate
throw_great_chance = self.catch_throw_parameters_great_rate
throw_nice_chance = self.catch_throw_parameters_nice_rate
throw_normal_throw_chance = self.catch_throw_parameters_normal_rate
# Total all the chance types, pick a random number in that range and check what type of throw we got
total_chances = throw_excellent_chance + throw_great_chance \
+ throw_nice_chance + throw_normal_throw_chance
random_throw = random() * total_chances
if random_throw <= throw_excellent_chance:
throw_parameters['normalized_reticle_size'] = 1.70 + 0.25 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Excellent'
return
random_throw -= throw_excellent_chance
if random_throw <= throw_great_chance:
throw_parameters['normalized_reticle_size'] = 1.30 + 0.399 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Great'
return
random_throw -= throw_great_chance
if random_throw <= throw_nice_chance:
throw_parameters['normalized_reticle_size'] = 1.00 + 0.299 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Nice'
return
# Not any kind of special throw, so throw a normal one
# Here the reticle size doesn't matter, the throw landed outside of it
throw_parameters['normalized_reticle_size'] = 1.25 + 0.70 * random()
throw_parameters['normalized_hit_position'] = 0.0
throw_parameters['throw_type_label'] = 'OK'
def start_rest(self):
duration = int(uniform(self.rest_duration_min, self.rest_duration_max))
resume = datetime.now() + timedelta(seconds=duration)
self.emit_event(
'vanish_limit_reached',
formatted="Vanish limit reached! Taking a rest now for {duration}, will resume at {resume}.",
data={
'duration': str(timedelta(seconds=duration)),
'resume': resume.strftime("%H:%M:%S")
}
)
sleep(duration)
self.rest_completed = True
self.bot.login()
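# The throw-quality selection in generate_throw_quality_parameters above is a
# weighted random choice: sum all the configured chances, roll once in that
# range, then subtract each weight until the roll falls inside a bucket. A
# minimal standalone sketch of that technique follows; the labels and weights
# here are illustrative placeholders, not the bot's configured rates.
def _weighted_throw_example():
    from random import random
    weights = [('Excellent', 0.1), ('Great', 0.5), ('Nice', 0.3), ('OK', 1.0)]
    total = sum(weight for _, weight in weights)
    roll = random() * total
    for label, weight in weights:
        if roll <= weight:
            return label
        roll -= weight
    # numerical edge case: the roll landed exactly at the top of the range
    return 'OK'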
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A TF-IDF workflow (term frequency - inverse document frequency).
For an explanation of the TF-IDF algorithm see the following link:
http://en.wikipedia.org/wiki/Tf-idf
"""
from __future__ import absolute_import
import argparse
import glob
import math
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.pvalue import AsSingleton
def read_documents(pipeline, uris):
"""Read the documents at the provided uris and returns (uri, line) pairs."""
pcolls = []
for uri in uris:
pcolls.append(
pipeline
| 'Read: %s' % uri >> ReadFromText(uri)
| 'WithKey: %s' % uri >> beam.Map(lambda v, uri: (uri, v), uri))
return pcolls | 'FlattenReadPColls' >> beam.Flatten()
class TfIdf(beam.PTransform):
"""A transform containing a basic TF-IDF pipeline.
The input consists of KV objects where the key is the document's URI and
the value is a piece of the document's content.
The output is a mapping from terms to scores for each document URI.
"""
def expand(self, uri_to_content):
# Compute the total number of documents, and prepare a singleton
# PCollection to use as side input.
total_documents = (
uri_to_content
| 'GetUris 1' >> beam.Keys()
| 'GetUniqueUris' >> beam.RemoveDuplicates()
| 'CountUris' >> beam.combiners.Count.Globally())
# Create a collection of pairs mapping a URI to each of the words
# in the document associated with that URI.
def split_into_words(uri_line):
(uri, line) = uri_line
return [(uri, w.lower()) for w in re.findall(r'[A-Za-z\']+', line)]
uri_to_words = (
uri_to_content
| 'SplitWords' >> beam.FlatMap(split_into_words))
# Compute a mapping from each word to the total number of documents
# in which it appears.
word_to_doc_count = (
uri_to_words
| 'GetUniqueWordsPerDoc' >> beam.RemoveDuplicates()
| 'GetWords' >> beam.Values()
| 'CountDocsPerWord' >> beam.combiners.Count.PerElement())
# Compute a mapping from each URI to the total number of words in the
# document associated with that URI.
uri_to_word_total = (
uri_to_words
| 'GetUris 2' >> beam.Keys()
| 'CountWordsInDoc' >> beam.combiners.Count.PerElement())
# Count, for each (URI, word) pair, the number of occurrences of that word
# in the document associated with the URI.
uri_and_word_to_count = (
uri_to_words
| 'CountWord-DocPairs' >> beam.combiners.Count.PerElement())
# Adjust the above collection to a mapping from (URI, word) pairs to counts
# into an isomorphic mapping from URI to (word, count) pairs, to prepare
# for a join by the URI key.
def shift_keys(uri_word_count):
return (uri_word_count[0][0], (uri_word_count[0][1], uri_word_count[1]))
uri_to_word_and_count = (
uri_and_word_to_count
| 'ShiftKeys' >> beam.Map(shift_keys))
# Perform a CoGroupByKey (a sort of pre-join) on the prepared
# uri_to_word_total and uri_to_word_and_count tagged by 'word totals' and
# 'word counts' strings. This yields a mapping from URI to a dictionary
# that maps the above mentioned tag strings to an iterable containing the
# word total for that URI and word and count respectively.
#
# A diagram (in which '[]' just means 'iterable'):
#
# URI: {'word totals': [count], # Total words within this URI's document.
# 'word counts': [(word, count), # Counts of specific words
# (word, count), # within this URI's document.
# ... ]}
uri_to_word_and_count_and_total = (
{'word totals': uri_to_word_total, 'word counts': uri_to_word_and_count}
| 'CoGroupByUri' >> beam.CoGroupByKey())
# Compute a mapping from each word to a (URI, term frequency) pair for each
# URI. A word's term frequency for a document is simply the number of times
# that word occurs in the document divided by the total number of words in
# the document.
def compute_term_frequency(uri_count_and_total):
(uri, count_and_total) = uri_count_and_total
word_and_count = count_and_total['word counts']
# We have an iterable for one element that we want extracted.
[word_total] = count_and_total['word totals']
for word, count in word_and_count:
yield word, (uri, float(count) / word_total)
word_to_uri_and_tf = (
uri_to_word_and_count_and_total
| 'ComputeTermFrequencies' >> beam.FlatMap(compute_term_frequency))
# Compute a mapping from each word to its document frequency.
# A word's document frequency in a corpus is the number of
# documents in which the word appears divided by the total
# number of documents in the corpus.
#
# This calculation uses a side input, a Dataflow-computed auxiliary value
# presented to each invocation of our MapFn lambda. The second argument to
# the lambda (called total---note that we are unpacking the first argument)
# receives the value we listed after the lambda in Map(). Additional side
# inputs (and ordinary Python values, too) can be provided to MapFns and
# DoFns in this way.
word_to_df = (
word_to_doc_count
| 'ComputeDocFrequencies' >> beam.Map(
lambda (word, count), total: (word, float(count) / total),
AsSingleton(total_documents)))
# Join the term frequency and document frequency collections,
# each keyed on the word.
word_to_uri_and_tf_and_df = (
{'tf': word_to_uri_and_tf, 'df': word_to_df}
| 'CoGroupWordsByTf-df' >> beam.CoGroupByKey())
# Compute a mapping from each word to a (URI, TF-IDF) score for each URI.
# There are a variety of definitions of TF-IDF
# ("term frequency - inverse document frequency") score; here we use a
# basic version that is the term frequency divided by the log of the
# document frequency.
def compute_tf_idf(word_tf_and_df):
(word, tf_and_df) = word_tf_and_df
[docf] = tf_and_df['df']
for uri, tf in tf_and_df['tf']:
yield word, (uri, tf * math.log(1 / docf))
word_to_uri_and_tfidf = (
word_to_uri_and_tf_and_df
| 'ComputeTf-idf' >> beam.FlatMap(compute_tf_idf))
return word_to_uri_and_tfidf
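# For reference, the score produced by compute_tf_idf above is
#     tf(word, uri) * log(1 / df(word))
# where tf is the count of the word in a document divided by that document's
# total word count, and df is the fraction of documents containing the word.
# A minimal plain-Python sketch of the same arithmetic on a toy corpus
# (illustrative only, not part of the Beam pipeline):
def _tf_idf_by_hand(docs):
    """docs: dict mapping uri -> list of lowercased words."""
    total_documents = len(docs)
    docs_per_word = {}
    for words in docs.values():
        for word in set(words):
            docs_per_word[word] = docs_per_word.get(word, 0) + 1
    scores = {}
    for uri, words in docs.items():
        for word in set(words):
            tf = float(words.count(word)) / len(words)
            df = float(docs_per_word[word]) / total_documents
            scores[(word, uri)] = tf * math.log(1 / df)
    return scores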
def run(argv=None):
"""Main entry point; defines and runs the tfidf pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--uris',
required=True,
help='URIs to process.')
parser.add_argument('--output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read documents specified by the uris command line option.
pcoll = read_documents(p, glob.glob(known_args.uris))
# Compute TF-IDF information for each word.
output = pcoll | TfIdf()
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | 'write' >> WriteToText(known_args.output)
# Execute the pipeline and wait until it is completed.
if __name__ == '__main__':
run()
|
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import mock
import netaddr
from neutron_lib import constants as l3_constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_config
from neutron.common import constants as n_const
from neutron.common import utils as common_utils
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
_uuid = uuidutils.generate_uuid
def get_ovs_bridge(br_name):
return ovs_lib.OVSBridge(br_name)
class L3AgentTestFramework(base.BaseSudoTestCase):
def setUp(self):
super(L3AgentTestFramework, self).setUp()
self.mock_plugin_api = mock.patch(
'neutron.agent.l3.agent.L3PluginApi').start().return_value
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
self.conf = self._configure_agent('agent1')
self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
self.conf)
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
def _configure_agent(self, host, agent_mode='dvr_snat'):
conf = self._get_config_opts()
l3_agent_main.register_opts(conf)
conf.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
conf.set_override('ovs_integration_bridge', br_int.br_name)
conf.set_override('external_network_bridge', br_ex.br_name)
temp_dir = self.get_new_temp_dir()
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
conf.set_override('state_path', temp_dir.path)
# NOTE(cbrandily): log_file or log_dir must be set otherwise
# metadata_proxy_watch_log has no effect
conf.set_override('log_file',
get_temp_file_path('log_file'))
conf.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
conf.set_override('ha_confs_path',
get_temp_file_path('ha_confs'))
conf.set_override('external_pids',
get_temp_file_path('external/pids'))
conf.set_override('host', host)
conf.set_override('agent_mode', agent_mode)
return conf
def _get_agent_ovs_integration_bridge(self, agent):
return get_ovs_bridge(agent.conf.ovs_integration_bridge)
def generate_router_info(self, enable_ha, ip_version=4, extra_routes=True,
enable_fip=True, enable_snat=True,
num_internal_ports=1,
dual_stack=False, v6_ext_gw_with_sub=True):
if ip_version == 6 and not dual_stack:
enable_snat = False
enable_fip = False
extra_routes = False
return l3_test_common.prepare_router_data(ip_version=ip_version,
enable_snat=enable_snat,
num_internal_ports=(
num_internal_ports),
enable_floating_ip=enable_fip,
enable_ha=enable_ha,
extra_routes=extra_routes,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub))
def _test_conntrack_disassociate_fip(self, ha):
'''Test that conntrack immediately drops stateful connection
that uses floating IP once it's disassociated.
'''
router_info = self.generate_router_info(enable_ha=ha)
router = self.manage_router(self.agent, router_info)
port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP,
router.ns_name)
client_address = '19.4.4.3'
server_address = '35.4.0.4'
def clean_fips(router):
router.router[l3_constants.FLOATINGIP_KEY] = []
clean_fips(router)
self._add_fip(router, client_address, fixed_address=server_address)
router.process(self.agent)
router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
netcat = net_helpers.NetcatTester(
router.ns_name, router.ns_name, client_address, port,
protocol=net_helpers.NetcatTester.TCP)
self.addCleanup(netcat.stop_processes)
def assert_num_of_conntrack_rules(n):
out = router_ns.netns.execute(["conntrack", "-L",
"--orig-src", client_address])
self.assertEqual(
n, len([line for line in out.strip().split('\n') if line]))
if ha:
utils.wait_until_true(lambda: router.ha_state == 'master')
with self.assert_max_execution_time(100):
assert_num_of_conntrack_rules(0)
self.assertTrue(netcat.test_connectivity())
assert_num_of_conntrack_rules(1)
clean_fips(router)
router.process(self.agent)
assert_num_of_conntrack_rules(0)
with testtools.ExpectedException(RuntimeError):
netcat.test_connectivity()
def _test_update_floatingip_statuses(self, router_info):
router = self.manage_router(self.agent, router_info)
rpc = self.agent.plugin_rpc.update_floatingip_statuses
self.assertTrue(rpc.called)
# Assert that every defined FIP is updated via RPC
expected_fips = set([
(fip['id'], l3_constants.FLOATINGIP_STATUS_ACTIVE) for fip in
router.router[l3_constants.FLOATINGIP_KEY]])
call = [args[0] for args in rpc.call_args_list][0]
actual_fips = set(
[(fip_id, status) for fip_id, status in call[2].items()])
self.assertEqual(expected_fips, actual_fips)
def _gateway_check(self, gateway_ip, external_device):
expected_gateway = gateway_ip
ip_vers = netaddr.IPAddress(expected_gateway).version
existing_gateway = (external_device.route.get_gateway(
ip_version=ip_vers).get('gateway'))
self.assertEqual(expected_gateway, existing_gateway)
def _assert_ha_device(self, router):
def ha_router_dev_name_getter(not_used):
return router.get_ha_device_name()
self.assertTrue(self.device_exists_with_ips_and_mac(
router.router[l3_constants.HA_INTERFACE_KEY],
ha_router_dev_name_getter, router.ns_name))
def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=router.ns_name)
for subnet in external_port['subnets']:
self._gateway_check(subnet['gateway_ip'], external_device)
if not v6_ext_gw_with_sub:
self._gateway_check(self.agent.conf.ipv6_gateway,
external_device)
def _assert_external_device(self, router):
external_port = router.get_ex_gw_port()
self.assertTrue(self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
router.ns_name))
def _router_lifecycle(self, enable_ha, ip_version=4,
dual_stack=False, v6_ext_gw_with_sub=True):
router_info = self.generate_router_info(enable_ha, ip_version,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub))
router = self.manage_router(self.agent, router_info)
# Add multiple-IPv6-prefix internal router port
slaac = n_const.IPV6_SLAAC
slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
subnet_modes = [slaac_mode] * 2
self._add_internal_interface_by_subnet(router.router,
count=2,
ip_version=6,
ipv6_subnet_modes=subnet_modes)
router.process(self.agent)
if enable_ha:
port = router.get_ex_gw_port()
interface_name = router.get_external_device_name(port['id'])
self._assert_no_ip_addresses_on_interface(router.ns_name,
interface_name)
utils.wait_until_true(lambda: router.ha_state == 'master')
# Keepalived notifies of a state transition when it starts,
# not when it ends. Thus, we have to wait until keepalived finishes
# configuring everything. We verify this by waiting until the last
# device has an IP address.
device = router.router[l3_constants.INTERFACE_KEY][-1]
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
device,
router.get_internal_device_name,
router.ns_name)
utils.wait_until_true(device_exists)
self.assertTrue(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_external_device(router)
if not (enable_ha and (ip_version == 6 or dual_stack)):
# Note(SridharG): enable the assert_gateway for IPv6 once
# keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional
# platform) is updated to 1.2.10 (or above).
# For more details: https://review.openstack.org/#/c/151284/
self._assert_gateway(router, v6_ext_gw_with_sub)
self.assertTrue(self.floating_ips_configured(router))
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_iptables_rules_converged(router)
self._assert_extra_routes(router)
ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
self._assert_onlink_subnet_routes(router, ip_versions)
self._assert_metadata_chains(router)
# Verify router gateway interface is configured to receive Router Advts
# when IPv6 is enabled and no IPv6 gateway is configured.
if router.use_ipv6 and not v6_ext_gw_with_sub:
if not self.agent.conf.ipv6_gateway:
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.accept_ra' % external_device_name])
self.assertEqual('2', ra_state)
if enable_ha:
self._assert_ha_device(router)
self.assertTrue(router.keepalived_manager.get_process().active)
self._delete_router(self.agent, router.router_id)
self._assert_interfaces_deleted_from_ovs()
self._assert_router_does_not_exist(router)
if enable_ha:
self.assertFalse(router.keepalived_manager.get_process().active)
def manage_router(self, agent, router):
self.addCleanup(agent._safe_router_removed, router['id'])
agent._process_added_router(router)
return agent.router_info[router['id']]
def _delete_router(self, agent, router_id):
agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None, fixed_ip_address_scope=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host,
'fixed_ip_address_scope': fixed_ip_address_scope}
router.router[l3_constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=4,
ipv6_subnet_modes=None,
interface_id=None):
return l3_test_common.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
def _namespace_exists(self, namespace):
ip = ip_lib.IPWrapper(namespace=namespace)
return ip.netns.exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
def get_expected_keepalive_configuration(self, router):
ha_device_name = router.get_ha_device_name()
external_port = router.get_ex_gw_port()
ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
external_device_name = router.get_external_device_name(
external_port['id'])
external_device_cidr = self._port_first_ip_cidr(external_port)
internal_port = router.router[l3_constants.INTERFACE_KEY][0]
int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
internal_device_name = router.get_internal_device_name(
internal_port['id'])
internal_device_cidr = self._port_first_ip_cidr(internal_port)
floating_ip_cidr = common_utils.ip_to_cidr(
router.get_floating_ips()[0]['floating_ip_address'])
default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
return """vrrp_instance VR_1 {
state BACKUP
interface %(ha_device_name)s
virtual_router_id 1
priority 50
garp_master_delay 60
nopreempt
advert_int 2
track_interface {
%(ha_device_name)s
}
virtual_ipaddress {
169.254.0.1/24 dev %(ha_device_name)s
}
virtual_ipaddress_excluded {
%(floating_ip_cidr)s dev %(external_device_name)s
%(external_device_cidr)s dev %(external_device_name)s
%(internal_device_cidr)s dev %(internal_device_name)s
%(ex_port_ipv6)s dev %(external_device_name)s scope link
%(int_port_ipv6)s dev %(internal_device_name)s scope link
}
virtual_routes {
0.0.0.0/0 via %(default_gateway_ip)s dev %(external_device_name)s
8.8.8.0/24 via 19.4.4.4
%(extra_subnet_cidr)s dev %(external_device_name)s scope link
}
}""" % {
'ha_device_name': ha_device_name,
'external_device_name': external_device_name,
'external_device_cidr': external_device_cidr,
'internal_device_name': internal_device_name,
'internal_device_cidr': internal_device_cidr,
'floating_ip_cidr': floating_ip_cidr,
'default_gateway_ip': default_gateway_ip,
'int_port_ipv6': int_port_ipv6,
'ex_port_ipv6': ex_port_ipv6,
'extra_subnet_cidr': extra_subnet_cidr,
}
def _get_rule(self, iptables_manager, table, chain, predicate):
rules = iptables_manager.get_chain(table, chain)
result = next(rule for rule in rules if predicate(rule))
return result
def _assert_router_does_not_exist(self, router):
# If the namespace assertion succeeds
# then the devices and iptable rules have also been deleted,
# so there's no need to check that explicitly.
self.assertFalse(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: not self._metadata_proxy_exists(self.agent.conf, router))
def _assert_snat_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'POSTROUTING'))
def _assert_floating_ip_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'float-snat'))
def _assert_iptables_rules_converged(self, router):
# if your code is failing on this line, it means you are not generating
# your iptables rules in the same format that iptables-save returns
# them. run iptables-save to see the format they should be in
self.assertFalse(router.iptables_manager.apply())
def _assert_metadata_chains(self, router):
metadata_port_filter = lambda rule: (
str(self.agent.conf.metadata_port) in rule.rule)
self.assertTrue(self._get_rule(router.iptables_manager,
'nat',
'PREROUTING',
metadata_port_filter))
self.assertTrue(self._get_rule(router.iptables_manager,
'filter',
'INPUT',
metadata_port_filter))
def _assert_internal_devices(self, router):
internal_devices = router.router[l3_constants.INTERFACE_KEY]
self.assertTrue(len(internal_devices))
for device in internal_devices:
self.assertTrue(self.device_exists_with_ips_and_mac(
device, router.get_internal_device_name, router.ns_name))
def _assert_extra_routes(self, router, namespace=None):
if namespace is None:
namespace = router.ns_name
routes = ip_lib.get_routing_table(4, namespace=namespace)
routes = [{'nexthop': route['nexthop'],
'destination': route['destination']} for route in routes]
for extra_route in router.router['routes']:
self.assertIn(extra_route, routes)
def _assert_onlink_subnet_routes(
self, router, ip_versions, namespace=None):
ns_name = namespace or router.ns_name
routes = []
for ip_version in ip_versions:
_routes = ip_lib.get_routing_table(ip_version,
namespace=ns_name)
routes.extend(_routes)
routes = set(route['destination'] for route in routes)
extra_subnets = router.get_ex_gw_port()['extra_subnets']
for extra_subnet in (route['cidr'] for route in extra_subnets):
self.assertIn(extra_subnet, routes)
def _assert_interfaces_deleted_from_ovs(self):
def assert_ovs_bridge_empty(bridge_name):
bridge = ovs_lib.OVSBridge(bridge_name)
self.assertFalse(bridge.get_port_name_list())
assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
def floating_ips_configured(self, router):
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
external_port = router.get_ex_gw_port()
return len(floating_ips) and all(
ip_lib.device_exists_with_ips_and_mac(
router.get_external_device_name(external_port['id']),
['%s/32' % fip['floating_ip_address']],
external_port['mac_address'],
namespace=router.ns_name) for fip in floating_ips)
def fail_ha_router(self, router):
device_name = router.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router.ha_namespace)
ha_device.link.set_down()
@classmethod
def _get_addresses_on_device(cls, namespace, interface):
return [address['cidr'] for address in
ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
def _assert_no_ip_addresses_on_interface(self, namespace, interface):
self.assertEqual(
[], self._get_addresses_on_device(namespace, interface))
def _assert_ip_address_on_interface(self,
namespace, interface, ip_address):
self.assertIn(
ip_address, self._get_addresses_on_device(namespace, interface))
def _assert_ping_reply_from_expected_address(
self, ping_result, expected_address):
ping_results = ping_result.split('\n')
self.assertGreater(
len(ping_results), 1,
"The result from ping should be multiple lines")
self.assertIn(
expected_address, ping_results[1],
("Expect to see %s in the reply of ping, but failed" %
expected_address))
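# Several checks above poll for eventual state (keepalived promotion, device
# configuration, metadata proxy startup) via utils.wait_until_true with a
# callable predicate rather than sleeping for a fixed time. A minimal sketch
# of that polling pattern, assuming any boolean predicate; the helper below is
# illustrative and not part of the neutron test utilities:
def _example_wait_until(predicate, timeout=10, sleep_interval=0.5):
    """Return True as soon as predicate() is truthy, False on timeout."""
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(sleep_interval)
    return False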
|
|
# Copyright 2015 Objectif Libre
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudkittyclient.openstack.common.apiclient import client
from cloudkittyclient.openstack.common.apiclient import fake_client
from cloudkittyclient.tests import utils
from cloudkittyclient.v1.rating import hashmap
fixtures = {
# services
'/v1/rating/module_config/hashmap/services': {
'GET': (
{},
{'services':
[
{
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'compute'
},
{
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd6',
'name': 'volume'
},
{
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd7',
'name': 'network'
},
],
}
),
},
# a service
('/v1/rating/module_config/hashmap/services/'
'2451c2e0-2c6b-4e75-987f-93661eef0fd5'): {
'GET': (
{},
{
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'compute',
}
),
'DELETE': (
{},
{},
),
},
# a field
('/v1/rating/module_config/hashmap/fields/'
'a53db546-bac0-472c-be4b-5bf9f6117581'): {
'GET': (
{},
{
'field_id': 'a53db546-bac0-472c-be4b-5bf9f6117581',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'flavor',
},
),
'PUT': (
{},
{},
),
},
('/v1/rating/module_config/hashmap/fields'
'?service_id=2451c2e0-2c6b-4e75-987f-93661eef0fd5'): {
'GET': (
{},
{'fields': [
{
'field_id': 'a53db546-bac0-472c-be4b-5bf9f6117581',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'flavor',
},
{
'field_id': 'a53db546-bac0-472c-be4b-5bf9f6117582',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'LOLOL',
},
]
},
),
'PUT': (
{},
{},
),
},
# a mapping
('/v1/rating/module_config/hashmap/mappings/'
'bff0d209-a8e4-46f8-8c1a-f231db375dcb'): {
'GET': (
{},
{
'mapping_id': 'bff0d209-a8e4-46f8-8c1a-f231db375dcb',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'field_id': 'a53db546-bac0-472c-be4b-5bf9f6117581',
'group_id': None,
'value': 'm1.small',
'cost': 0.50,
'type': 'flat',
},
),
'PUT': (
{},
{
'mapping_id': 'bff0d209-a8e4-46f8-8c1a-f231db375dcb',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'field_id': 'a53db546-bac0-472c-be4b-5bf9f6117581',
'group_id': None,
'value': 'm1.small',
'cost': 0.20,
'type': 'flat',
},
),
},
# some mappings
('/v1/rating/module_config/hashmap/mappings'
'?service_id=2451c2e0-2c6b-4e75-987f-93661eef0fd5'): {
'GET': (
{},
{'mappings':
[
{
'mapping_id': 'bff0d209-a8e4-46f8-8c1a-f231db375dcb',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'field_id': None,
'group_id': None,
'value': 'm1.small',
'cost': 0.50,
'type': 'flat',
},
{
'mapping_id': 'bff0d209-a8e4-46f8-8c1a-f231db375dcc',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'field_id': None,
'group_id': None,
'value': 'm1.tiny',
'cost': 1.10,
'type': 'flat',
},
{
'mapping_id': 'bff0d209-a8e4-46f8-8c1a-f231db375dcd',
'service_id': '2451c2e0-2c6b-4e75-987f-93661eef0fd5',
'field_id': None,
'group_id': None,
'value': 'm1.big',
'cost': 1.50,
'type': 'flat',
},
],
}
),
'PUT': (
{},
{},
),
},
'/v1/rating/module_config/hashmap/groups': {
'GET': (
{},
{'groups':
[
{
'group_id': 'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'object_consumption'
},
{
'group_id': 'aaa1c2e0-2c6b-4e75-987f-93661eef0fd6',
'name': 'compute_instance'
},
{
'group_id': 'aaa1c2e0-2c6b-4e75-987f-93661eef0fd7',
'name': 'networking'
},
],
}
),
},
('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5'): {
'GET': (
{},
{
'group_id': 'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5',
'name': 'object_consumption'
},
),
'DELETE': (
{},
{},
),
},
('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5?recursive=True'): {
'DELETE': (
{},
{},
),
},
# a threshold
('/v1/rating/module_config/hashmap/thresholds/'
'1f136864-be73-481f-b9be-4fbda2496f72'): {
'GET': (
{},
{
'threshold_id': '1f136864-be73-481f-b9be-4fbda2496f72',
'service_id': '1329d62f-bd1c-4a88-a75a-07545e41e8d7',
'field_id': 'c7c28d87-5103-4a05-af7f-e4d0891cb7fc',
'group_id': None,
'level': 30,
'cost': 5.98,
'map_type': 'flat',
},
),
'PUT': (
{},
{
'threshold_id': '1f136864-be73-481f-b9be-4fbda2496f72',
'service_id': '1329d62f-bd1c-4a88-a75a-07545e41e8d7',
'field_id': 'c7c28d87-5103-4a05-af7f-e4d0891cb7fc',
'group_id': None,
'level': 30,
'cost': 5.99,
'type': 'flat',
},
),
'DELETE': (
{},
{},
),
},
}
class ServiceManagerTest(utils.BaseTestCase):
def setUp(self):
super(ServiceManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.ServiceManager(self.api)
def test_list_services(self):
resources = list(self.mgr.list())
expect = [
'GET', '/v1/rating/module_config/hashmap/services'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 3)
self.assertEqual(
resources[0].service_id,
'2451c2e0-2c6b-4e75-987f-93661eef0fd5'
)
self.assertEqual(resources[0].name, 'compute')
self.assertEqual(resources[1].name, 'volume')
self.assertEqual(resources[2].name, 'network')
def test_get_a_service(self):
resource = self.mgr.get(
service_id='2451c2e0-2c6b-4e75-987f-93661eef0fd5'
)
expect = [
'GET', ('/v1/rating/module_config/hashmap/services/'
'2451c2e0-2c6b-4e75-987f-93661eef0fd5')
]
self.http_client.assert_called(*expect)
self.assertEqual(resource.service_id,
'2451c2e0-2c6b-4e75-987f-93661eef0fd5')
self.assertEqual(resource.name, 'compute')
class ServiceTest(utils.BaseTestCase):
def setUp(self):
super(ServiceTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.ServiceManager(self.api)
self.resource = self.mgr.get(
service_id='2451c2e0-2c6b-4e75-987f-93661eef0fd5'
)
def test_get_fields(self):
fields = self.resource.fields[:]
expect = [
'GET', ('/v1/rating/module_config/hashmap/fields'
'?service_id=2451c2e0-2c6b-4e75-987f-93661eef0fd5'),
]
self.http_client.assert_called(*expect)
self.assertEqual(len(fields), 2)
def test_get_mappings(self):
mappings = self.resource.mappings[:]
expect = [
'GET', ('/v1/rating/module_config/hashmap/mappings'
'?service_id=2451c2e0-2c6b-4e75-987f-93661eef0fd5'),
]
self.http_client.assert_called(*expect)
self.assertEqual(len(mappings), 3)
class FieldManagerTest(utils.BaseTestCase):
def setUp(self):
super(FieldManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.FieldManager(self.api)
def test_get_a_field(self):
resource = self.mgr.get(
field_id='a53db546-bac0-472c-be4b-5bf9f6117581'
)
expect = [
'GET', ('/v1/rating/module_config/hashmap/fields/'
'a53db546-bac0-472c-be4b-5bf9f6117581')
]
self.http_client.assert_called(*expect)
self.assertEqual(resource.field_id,
'a53db546-bac0-472c-be4b-5bf9f6117581')
self.assertEqual(
resource.service_id,
'2451c2e0-2c6b-4e75-987f-93661eef0fd5'
)
self.assertEqual(resource.name, 'flavor')
class MappingManagerTest(utils.BaseTestCase):
def setUp(self):
super(MappingManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.MappingManager(self.api)
def test_get_a_mapping(self):
resource = self.mgr.get(
mapping_id='bff0d209-a8e4-46f8-8c1a-f231db375dcb'
)
expect = [
'GET', ('/v1/rating/module_config/hashmap/mappings/'
'bff0d209-a8e4-46f8-8c1a-f231db375dcb')
]
self.http_client.assert_called(*expect)
self.assertEqual(resource.mapping_id,
'bff0d209-a8e4-46f8-8c1a-f231db375dcb')
self.assertEqual(
resource.service_id,
'2451c2e0-2c6b-4e75-987f-93661eef0fd5'
)
self.assertEqual(
resource.field_id,
'a53db546-bac0-472c-be4b-5bf9f6117581'
)
self.assertEqual(resource.value, 'm1.small')
self.assertEqual(resource.cost, 0.5)
def test_update_a_mapping(self):
resource = self.mgr.get(
mapping_id='bff0d209-a8e4-46f8-8c1a-f231db375dcb'
)
resource.cost = 0.2
self.mgr.update(**resource.dirty_fields)
expect = [
'PUT', ('/v1/rating/module_config/hashmap/mappings/'
'bff0d209-a8e4-46f8-8c1a-f231db375dcb'),
{u'mapping_id': u'bff0d209-a8e4-46f8-8c1a-f231db375dcb',
u'cost': 0.2, u'type': u'flat',
u'service_id': u'2451c2e0-2c6b-4e75-987f-93661eef0fd5',
u'field_id': u'a53db546-bac0-472c-be4b-5bf9f6117581',
u'value': u'm1.small'}
]
self.http_client.assert_called(*expect)
class GroupManagerTest(utils.BaseTestCase):
def setUp(self):
super(GroupManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.GroupManager(self.api)
def test_get_a_group(self):
resource = self.mgr.get(
group_id='aaa1c2e0-2c6b-4e75-987f-93661eef0fd5'
)
expect = [
'GET', ('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5')
]
self.http_client.assert_called(*expect)
self.assertEqual(resource.group_id,
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5')
self.assertEqual(resource.name, 'object_consumption')
def test_delete_a_group(self):
self.mgr.delete(group_id='aaa1c2e0-2c6b-4e75-987f-93661eef0fd5')
expect = [
'DELETE', ('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5')
]
self.http_client.assert_called(*expect)
def test_delete_a_group_recursively(self):
self.mgr.delete(group_id='aaa1c2e0-2c6b-4e75-987f-93661eef0fd5',
recursive=True)
expect = [
'DELETE', ('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5?recursive=True')
]
self.http_client.assert_called(*expect)
class GroupTest(utils.BaseTestCase):
def setUp(self):
super(GroupTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.GroupManager(self.api)
def test_delete(self):
self.group = self.mgr.get(
group_id='aaa1c2e0-2c6b-4e75-987f-93661eef0fd5'
)
self.group.delete()
# DELETE /v1/rating/module_config/hashmap/groups/aaa1c2e0-2c6b-4e75-987f-93661eef0fd5
expect = [
'DELETE', ('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5')
]
self.http_client.assert_called(*expect)
def test_delete_recursive(self):
self.group = self.mgr.get(
group_id='aaa1c2e0-2c6b-4e75-987f-93661eef0fd5'
)
self.group.delete(recursive=True)
# DELETE
# /v1/rating/module_config/hashmap/groups/aaa1c2e0-2c6b-4e75-987f-93661eef0fd5?recursive=True
expect = [
'DELETE', ('/v1/rating/module_config/hashmap/groups/'
'aaa1c2e0-2c6b-4e75-987f-93661eef0fd5'
'?recursive=True')
]
self.http_client.assert_called(*expect)
class ThresholdManagerTest(utils.BaseTestCase):
def setUp(self):
super(ThresholdManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = hashmap.ThresholdManager(self.api)
def test_get_a_threshold(self):
resource = self.mgr.get(
threshold_id='1f136864-be73-481f-b9be-4fbda2496f72'
)
expect = [
'GET', ('/v1/rating/module_config/hashmap/thresholds/'
'1f136864-be73-481f-b9be-4fbda2496f72')
]
self.http_client.assert_called(*expect)
self.assertEqual(resource.threshold_id,
'1f136864-be73-481f-b9be-4fbda2496f72')
self.assertEqual(
resource.service_id,
'1329d62f-bd1c-4a88-a75a-07545e41e8d7'
)
self.assertEqual(
resource.field_id,
'c7c28d87-5103-4a05-af7f-e4d0891cb7fc'
)
self.assertEqual(resource.level, 30)
self.assertEqual(resource.cost, 5.98)
def test_update_a_threshold(self):
resource = self.mgr.get(
threshold_id='1f136864-be73-481f-b9be-4fbda2496f72'
)
resource.cost = 5.99
self.mgr.update(**resource.dirty_fields)
expect = [
'PUT', ('/v1/rating/module_config/hashmap/thresholds/'
'1f136864-be73-481f-b9be-4fbda2496f72'),
{u'threshold_id': u'1f136864-be73-481f-b9be-4fbda2496f72',
u'cost': 5.99, u'map_type': u'flat',
u'service_id': u'1329d62f-bd1c-4a88-a75a-07545e41e8d7',
u'field_id': u'c7c28d87-5103-4a05-af7f-e4d0891cb7fc',
u'level': 30}
]
self.http_client.assert_called(*expect)
def test_delete_a_threshold(self):
self.mgr.delete(threshold_id='1f136864-be73-481f-b9be-4fbda2496f72')
expect = [
'DELETE', ('/v1/rating/module_config/hashmap/thresholds/'
'1f136864-be73-481f-b9be-4fbda2496f72')
]
self.http_client.assert_called(*expect)
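# The fixtures dict above follows the shape that fake_client.FakeHTTPClient
# replays in place of real HTTP calls:
#     '<request path>': {'<HTTP METHOD>': (response_headers, response_body)}
# A minimal, hypothetical entry for an additional endpoint would look like the
# following (the path below is illustrative, not a real CloudKitty endpoint):
_example_fixture_entry = {
    '/v1/rating/module_config/hashmap/example': {
        'GET': (
            {},                  # response headers
            {'example': []},     # JSON body handed back to the client
        ),
    },
}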
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:5888")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:5888")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Sinecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import sys
import re
import urllib, urllib2
import time
from ..plugins import SoCoPlugin
__all__ = ['TalkerPlugin']
class TalkerPlugin(SoCoPlugin):
"""
The main use of this plugin is to make your Sonos system speak text. It works by sending a request to the
Google Text To Speech service, downloading an MP3 from the service, then playing the MP3 on the desired Sonos
players. It will pause and resume playback properly if you are listening to music at the time the message is
sent.
SETUP REQUIREMENTS: You must add the path to the Google Text To Speech MP3 to your Sonos music library in order
to obtain the URI for that file. Once this is done, you can find the URI using the
"get_music_library_information()" method in the soco package.
"""
def __init__(self,soco,mp3Path,sonosURI,zoneNames=None,maxAttempts=5):
"""
:param soco: soco instance per soco plugin instructions
:param mp3Path: The path where the TTS message MP3 should be saved.
:param sonosURI: URI of the MP3 file. This should point to the same file that exists at mp3Path.
:param zoneNames: List of Sonos player names you wish the message to play on, e.g. ['Kitchen', 'Office'].
If nothing is passed, the message will play on all Sonos players.
:param maxAttempts: Number of attempts to run soco.discover(). Regardless of the timeout passed to
soco.discover(), it may still fail, but multiple attempts usually work.
:return: TalkerPlugin object
"""
self.sonosURI = sonosURI
self.mp3Path = mp3Path
discovered = None
iter=0
while discovered is None and iter < maxAttempts:
discovered = soco.discover(timeout=2)
iter += 1
assert discovered is not None, 'Connection to Sonos system failed.'
zoneList = []
nameList = []
for zone in discovered:
zoneList.append(zone)
nameList.append(zone.player_name)
if zoneNames:
assert type(zoneNames) == list and all([zone in nameList for zone in zoneNames]), \
'Speaker object must be instantiated with a list of existing zone names on your network'
speakingSoCos = [zone for zone in zoneList if zone.player_name in zoneNames]
else:
speakingSoCos = zoneList
self.masterSoCo = speakingSoCos[0]
speakingSoCos.pop(0)
self.slaveSoCos = speakingSoCos
# if setup is True:
# self._setAudioDirectory()
super(TalkerPlugin, self).__init__(soco)
def talk(self,talkString='This is a test. Testing 1 2 3',volume=25):
"""
:param talkString: String you wish your Sonos system to speak
:param volume: Volume you wish for your Sonos system to speak at. The volume will be set back to the previous
value after the message has been spoken
:return: None
"""
self._formGroup()
tts = GoogleTTS()
text_lines = tts.convertTextAsLinesOfText(talkString)
tts.downloadAudioFile(text_lines,'en',open(self.mp3Path,'wb'))
oldvolumes = [self.masterSoCo.volume]
oldtracks = [self.masterSoCo.get_current_track_info()]
oldqueues = [self.masterSoCo.get_queue()]
oldStates = [self.masterSoCo.get_current_transport_info()]
allSoCos = [self.masterSoCo]
for SoCo in self.slaveSoCos:
oldvolumes.append(SoCo.volume)
oldtracks.append(SoCo.get_current_track_info())
oldqueues.append(SoCo.get_queue())
oldStates.append(SoCo.get_current_transport_info())
allSoCos.append(SoCo)
self.masterSoCo.volume = volume
self.masterSoCo.play_uri(self.sonosURI,title=u'Python Talking Script')
# self.masterSoCo.get_current_track_info()['duration']
duration = time.strptime(self.masterSoCo.get_current_track_info()['duration'], '%H:%M:%S')
time.sleep(duration.tm_hour * 3600 + duration.tm_min * 60 + duration.tm_sec)
for ind,SoCo in enumerate(allSoCos):
SoCo.volume=oldvolumes[ind]
if oldStates[ind]['current_transport_state'] == 'PLAYING':
SoCo.play_from_queue(int(oldtracks[ind]['playlist_position'])-1)
SoCo.seek(oldtracks[ind]['position'])
self._delGroup()
def _formGroup(self):
for SoCo in self.slaveSoCos:
SoCo.join(self.masterSoCo)
def _delGroup(self):
for SoCo in self.slaveSoCos:
SoCo.unjoin()
class GoogleTTS(object):
"""
Taken from script at https://github.com/JulienD/Google-Text-To-Speech. No license info in repo.
"""
def __init__(self):
pass
def convertTextAsLinesOfText(self,text):
""" This convert a word, a short text, a long text into several parts to
smaller than 100 characters.
"""
# Sanitizes the text.
text = text.replace('\n','')
text_list = re.split(r'(,|\.|;|:)', text)
# Splits a text into chunks of texts.
text_lines = []
for idx, val in enumerate(text_list):
if (idx % 2 == 0):
text_lines.append(val)
else :
# Combines the string + the punctuation.
joined_text = ''.join((text_lines.pop(),val))
# Checks if the chunk needs to be split again.
if len(joined_text) < 100:
text_lines.append(joined_text)
else:
subparts = re.split('( )', joined_text)
temp_string = ""
temp_array = []
for part in subparts:
temp_string = temp_string + part
if len(temp_string) > 80:
temp_array.append(temp_string)
temp_string = ""
#append final part
temp_array.append(temp_string)
text_lines.extend(temp_array)
return text_lines
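# Illustrative behaviour sketch (not part of the original module): the text is
# split on punctuation and regrouped into chunks short enough for the TTS
# endpoint, so a call such as
#   GoogleTTS().convertTextAsLinesOfText('Hello, world. This is a test.')
# returns chunks along the lines of ['Hello,', ' world.', ' This is a test.']
# (possibly with an empty trailing chunk), each under 100 characters.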
def downloadAudioFile(self,text_lines, language, audio_file):
"""
Downloads an MP3 from the Google Translate TTS service based on a text
and a language code.
"""
for idx, line in enumerate(text_lines):
query_params = {"tl": language, "q": line, "total": len(text_lines), "idx": idx}
url = "http://translate.google.com/translate_tts?ie=UTF-8" + "&" + self.unicode_urlencode(query_params)
headers = {"Host":"translate.google.com", "User-Agent":"Mozilla 5.10"}
req = urllib2.Request(url, '', headers)
sys.stdout.write('.')
sys.stdout.flush()
if len(line) > 0:
try:
response = urllib2.urlopen(req)
audio_file.write(response.read())
time.sleep(.5)
except urllib2.HTTPError as e:
print ('%s' % e)
print 'Saved MP3 to %s' % (audio_file.name)
audio_file.close()
def unicode_urlencode(self,params):
"""
Encodes params to be injected in an url.
"""
if isinstance(params, dict):
params = params.items()
return urllib.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params])
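# Standalone usage sketch for GoogleTTS (the file name here is hypothetical,
# and the legacy translate_tts endpoint used above may reject unauthenticated
# requests today):
#
#   tts = GoogleTTS()
#   lines = tts.convertTextAsLinesOfText('Dinner is ready')
#   tts.downloadAudioFile(lines, 'en', open('message.mp3', 'wb'))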
def testStuff():
import soco
talker = TalkerPlugin(soco,'/Users/Jeff/BitBucket/Personal/Python/SonosExperiments/AudioMessages/talkOutput.mp3',
'x-file-cifs://MACBOOKPRO-5A98/AudioMessages/talkOutput.mp3')
talker.talk(volume='75')
if __name__ == '__main__':
testStuff()
|
|
"""
5_0 to 5_0_5
Revision ID: 62a8d746d13b
Revises: 423a1643f365
Create Date: 2019-08-23 13:36:03.985636
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from manager_rest.storage.models_base import JSONString, UTCDateTime
# revision identifiers, used by Alembic.
revision = '62a8d746d13b'
down_revision = '423a1643f365'
branch_labels = None
depends_on = None
config_table = table(
'config',
column('name', sa.Text),
column('value', JSONString()),
column('schema', JSONString()),
column('is_editable', sa.Boolean),
column('updated_at', UTCDateTime()),
column('scope', sa.Text),
)
def upgrade():
op.add_column(
'executions',
sa.Column('blueprint_id', sa.Text(), nullable=True))
op.add_column(
'deployments',
sa.Column('runtime_only_evaluation', sa.Boolean(), nullable=True))
op.add_column(
'deployment_updates',
sa.Column('runtime_only_evaluation', sa.Boolean(), nullable=True))
op.add_column(
'node_instances',
sa.Column('index', sa.Integer(), nullable=True))
op.bulk_insert(config_table, [
dict(
name='ldap_ca_path',
value=op.inline_literal('null'),
scope='rest',
schema={'type': 'string'},
is_editable=True
)
])
_create_db_nodes_table()
_update_managers_table()
_update_brokers_table()
op.create_index(
op.f('node_instances__node_fk_idx'),
'node_instances',
['_node_fk'],
unique=False)
op.create_index(
op.f('nodes__deployment_fk_idx'),
'nodes',
['_deployment_fk'],
unique=False)
op.create_index(
op.f('executions_ended_at_idx'),
'executions',
['ended_at'],
unique=False,
)
op.create_index(
op.f('executions_token_idx'), 'executions', ['token'], unique=False
)
op.create_index(
op.f('agents__creator_id_idx'), 'agents', ['_creator_id'], unique=False
)
op.create_index(
op.f('agents__node_instance_fk_idx'),
'agents',
['_node_instance_fk'],
unique=False,
)
op.create_index(
op.f('agents_visibility_idx'), 'agents', ['visibility'], unique=False
)
op.create_index(
op.f('blueprints__creator_id_idx'),
'blueprints',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('blueprints_visibility_idx'),
'blueprints',
['visibility'],
unique=False,
)
op.create_index(
op.f('certificates__updater_id_idx'),
'certificates',
['_updater_id'],
unique=False,
)
op.create_index(
op.f('config__updater_id_idx'), 'config', ['_updater_id'], unique=False
)
op.create_index(
op.f('deployment_modifications__creator_id_idx'),
'deployment_modifications',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('deployment_modifications__deployment_fk_idx'),
'deployment_modifications',
['_deployment_fk'],
unique=False,
)
op.create_index(
op.f('deployment_modifications_visibility_idx'),
'deployment_modifications',
['visibility'],
unique=False,
)
op.create_index(
op.f('deployment_update_steps__creator_id_idx'),
'deployment_update_steps',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('deployment_update_steps__deployment_update_fk_idx'),
'deployment_update_steps',
['_deployment_update_fk'],
unique=False,
)
op.create_index(
op.f('deployment_update_steps_visibility_idx'),
'deployment_update_steps',
['visibility'],
unique=False,
)
op.create_index(
op.f('deployment_updates__creator_id_idx'),
'deployment_updates',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('deployment_updates__deployment_fk_idx'),
'deployment_updates',
['_deployment_fk'],
unique=False,
)
op.create_index(
op.f('deployment_updates__execution_fk_idx'),
'deployment_updates',
['_execution_fk'],
unique=False,
)
op.create_index(
op.f('deployment_updates__new_blueprint_fk_idx'),
'deployment_updates',
['_new_blueprint_fk'],
unique=False,
)
op.create_index(
op.f('deployment_updates__old_blueprint_fk_idx'),
'deployment_updates',
['_old_blueprint_fk'],
unique=False,
)
op.create_index(
op.f('deployment_updates_visibility_idx'),
'deployment_updates',
['visibility'],
unique=False,
)
op.create_index(
op.f('deployments__blueprint_fk_idx'),
'deployments',
['_blueprint_fk'],
unique=False,
)
op.create_index(
op.f('deployments__creator_id_idx'),
'deployments',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('deployments__site_fk_idx'),
'deployments',
['_site_fk'],
unique=False,
)
op.create_index(
op.f('deployments_visibility_idx'),
'deployments',
['visibility'],
unique=False,
)
op.create_index(
op.f('events__creator_id_idx'), 'events', ['_creator_id'], unique=False
)
op.create_index(
op.f('events_visibility_idx'), 'events', ['visibility'], unique=False
)
op.create_index(
op.f('executions__creator_id_idx'),
'executions',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('executions__deployment_fk_idx'),
'executions',
['_deployment_fk'],
unique=False,
)
op.create_index(
op.f('executions_visibility_idx'),
'executions',
['visibility'],
unique=False,
)
op.create_index(
op.f('groups_tenants_group_id_idx'),
'groups_tenants',
['group_id'],
unique=False,
)
op.create_index(
op.f('groups_tenants_role_id_idx'),
'groups_tenants',
['role_id'],
unique=False,
)
op.create_index(
op.f('groups_tenants_tenant_id_idx'),
'groups_tenants',
['tenant_id'],
unique=False,
)
op.create_index(
op.f('logs__creator_id_idx'), 'logs', ['_creator_id'], unique=False
)
op.create_index(
op.f('logs_visibility_idx'), 'logs', ['visibility'], unique=False
)
op.create_index(
op.f('managers__ca_cert_id_idx'),
'managers',
['_ca_cert_id'],
unique=False,
)
op.create_index(
op.f('node_instances__creator_id_idx'),
'node_instances',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('node_instances_visibility_idx'),
'node_instances',
['visibility'],
unique=False,
)
op.create_index(
op.f('nodes__creator_id_idx'), 'nodes', ['_creator_id'], unique=False
)
op.create_index(
op.f('nodes_visibility_idx'), 'nodes', ['visibility'], unique=False
)
op.create_index(
op.f('operations__creator_id_idx'),
'operations',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('operations__tasks_graph_fk_idx'),
'operations',
['_tasks_graph_fk'],
unique=False,
)
op.create_index(
op.f('operations_visibility_idx'),
'operations',
['visibility'],
unique=False,
)
op.create_index(
op.f('plugins__creator_id_idx'),
'plugins',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('plugins_visibility_idx'), 'plugins', ['visibility'], unique=False
)
op.create_index(
op.f('plugins_updates__creator_id_idx'),
'plugins_updates',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('plugins_updates__execution_fk_idx'),
'plugins_updates',
['_execution_fk'],
unique=False,
)
op.create_index(
op.f('plugins_updates__original_blueprint_fk_idx'),
'plugins_updates',
['_original_blueprint_fk'],
unique=False,
)
op.create_index(
op.f('plugins_updates__temp_blueprint_fk_idx'),
'plugins_updates',
['_temp_blueprint_fk'],
unique=False,
)
op.create_index(
op.f('plugins_updates_visibility_idx'),
'plugins_updates',
['visibility'],
unique=False,
)
op.create_index(
op.f('rabbitmq_brokers__ca_cert_id_idx'),
'rabbitmq_brokers',
['_ca_cert_id'],
unique=False,
)
op.create_index(
op.f('secrets__creator_id_idx'),
'secrets',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('secrets_visibility_idx'), 'secrets', ['visibility'], unique=False
)
op.create_index(
op.f('sites__creator_id_idx'), 'sites', ['_creator_id'], unique=False
)
op.create_index(
op.f('sites_visibility_idx'), 'sites', ['visibility'], unique=False
)
op.create_index(
op.f('snapshots__creator_id_idx'),
'snapshots',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('snapshots_visibility_idx'),
'snapshots',
['visibility'],
unique=False,
)
op.create_index(
op.f('tasks_graphs__creator_id_idx'),
'tasks_graphs',
['_creator_id'],
unique=False,
)
op.create_index(
op.f('tasks_graphs__execution_fk_idx'),
'tasks_graphs',
['_execution_fk'],
unique=False,
)
op.create_index(
op.f('tasks_graphs_visibility_idx'),
'tasks_graphs',
['visibility'],
unique=False,
)
op.create_index(
op.f('users_tenants_role_id_idx'),
'users_tenants',
['role_id'],
unique=False,
)
op.create_index(
op.f('users_tenants_tenant_id_idx'),
'users_tenants',
['tenant_id'],
unique=False,
)
op.create_index(
op.f('users_tenants_user_id_idx'),
'users_tenants',
['user_id'],
unique=False,
)
op.create_index(
op.f('events_node_id_idx'),
'events',
['node_id'],
unique=False
)
op.create_index(
op.f('executions_is_system_workflow_idx'),
'executions',
['is_system_workflow'],
unique=False
)
op.create_index(
op.f('logs_node_id_idx'),
'logs',
['node_id'],
unique=False
)
op.create_index(
op.f('node_instances_state_idx'),
'node_instances',
['state'],
unique=False
)
op.create_index(
op.f('tasks_graphs_name_idx'),
'tasks_graphs',
['name'],
unique=False
)
op.create_index(
'deployments__sife_fk_visibility_idx',
'deployments',
['_blueprint_fk', '_site_fk', 'visibility', '_tenant_id'],
unique=False
)
op.create_index(
'events_node_id_visibility_idx',
'events',
['node_id', 'visibility'],
unique=False
)
op.create_index(
'executions_dep_fk_isw_vis_tenant_id_idx',
'executions',
['_deployment_fk', 'is_system_workflow', 'visibility', '_tenant_id'],
unique=False
)
op.create_index(
'logs_node_id_visibility_execution_fk_idx',
'logs',
['node_id', 'visibility', '_execution_fk'],
unique=False
)
op.create_index(
'node_instances_state_visibility_idx',
'node_instances',
['state', 'visibility'],
unique=False
)
op.create_index(
'tasks_graphs__execution_fk_name_visibility_idx',
'tasks_graphs',
['_execution_fk', 'name', 'visibility'],
unique=False
)
op.create_primary_key(
'users_roles_pkey',
'users_roles',
['user_id', 'role_id'],
)
def downgrade():
op.drop_index(
'tasks_graphs__execution_fk_name_visibility_idx',
table_name='tasks_graphs'
)
op.drop_index(
'node_instances_state_visibility_idx',
table_name='node_instances'
)
op.drop_index(
'logs_node_id_visibility_execution_fk_idx',
table_name='logs'
)
op.drop_index(
'executions_dep_fk_isw_vis_tenant_id_idx',
table_name='executions'
)
op.drop_index(
'events_node_id_visibility_idx',
table_name='events'
)
op.drop_index(
'deployments__sife_fk_visibility_idx',
table_name='deployments'
)
op.drop_index(op.f('tasks_graphs_name_idx'), table_name='tasks_graphs')
op.drop_index(op.f('node_instances_state_idx'),
table_name='node_instances')
op.drop_index(op.f('logs_node_id_idx'), table_name='logs')
op.drop_index(op.f('executions_is_system_workflow_idx'),
table_name='executions')
op.drop_index(op.f('events_node_id_idx'), table_name='events')
op.drop_index(
op.f('users_tenants_user_id_idx'), table_name='users_tenants'
)
op.drop_index(
op.f('users_tenants_tenant_id_idx'), table_name='users_tenants'
)
op.drop_index(
op.f('users_tenants_role_id_idx'), table_name='users_tenants'
)
op.drop_index(
op.f('tasks_graphs_visibility_idx'), table_name='tasks_graphs'
)
op.drop_index(
op.f('tasks_graphs__execution_fk_idx'), table_name='tasks_graphs'
)
op.drop_index(
op.f('tasks_graphs__creator_id_idx'), table_name='tasks_graphs'
)
op.drop_index(op.f('snapshots_visibility_idx'), table_name='snapshots')
op.drop_index(op.f('snapshots__creator_id_idx'), table_name='snapshots')
op.drop_index(op.f('sites_visibility_idx'), table_name='sites')
op.drop_index(op.f('sites__creator_id_idx'), table_name='sites')
op.drop_index(op.f('secrets_visibility_idx'), table_name='secrets')
op.drop_index(op.f('secrets__creator_id_idx'), table_name='secrets')
op.drop_index(
op.f('rabbitmq_brokers__ca_cert_id_idx'), table_name='rabbitmq_brokers'
)
op.drop_index(
op.f('plugins_updates_visibility_idx'), table_name='plugins_updates'
)
op.drop_index(
op.f('plugins_updates__temp_blueprint_fk_idx'),
table_name='plugins_updates',
)
op.drop_index(
op.f('plugins_updates__original_blueprint_fk_idx'),
table_name='plugins_updates',
)
op.drop_index(
op.f('plugins_updates__execution_fk_idx'), table_name='plugins_updates'
)
op.drop_index(
op.f('plugins_updates__creator_id_idx'), table_name='plugins_updates'
)
op.drop_index(op.f('plugins_visibility_idx'), table_name='plugins')
op.drop_index(op.f('plugins__creator_id_idx'), table_name='plugins')
op.drop_index(op.f('operations_visibility_idx'), table_name='operations')
op.drop_index(
op.f('operations__tasks_graph_fk_idx'), table_name='operations'
)
op.drop_index(op.f('operations__creator_id_idx'), table_name='operations')
op.drop_index(op.f('nodes_visibility_idx'), table_name='nodes')
op.drop_index(op.f('nodes__creator_id_idx'), table_name='nodes')
op.drop_index(
op.f('node_instances_visibility_idx'), table_name='node_instances'
)
op.drop_index(
op.f('node_instances__creator_id_idx'), table_name='node_instances'
)
op.drop_index(op.f('managers__ca_cert_id_idx'), table_name='managers')
op.drop_index(op.f('logs_visibility_idx'), table_name='logs')
op.drop_index(op.f('logs__creator_id_idx'), table_name='logs')
op.drop_index(
op.f('groups_tenants_tenant_id_idx'), table_name='groups_tenants'
)
op.drop_index(
op.f('groups_tenants_role_id_idx'), table_name='groups_tenants'
)
op.drop_index(
op.f('groups_tenants_group_id_idx'), table_name='groups_tenants'
)
op.drop_index(op.f('executions_visibility_idx'), table_name='executions')
op.drop_index(
op.f('executions__deployment_fk_idx'), table_name='executions'
)
op.drop_index(op.f('executions__creator_id_idx'), table_name='executions')
op.drop_index(op.f('events_visibility_idx'), table_name='events')
op.drop_index(op.f('events__creator_id_idx'), table_name='events')
op.drop_index(op.f('deployments_visibility_idx'), table_name='deployments')
op.drop_index(op.f('deployments__site_fk_idx'), table_name='deployments')
op.drop_index(
op.f('deployments__creator_id_idx'), table_name='deployments'
)
op.drop_index(
op.f('deployments__blueprint_fk_idx'), table_name='deployments'
)
op.drop_index(
op.f('deployment_updates_visibility_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_updates__old_blueprint_fk_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_updates__new_blueprint_fk_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_updates__execution_fk_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_updates__deployment_fk_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_updates__creator_id_idx'),
table_name='deployment_updates',
)
op.drop_index(
op.f('deployment_update_steps_visibility_idx'),
table_name='deployment_update_steps',
)
op.drop_index(
op.f('deployment_update_steps__deployment_update_fk_idx'),
table_name='deployment_update_steps',
)
op.drop_index(
op.f('deployment_update_steps__creator_id_idx'),
table_name='deployment_update_steps',
)
op.drop_index(
op.f('deployment_modifications_visibility_idx'),
table_name='deployment_modifications',
)
op.drop_index(
op.f('deployment_modifications__deployment_fk_idx'),
table_name='deployment_modifications',
)
op.drop_index(
op.f('deployment_modifications__creator_id_idx'),
table_name='deployment_modifications',
)
op.drop_index(op.f('config__updater_id_idx'), table_name='config')
op.drop_index(
op.f('certificates__updater_id_idx'), table_name='certificates'
)
op.drop_index(op.f('blueprints_visibility_idx'), table_name='blueprints')
op.drop_index(op.f('blueprints__creator_id_idx'), table_name='blueprints')
op.drop_index(op.f('agents_visibility_idx'), table_name='agents')
op.drop_index(op.f('agents__node_instance_fk_idx'), table_name='agents')
op.drop_index(op.f('agents__creator_id_idx'), table_name='agents')
op.drop_index(op.f('executions_token_idx'), table_name='executions')
op.drop_index(op.f('executions_ended_at_idx'), table_name='executions')
op.drop_index(op.f('nodes__deployment_fk_idx'), table_name='nodes')
op.drop_index(
op.f('node_instances__node_fk_idx'), table_name='node_instances'
)
op.drop_column('deployment_updates', 'runtime_only_evaluation')
op.drop_column('deployments', 'runtime_only_evaluation')
op.drop_column('executions', 'blueprint_id')
op.drop_column('node_instances', 'index')
op.execute(
config_table
.delete()
.where(
(config_table.c.name == op.inline_literal('ldap_ca_path')) &
(config_table.c.scope == op.inline_literal('rest'))
)
)
op.drop_constraint(
op.f('rabbitmq_brokers_node_id_key'),
'rabbitmq_brokers',
type_='unique'
)
op.drop_column('rabbitmq_brokers', 'node_id')
op.drop_column('rabbitmq_brokers', 'is_external')
op.drop_constraint(
op.f('managers_node_id_key'),
'managers',
type_='unique'
)
op.drop_column('managers', 'node_id')
op.drop_index(op.f('managers_last_seen_idx'), table_name='managers')
op.drop_column('managers', 'last_seen')
op.drop_column('managers', 'status_report_frequency')
op.drop_table('db_nodes')
op.drop_constraint(
'users_roles_pkey',
'users_roles',
)
def _update_managers_table():
op.add_column('managers', sa.Column('node_id', sa.Text(), nullable=True))
op.add_column('managers',
sa.Column('last_seen', UTCDateTime(), nullable=False,
server_default=sa.func.current_timestamp()))
op.add_column('managers',
sa.Column('status_report_frequency', sa.Integer(),
nullable=True))
op.execute("""
UPDATE managers
SET node_id = hostname;
""")
op.alter_column('managers', 'node_id', nullable=False)
op.create_unique_constraint(op.f('managers_node_id_key'), 'managers',
['node_id'])
op.create_index(op.f('managers_last_seen_idx'), 'managers', ['last_seen'],
unique=False)
def _update_brokers_table():
op.add_column('rabbitmq_brokers',
sa.Column('is_external',
sa.Boolean(),
nullable=False,
server_default='f'))
op.add_column('rabbitmq_brokers',
sa.Column('node_id', sa.Text(), nullable=True))
op.execute("""
UPDATE rabbitmq_brokers
SET node_id = name;
""")
op.alter_column('rabbitmq_brokers', 'node_id', nullable=False)
op.create_unique_constraint(op.f('rabbitmq_brokers_node_id_key'),
'rabbitmq_brokers', ['node_id'])
def _create_db_nodes_table():
op.create_table(
'db_nodes',
sa.Column('name', sa.Text(), nullable=False),
sa.Column('node_id', sa.Text(), nullable=False),
sa.Column('host', sa.Text(), nullable=False),
sa.Column('is_external', sa.Boolean(), nullable=False,
server_default='f'),
sa.PrimaryKeyConstraint('name', name=op.f('db_nodes_pkey')),
sa.UniqueConstraint('node_id', name=op.f('db_nodes_node_id_key')),
sa.UniqueConstraint('host', name=op.f('db_nodes_host_key'))
)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
from __future__ import absolute_import, print_function
import json
import os
import mock
import pytest
from flask import Flask
from jsonresolver import JSONResolver
from jsonresolver.contrib.jsonschema import ref_resolver_factory
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from invenio_jsonschemas import InvenioJSONSchemas, InvenioJSONSchemasAPI, \
InvenioJSONSchemasUI
from invenio_jsonschemas.config import JSONSCHEMAS_URL_SCHEME
from invenio_jsonschemas.errors import JSONSchemaDuplicate, JSONSchemaNotFound
def test_version():
"""Test version import."""
from invenio_jsonschemas import __version__
assert __version__
def test_init(app):
"""Test extension initialization."""
app = Flask('testapp')
ext = InvenioJSONSchemas(app)
assert 'invenio-jsonschemas' in app.extensions
app = Flask('testapp')
app.config['JSONSCHEMAS_REGISTER_ENDPOINTS_UI'] = True
ext = InvenioJSONSchemasUI(app)
assert 'invenio-jsonschemas' in app.extensions
app = Flask('testapp')
app.config['JSONSCHEMAS_REGISTER_ENDPOINTS_API'] = True
ext = InvenioJSONSchemasAPI(app)
assert 'invenio-jsonschemas' in app.extensions
app = Flask('testapp')
ext = InvenioJSONSchemas()
assert 'invenio-jsonschemas' not in app.extensions
ext.init_app(app)
assert 'invenio-jsonschemas' in app.extensions
schema_template = """{{
"type": "object",
"properties": {{
"{}": {{ "type": "number" }}
}}
}}"""
def build_schemas(id):
"""Generate a dictionary of "file path" -> "JSON schema"."""
return {
'rootschema_{}.json'.format(id):
schema_template.format('rootschema_{}'.format(id)),
'sub1/subschema_{}.json'.format(id):
schema_template.format('subschema_1_{}'.format(id)),
'sub2/subschema_{}.json'.format(id):
schema_template.format('subschema_2_{}'.format(id)),
'sub3/subschema_{}.json'.format(id):
schema_template.format('subschema_3_{}'.format(id)),
}
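# For id=1, build_schemas returns keys such as 'rootschema_1.json' and
# 'sub1/subschema_1.json', each mapping to a small JSON schema that declares a
# single numeric property (see schema_template above).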
def test_api(app, dir_factory):
"""Test API."""
ext = InvenioJSONSchemas(app, entry_point_group=None)
schema_files = build_schemas(1)
with dir_factory(schema_files) as directory:
ext.register_schemas_dir(directory)
for path in schema_files.keys():
# test get_schema_dir
assert ext.get_schema_dir(path) == directory
# test get_schema_path
assert ext.get_schema_path(path) == \
os.path.join(directory, path)
# test get_schema
assert ext.get_schema(path) == json.loads(schema_files[path])
# test list_schemas
assert set(schema_files.keys()) == set(ext.list_schemas())
# test that asking for a non-existing schema fails
with pytest.raises(JSONSchemaNotFound) as exc_info:
ext.get_schema('not_existing_schema.json')
assert exc_info.value.schema == 'not_existing_schema.json'
# test that asking for a non-existing schema's path fails
with pytest.raises(JSONSchemaNotFound) as exc_info:
ext.get_schema_path('not_existing_schema.json')
assert exc_info.value.schema == 'not_existing_schema.json'
class mock_open(object):
"""Mock the builtin 'open' and count the file requests."""
counter = 0
def __init__(self, path):
"""Initialize the open with a path."""
self.path = path
def __enter__(self, *args, **kwargs):
"""Context enter."""
self.f = open(self.path)
mock_open.counter += 1
return self.f
def __exit__(self, *args, **kwargs):
"""Context exit."""
self.f.close()
def test_cache(app, dir_factory):
"""Test cached schema loading."""
m = mock_open
with mock.patch('invenio_jsonschemas.ext.open', m):
ext = InvenioJSONSchemas(app, entry_point_group=None)
schema_files = build_schemas(1)
with dir_factory(schema_files) as directory:
ext.register_schemas_dir(directory)
assert m.counter == 0
ext.get_schema('rootschema_1.json')
assert m.counter == 1
ext.get_schema('rootschema_1.json')
ext.get_schema('rootschema_1.json')
assert m.counter == 1
ext.get_schema('sub1/subschema_1.json')
assert m.counter == 2
ext.get_schema('sub1/subschema_1.json')
assert m.counter == 2
def test_register_schema(app, dir_factory):
"""Test register schema."""
ext = InvenioJSONSchemas(app, entry_point_group=None)
schema_files = build_schemas(1)
with dir_factory(schema_files) as directory:
registered_schemas = set(list(schema_files.keys())[:1])
nonregistered_schema = [s for s in schema_files if s not in
registered_schemas]
for schema in registered_schemas:
ext.register_schema(directory, schema)
assert set(ext.list_schemas()) == registered_schemas
for schema in nonregistered_schema:
with pytest.raises(JSONSchemaNotFound):
ext.get_schema(schema)
def test_redefine(app, dir_factory):
"""Test redefine."""
ext = InvenioJSONSchemas(app, entry_point_group=None)
schema_files = build_schemas(1)
with dir_factory(schema_files) as dir1, \
dir_factory(schema_files) as dir2:
ext.register_schemas_dir(dir1)
# register schemas from a directory which have the same relative
# paths
with pytest.raises(JSONSchemaDuplicate) as exc_info:
ext.register_schemas_dir(dir2)
assert exc_info.value.schema in schema_files.keys()
def test_view(app, pkg_factory, mock_entry_points):
"""Test view."""
schema_files_1 = build_schemas(1)
schema_files_2 = build_schemas(2)
schema_files_3 = build_schemas(3)
all_schemas = dict()
all_schemas.update(schema_files_1)
all_schemas.update(schema_files_2)
all_schemas.update(schema_files_3)
entry_point_group = 'invenio_jsonschema_test_entry_point'
endpoint = '/testschemas'
app.config['JSONSCHEMAS_ENDPOINT'] = endpoint
with pkg_factory(schema_files_1) as pkg1, \
pkg_factory(schema_files_2) as pkg2, \
pkg_factory(schema_files_3) as pkg3:
mock_entry_points.add(entry_point_group, 'entry1', pkg1)
mock_entry_points.add(entry_point_group, 'entry2', pkg2)
mock_entry_points.add(entry_point_group, 'entry3', pkg3)
# Test an alternative way of initializing the app
# with InvenioJSONSchemas
ext = InvenioJSONSchemas(entry_point_group=entry_point_group)
ext = ext.init_app(app)
# Test if all the schemas are correctly found
assert set(ext.list_schemas()) == set(all_schemas.keys())
with app.test_client() as client:
for name, schema in all_schemas.items():
res = client.get("{0}/{1}".format(endpoint, name))
assert res.status_code == 200
assert json.loads(schema) == \
json.loads(res.get_data(as_text=True))
res = client.get("{0}/nonexisting".format(endpoint))
assert res.status_code == 404
def test_replace_refs_in_view(app, pkg_factory, mock_entry_points):
"""Test replace refs config in view."""
schemas = {
'root.json': '{"$ref": "sub/schema.json"}',
'sub/schema.json': schema_template.format('test')
}
entry_point_group = 'invenio_jsonschema_test_entry_point'
endpoint = '/testschemas'
app.config['JSONSCHEMAS_ENDPOINT'] = endpoint
with pkg_factory(schemas) as pkg1:
mock_entry_points.add(entry_point_group, 'entry1', pkg1)
ext = InvenioJSONSchemas(entry_point_group=entry_point_group)
ext = ext.init_app(app)
with app.test_client() as client:
res = client.get('{0}/{1}'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['root.json']) == \
json.loads(res.get_data(as_text=True))
app.config['JSONSCHEMAS_REPLACE_REFS'] = True
res = client.get('{0}/{1}'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['sub/schema.json']) == \
json.loads(res.get_data(as_text=True))
app.config['JSONSCHEMAS_REPLACE_REFS'] = False
res = client.get('{0}/{1}?refs=1'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['sub/schema.json']) == \
json.loads(res.get_data(as_text=True))
def test_replace_resolve_in_view(app, pkg_factory, mock_entry_points):
"""Test replace refs config in view."""
schemas = {
'root.json': '{"type": "object","allOf":'
'[{"$ref": "sub/schema.json"}]}',
'sub/schema.json': schema_template.format('test')
}
entry_point_group = 'invenio_jsonschema_test_entry_point'
endpoint = '/testschemas'
app.config['JSONSCHEMAS_ENDPOINT'] = endpoint
with pkg_factory(schemas) as pkg1:
mock_entry_points.add(entry_point_group, 'entry1', pkg1)
ext = InvenioJSONSchemas(entry_point_group=entry_point_group)
ext = ext.init_app(app)
with app.test_client() as client:
res = client.get('{0}/{1}'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['root.json']) == \
json.loads(res.get_data(as_text=True))
app.config['JSONSCHEMAS_RESOLVE_SCHEMA'] = True
res = client.get('{0}/{1}'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['sub/schema.json']) == \
json.loads(res.get_data(as_text=True))
app.config['JSONSCHEMAS_RESOLVE_SCHEMA'] = False
res = client.get(
'{0}/{1}?resolved=1'.format(endpoint, 'root.json'))
assert res.status_code == 200
assert json.loads(schemas['sub/schema.json']) == \
json.loads(res.get_data(as_text=True))
def test_alternative_entry_point_group_init(app, pkg_factory,
mock_entry_points):
"""Test initializing the entry_point_group after creating the extension."""
schema_files_1 = build_schemas(1)
schema_files_2 = build_schemas(2)
all_schemas = dict()
all_schemas.update(schema_files_1)
all_schemas.update(schema_files_2)
entry_point_group = 'invenio_jsonschema_test_entry_point'
with pkg_factory(schema_files_1) as pkg1, \
pkg_factory(schema_files_2) as pkg2:
mock_entry_points.add(entry_point_group, 'entry1', pkg1)
mock_entry_points.add(entry_point_group, 'entry2', pkg2)
# Test an alternative way of initializing the app and entry_point_group
# with InvenioJSONSchemas
ext = InvenioJSONSchemas()
ext = ext.init_app(app, entry_point_group=entry_point_group)
# Test if all the schemas are correctly found
assert set(ext.list_schemas()) == set(all_schemas.keys())
def mock_get_schema(self, path):
"""Mock the ``get_schema`` method of InvenioJSONSchemasState."""
assert path == 'some_schema.json'
ret_schema = {
"$schema": "http://json-schema.org/schema#",
"id": "http://localhost/schemas/some_schema.json",
"type": "object",
"properties": {
"foo": {"type": "string", },
"bar": {"type": "integer", },
}
}
return ret_schema
@mock.patch('invenio_jsonschemas.ext.InvenioJSONSchemasState.get_schema',
mock_get_schema)
def test_jsonresolver():
"""Test extension initialization."""
app = Flask('testapp')
InvenioJSONSchemas(app)
assert 'invenio-jsonschemas' in app.extensions
with app.app_context():
json_resolver = JSONResolver(
plugins=['invenio_jsonschemas.jsonresolver', ])
schema = {'$ref': 'http://localhost/schemas/some_schema.json'}
resolver_cls = ref_resolver_factory(json_resolver)
resolver = resolver_cls.from_schema(schema)
with pytest.raises(ValidationError) as exc_info:
validate({'foo': 'foo_value', 'bar': "not_an_int"}, schema,
resolver=resolver)
assert exc_info.value.schema == {'type': 'integer'}
@pytest.mark.parametrize('url_scheme', [
None, 'http', 'https'
])
def test_url_mapping(app, dir_factory, url_scheme):
"""Test register schema."""
app.config['SERVER_NAME'] = 'example.org'
app.config['JSONSCHEMAS_HOST'] = 'inveniosoftware.org'
if url_scheme is not None:
app.config['JSONSCHEMAS_URL_SCHEME'] = url_scheme
else:
# test with default url scheme configuration
url_scheme = JSONSCHEMAS_URL_SCHEME
ext = InvenioJSONSchemas(app, entry_point_group=None)
schema_files = build_schemas(1)
with dir_factory(schema_files) as directory:
ext.register_schemas_dir(directory)
with app.app_context():
assert 'sub1/subschema_1.json' == ext.url_to_path(
'{0}://inveniosoftware.org/schemas/sub1/subschema_1.json'
.format(url_scheme))
assert ext.url_to_path(
'{0}://inveniosoftware.org/schemas/invalid.json'
.format(url_scheme)) is None
assert ext.url_to_path(
'{0}://example.org/schemas/sub1/subschema_1.json'
.format(url_scheme)) is None
assert (
'{0}://inveniosoftware.org/schemas/sub1/subschema_1.json'
.format(url_scheme)
) == ext.path_to_url('sub1/subschema_1.json')
assert ext.path_to_url('invalid.json') is None
|
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import os
import sys
import pytest
# module under test
import azure.core.settings as m
class TestPrioritizedSetting(object):
def test_env_var_property(self):
ps = m.PrioritizedSetting("foo", env_var="AZURE_FOO")
assert ps.env_var == "AZURE_FOO"
def test_everything_unset_raises(self):
ps = m.PrioritizedSetting("foo")
with pytest.raises(RuntimeError):
ps()
def test_implicit_default(self):
ps = m.PrioritizedSetting("foo", default=10)
assert ps() == 10
def test_implicit_default_converts(self):
ps = m.PrioritizedSetting("foo", convert=int, default="10")
assert ps() == 10
def test_system_hook(self):
ps = m.PrioritizedSetting("foo", system_hook=lambda: 20)
assert ps() == 20
def test_system_hook_converts(self):
ps = m.PrioritizedSetting("foo", convert=int, system_hook=lambda: "20")
assert ps() == 20
def test_env_var(self):
os.environ["AZURE_FOO"] = "30"
ps = m.PrioritizedSetting("foo", env_var="AZURE_FOO")
assert ps() == "30"
del os.environ["AZURE_FOO"]
def test_env_var_converts(self):
os.environ["AZURE_FOO"] = "30"
ps = m.PrioritizedSetting("foo", convert=int, env_var="AZURE_FOO")
assert ps() == 30
del os.environ["AZURE_FOO"]
def test_user_set(self):
ps = m.PrioritizedSetting("foo")
ps.set_value(40)
assert ps() == 40
def test_user_unset(self):
ps = m.PrioritizedSetting("foo", default=2)
ps.set_value(40)
assert ps() == 40
ps.unset_value()
assert ps() == 2
def test_user_set_converts(self):
ps = m.PrioritizedSetting("foo", convert=int)
ps.set_value("40")
assert ps() == 40
def test_immediate(self):
ps = m.PrioritizedSetting("foo")
assert ps(50) == 50
def test_immediate_converts(self):
ps = m.PrioritizedSetting("foo", convert=int)
assert ps("50") == 50
def test_precedence(self):
# 0. implicit default
ps = m.PrioritizedSetting("foo", env_var="AZURE_FOO", convert=int, default=10)
assert ps() == 10
# 1. system value
ps = m.PrioritizedSetting("foo", env_var="AZURE_FOO", convert=int, default=10, system_hook=lambda: 20)
assert ps() == 20
# 2. environment variable
os.environ["AZURE_FOO"] = "30"
assert ps() == 30
# 3. previously user-set value
ps.set_value(40)
assert ps() == 40
# 4. immediate values
assert ps(50) == 50
del os.environ["AZURE_FOO"]
def test___str__(self):
ps = m.PrioritizedSetting("foo")
assert str(ps) == "PrioritizedSetting(%r)" % "foo"
def test_descriptors(self):
class FakeSettings(object):
foo = m.PrioritizedSetting("foo", env_var="AZURE_FOO")
bar = m.PrioritizedSetting("bar", env_var="AZURE_BAR", default=10)
s = FakeSettings()
assert s.foo is FakeSettings.foo
assert s.bar() == 10
s.bar = 20
assert s.bar() == 20
class TestConverters(object):
@pytest.mark.parametrize("value", ["Yes", "YES", "yes", "1", "ON", "on", "true", "True", True])
def test_convert_bool(self, value):
assert m.convert_bool(value)
@pytest.mark.parametrize("value", ["No", "NO", "no", "0", "OFF", "off", "false", "False", False])
def test_convert_bool_false(self, value):
assert not m.convert_bool(value)
@pytest.mark.parametrize("value", [True, False])
def test_convert_bool_identity(self, value):
assert m.convert_bool(value) == value
def test_convert_bool_bad(self):
with pytest.raises(ValueError):
m.convert_bool("junk")
@pytest.mark.parametrize("value", ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"])
def test_convert_logging_good(self, value):
assert m.convert_logging(value) == getattr(logging, value)
# check lowercase works too
assert m.convert_logging(value.lower()) == getattr(logging, value)
@pytest.mark.parametrize("value", ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"])
def test_convert_logging_identity(self, value):
level = getattr(logging, value)
assert m.convert_logging(level) == level
def test_convert_logging_bad(self):
with pytest.raises(ValueError):
m.convert_logging("junk")
_standard_settings = ["log_level", "tracing_enabled"]
class TestStandardSettings(object):
@pytest.mark.parametrize("name", _standard_settings)
def test_setting_exists(self, name):
assert hasattr(m.settings, name)
# XXX: This test will need to become more sophisticated if the assumption
# settings.foo -> AZURE_FOO for env vars ever becomes invalidated.
@pytest.mark.parametrize("name", _standard_settings)
def test_setting_env_var(self, name):
ps = getattr(m.settings, name)
assert ps.env_var == "AZURE_" + name.upper()
def test_init(self):
assert m.settings.defaults_only == False
def test_config(self):
val = m.settings.config(log_level=30, tracing_enabled=True)
assert isinstance(val, tuple)
assert val.tracing_enabled == True
assert val.log_level == 30
os.environ["AZURE_LOG_LEVEL"] = "debug"
val = m.settings.config(tracing_enabled=False)
assert val.tracing_enabled == False
assert val.log_level == 10
val = m.settings.config(log_level=30, tracing_enabled=False)
assert val.tracing_enabled == False
assert val.log_level == 30
del os.environ["AZURE_LOG_LEVEL"]
def test_defaults(self):
val = m.settings.defaults
# assert isinstance(val, tuple)
defaults = m.settings.config(
log_level=20, tracing_enabled=False, tracing_implementation=None
)
assert val.log_level == defaults.log_level
assert val.tracing_enabled == defaults.tracing_enabled
assert val.tracing_implementation == defaults.tracing_implementation
os.environ["AZURE_LOG_LEVEL"] = "debug"
defaults = m.settings.config(
log_level=20, tracing_enabled=False, tracing_implementation=None
)
assert val.log_level == defaults.log_level
assert val.tracing_enabled == defaults.tracing_enabled
assert val.tracing_implementation == defaults.tracing_implementation
del os.environ["AZURE_LOG_LEVEL"]
def test_current(self):
os.environ["AZURE_LOG_LEVEL"] = "debug"
val = m.settings.current
assert isinstance(val, tuple)
assert val.log_level == 10
del os.environ["AZURE_LOG_LEVEL"]
|
|
# Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import os
import re
import sys
import fixtures
from keystoneauth1 import session
import mock
import six
import testtools
from testtools import matchers
from neutronclient.common import clientmanager
from neutronclient import shell as openstack_shell
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_TENANT_ID = 'tenant_id'
DEFAULT_TENANT_NAME = 'tenant_name'
DEFAULT_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
DEFAULT_TOKEN = '3bcc3d3a03f44e3d8377f9247b0ad155'
DEFAULT_URL = 'http://quantum.example.org:9696/'
DEFAULT_REGION = 'regionOne'
DEFAULT_ENDPOINT_TYPE = 'public'
DEFAULT_API_VERSION = '2.0'
DEFAULT_SERVICE_TYPE = 'network'
DEFAULT_SERVICE_NAME = 'neutron'
DEFAULT_RETRIES = 3
DEFAULT_TIMEOUT = 3.0
class ShellTest(testtools.TestCase):
FAKE_ENV = {
'OS_USERNAME': DEFAULT_USERNAME,
'OS_PASSWORD': DEFAULT_PASSWORD,
'OS_TENANT_ID': DEFAULT_TENANT_ID,
'OS_TENANT_NAME': DEFAULT_TENANT_NAME,
'OS_AUTH_URL': DEFAULT_AUTH_URL,
'OS_REGION_NAME': None,
'HTTP_PROXY': None,
'http_proxy': None,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(
fixtures.EnvironmentVariable(
var, self.FAKE_ENV[var]))
def shell(self, argstr, check=False, expected_val=0):
# expected_val is the expected return value after executing
# the command in NeutronShell
orig = (sys.stdout, sys.stderr)
clean_env = {}
_old_env, os.environ = os.environ, clean_env.copy()
try:
sys.stdout = six.moves.cStringIO()
sys.stderr = six.moves.cStringIO()
_shell = openstack_shell.NeutronShell('2.0')
_shell.run(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_val, exc_value.code)
finally:
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
sys.stdout.close()
sys.stderr.close()
sys.stdout, sys.stderr = orig
os.environ = _old_env
return stdout, stderr
def test_run_unknown_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
stdout, stderr = self.shell('fake', check=True)
self.assertFalse(stdout)
self.assertEqual("Unknown command ['fake']", stderr.strip())
def test_help(self):
required = 'usage:'
help_text, stderr = self.shell('help')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion(self):
required = '.*os_user_domain_id.*'
bash_completion, stderr = self.shell('bash-completion')
self.assertThat(
bash_completion,
matchers.MatchesRegex(required))
def test_help_on_subcommand(self):
required = [
'.*?^usage: .* quota-list']
stdout, stderr = self.shell('help quota-list')
for r in required:
self.assertThat(
stdout,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_help_command(self):
required = 'usage:'
help_text, stderr = self.shell('help network-create')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion_in_outputs_of_help_command(self):
help_text, stderr = self.shell('help')
completion_cmd = "bash-completion"
completion_help_str = ("Prints all of the commands and options "
"for bash-completion.")
self.assertIn(completion_cmd, help_text)
self.assertIn(completion_help_str, help_text)
def test_bash_completion_command(self):
# just check we have some output
required = [
'.*--tenant_id',
'.*help',
'.*--dns-nameserver']
help_text, stderr = self.shell('neutron bash-completion')
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_build_option_parser(self):
neutron_shell = openstack_shell.NeutronShell('2.0')
result = neutron_shell.build_option_parser('descr', '2.0')
self.assertIsInstance(result, argparse.ArgumentParser)
@mock.patch.object(openstack_shell.NeutronShell, 'run')
def test_main_with_unicode(self, fake_shell):
unicode_text = u'\u7f51\u7edc'
argv = ['net-list', unicode_text, unicode_text]
fake_shell.return_value = 0
ret = openstack_shell.main(argv=argv)
fake_shell.assert_called_once_with([u'net-list', unicode_text,
unicode_text])
self.assertEqual(0, ret)
def test_endpoint_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_ENDPOINT_TYPE nor --os-endpoint-type
namespace = parser.parse_args([])
self.assertEqual('public', namespace.os_endpoint_type)
# --os-endpoint-type but not $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--os-endpoint-type=admin'])
self.assertEqual('admin', namespace.os_endpoint_type)
def test_endpoint_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_ENDPOINT_TYPE",
"public")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# $OS_ENDPOINT_TYPE but not --endpoint-type
namespace = parser.parse_args([])
self.assertEqual("public", namespace.os_endpoint_type)
# --endpoint-type and $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--endpoint-type=admin'])
self.assertEqual('admin', namespace.endpoint_type)
def test_timeout_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_NETWORK_TIMEOUT nor --http-timeout
namespace = parser.parse_args([])
self.assertIsNone(namespace.http_timeout)
# --http-timeout but not $OS_NETWORK_TIMEOUT
namespace = parser.parse_args(['--http-timeout=50'])
self.assertEqual(50, namespace.http_timeout)
def test_timeout_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_NETWORK_TIMEOUT",
"50")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
namespace = parser.parse_args([])
self.assertEqual(50, namespace.http_timeout)
def test_run_incomplete_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
cmd = (
'--os-username test --os-password test --os-project-id test '
'--os-auth-strategy keystone --os-auth-url '
'%s port-create' %
DEFAULT_AUTH_URL)
stdout, stderr = self.shell(cmd, check=True, expected_val=2)
search_str = "Try 'neutron help port-create' for more information"
self.assertTrue(any(search_str in string for string
in stderr.split('\n')))
def _test_authenticate_user(self, expect_verify, expect_insecure,
**options):
base_options = {'os_cloud': None,
'http_timeout': DEFAULT_TIMEOUT,
'region_name': DEFAULT_REGION,
'network_service_name': DEFAULT_SERVICE_NAME,
'neutron_service_type': DEFAULT_SERVICE_TYPE}
options.update(base_options)
if options.get('os_token'):
options.update({'os_token': 'token', 'os_url': 'url'})
else:
options.update({'os_token': None, 'os_url': None})
with mock.patch.object(openstack_shell.NeutronShell,
'run_subcommand'), \
mock.patch.object(session, 'Session') as session_mock, \
mock.patch.object(clientmanager, 'ClientManager') as cmgr_mock:
shell = openstack_shell.NeutronShell(DEFAULT_API_VERSION)
shell.options = mock.Mock(spec=options.keys())
for k, v in options.items():
setattr(shell.options, k, v)
shell.options.os_endpoint_type = DEFAULT_ENDPOINT_TYPE
shell.options.retries = DEFAULT_RETRIES
if not (options.get('os_token') and options.get('os_url')):
auth = mock.ANY
auth_session = mock.sentinel.session
session_mock.return_value = auth_session
else:
auth = None
auth_session = None
shell.authenticate_user()
if not (options.get('os_token') and options.get('os_url')):
session_mock.assert_called_once_with(
auth=mock.ANY, verify=expect_verify,
cert=options.get('cert'),
timeout=DEFAULT_TIMEOUT)
else:
self.assertFalse(session_mock.called)
cmgr_mock.assert_called_once_with(
retries=DEFAULT_RETRIES,
raise_errors=False,
session=auth_session,
url=options.get('os_url'),
token=options.get('os_token'),
region_name=DEFAULT_REGION,
api_version=DEFAULT_API_VERSION,
service_type=DEFAULT_SERVICE_TYPE,
service_name=DEFAULT_SERVICE_NAME,
endpoint_type=DEFAULT_ENDPOINT_TYPE,
auth=auth,
insecure=expect_insecure,
log_credentials=True)
def test_authenticate_secure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_secure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_insecure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
|
|
import arrow
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import aliased, contains_eager, joinedload
import sqlalchemy as sa
import web
from libweasyl.models.content import Report, ReportComment
from libweasyl.models.users import Login
from libweasyl import constants, legacy, staff
from weasyl.error import WeasylError
from weasyl import macro as m, define as d, media, note
_CONTENT = 2000
def _convert_violation(target):
violation = [i[2] for i in m.MACRO_REPORT_VIOLATION if i[0] == target]
return violation[0] if violation else 'Unknown'
def _dict_of_targetid(submitid, charid, journalid):
"""
Given a target of some type, return a dictionary indicating what the 'some
type' is. The dictionary's key will be the appropriate column on the Report
model.
"""
if submitid:
return {'target_sub': submitid}
elif charid:
return {'target_char': charid}
elif journalid:
return {'target_journal': journalid}
else:
raise ValueError('no ID given')
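# Illustrative usage (editorial sketch, not part of the original module):
#   _dict_of_targetid(1423, 0, 0)  -> {'target_sub': 1423}
#   _dict_of_targetid(0, 0, 77)    -> {'target_journal': 77}
# so the result can be splatted straight into Report.query.filter_by(**...)
# or into the Report constructor, as done below.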
# create() expects a form with the following fields:
#   submitid / charid / journalid - the report target (exactly one is non-zero)
#   violation - the violation code
#   content - the reporter's comment
def create(userid, form):
form.submitid = d.get_int(form.submitid)
form.charid = d.get_int(form.charid)
form.journalid = d.get_int(form.journalid)
form.violation = d.get_int(form.violation)
form.content = form.content.strip()[:_CONTENT]
# get the violation type from allowed types
try:
vtype = next(x for x in m.MACRO_REPORT_VIOLATION if x[0] == form.violation)
except StopIteration:
raise WeasylError("Unexpected")
if not form.submitid and not form.charid and not form.journalid:
raise WeasylError("Unexpected")
elif form.violation == 0:
if userid not in staff.MODS:
raise WeasylError("Unexpected")
elif (form.submitid or form.charid) and not 2000 <= form.violation < 3000:
raise WeasylError("Unexpected")
elif form.journalid and not 3000 <= form.violation < 4000:
raise WeasylError("Unexpected")
elif vtype[3] and not form.content:
raise WeasylError("ReportCommentRequired")
query = d.execute(
"SELECT userid, settings FROM %s WHERE %s = %i",
["submission", "submitid", form.submitid] if form.submitid else
["character", "charid", form.charid] if form.charid else
["journal", "journalid", form.journalid],
options="single")
if not query or (form.violation != 0 and 'h' in query[1]):
raise WeasylError("TargetRecordMissing")
now = arrow.get()
target_dict = _dict_of_targetid(form.submitid, form.charid, form.journalid)
report = Report.query.filter_by(is_closed=False, **target_dict).first()
if report is None:
if form.violation == 0:
raise WeasylError("Unexpected")
urgency = vtype[1]
report = Report(urgency=urgency, opened_at=now, **target_dict)
Report.dbsession.add(report)
Report.dbsession.add(ReportComment(
report=report, violation=form.violation, userid=userid, unixtime=now, content=form.content))
Report.dbsession.flush()
_report_types = [
'_target_sub',
'_target_char',
'_target_journal',
]
def select_list(userid, form):
# Find the unique violation types and the number of reporters. This will be
# joined against the Report model to get the violations/reporters for each
# selected report.
subq = (
ReportComment.dbsession.query(
ReportComment.reportid,
sa.func.count(),
sa.type_coerce(
sa.func.array_agg(ReportComment.violation.distinct()),
ARRAY(sa.Integer, as_tuple=True)).label('violations'))
.filter(ReportComment.violation != 0)
.group_by(ReportComment.reportid)
.subquery())
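    # Hedged reading of the subquery above (an editorial note, not original
    # code): for every reportid it yields one row of
    #   (reportid, number_of_reporting_comments, distinct_violation_codes)
    # which the outer query below joins back onto Report.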
# Find reports, joining against the aforementioned subquery, and eager-load
# the reports' owners.
q = (
Report.dbsession.query(Report, subq)
.options(joinedload(Report.owner))
.join(subq, Report.reportid == subq.c.reportid)
.reset_joinpoint())
# For each type of report, eagerly load the content reported and the
# content's owner. Also, keep track of the Login model aliases used for each
# report type so they can be filtered against later.
login_aliases = []
for column_name in _report_types:
login_alias = aliased(Login)
login_aliases.append(login_alias)
q = (
q
.outerjoin(getattr(Report, column_name))
.outerjoin(login_alias)
.options(contains_eager(column_name + '.owner', alias=login_alias))
.reset_joinpoint())
# Filter by report status. form.status can also be 'all', in which case no
# filter is applied.
if form.status == 'closed':
q = q.filter_by(is_closed=True)
elif form.status == 'open':
q = q.filter_by(is_closed=False)
# If filtering by the report's content's owner, iterate over the previously
# collected Login model aliases to compare against Login.login_name.
if form.submitter:
submitter = legacy.login_name(form.submitter)
q = q.filter(sa.or_(l.login_name == submitter for l in login_aliases))
# If filtering by violation type, see if the violation is in the array
# aggregate of unique violations for this report.
if form.violation and form.violation != '-1':
q = q.filter(sa.literal(int(form.violation)) == sa.func.any(subq.c.violations))
q = q.order_by(Report.opened_at.desc())
return [(report, report_count, map(_convert_violation, violations))
for report, _, report_count, violations in q.all()]
def select_view(userid, form):
report = (
Report.query
.options(joinedload('comments', innerjoin=True).joinedload('poster', innerjoin=True))
.get_or_404(int(form.reportid)))
report.old_style_comments = [
{
'userid': c.userid,
'username': c.poster.profile.username,
'unixtime': c.unixtime,
'content': c.content,
'violation': _convert_violation(c.violation),
} for c in report.comments]
media.populate_with_user_media(report.old_style_comments)
report.old_style_comments.sort(key=lambda c: c['unixtime'])
return report
_closure_actions = {
'no_action_taken': constants.ReportClosureReason.no_action_taken,
'action_taken': constants.ReportClosureReason.action_taken,
'invalid': constants.ReportClosureReason.invalid,
}
def close(userid, form):
if userid not in staff.MODS:
raise WeasylError("InsufficientPermissions")
root_report = Report.query.get(int(form.reportid))
if root_report is None or root_report.is_closed:
return
if 'close_all_user_reports' in form:
# If we're closing all of the reports opened against a particular content
# owner, do the same thing as in the select_list function and collect Login
# aliases so that filtering can be done by Login.login_name.
q = Report.query
login_aliases = []
for column_name in _report_types:
login_alias = aliased(Login)
login_aliases.append(login_alias)
q = (
q
.outerjoin(getattr(Report, column_name))
.outerjoin(login_alias)
.reset_joinpoint())
q = (
q
.filter_by(is_closed=False)
.filter(sa.or_(l.login_name == root_report.target.owner.login_name for l in login_aliases)))
reports = q.all()
else:
reports = [root_report]
for report in reports:
if report.is_closed:
raise RuntimeError("a closed report shouldn't have gotten this far")
report.closerid = userid
report.settings.mutable_settings.clear()
if 'assign' in form:
report.is_under_review = True
elif 'unassign' in form:
report.closerid = None
else:
report.closed_at = arrow.get()
report.closure_explanation = form.explanation
report.closure_reason = _closure_actions[form.action]
Report.dbsession.flush()
if form.action == 'action_taken':
note_form = web.Storage()
note_form.title = form.note_title
note_form.content = form.user_note
note_form.recipient = root_report.target.owner.login_name
note_form.mod_copy = True
note_form.staff_note = form.explanation
note.send(userid, note_form)
def check(submitid=None, charid=None, journalid=None):
return bool(
Report.query
.filter_by(is_closed=False, **_dict_of_targetid(submitid, charid, journalid))
.count())
def select_reported_list(userid):
q = (
Report.query
.join(ReportComment)
.options(contains_eager(Report.comments))
.options(joinedload('_target_sub'))
.options(joinedload('_target_char'))
.options(joinedload('_target_journal'))
.filter(ReportComment.violation != 0)
.filter_by(userid=userid))
reports = q.all()
for report in reports:
report.latest_report = max(c.unixtime for c in report.comments)
reports.sort(key=lambda r: r.latest_report, reverse=True)
return reports
|
|
"""
Python version of the runProcessOCV Matlab file for A123_OCV battery cell.
"""
import matplotlib.pyplot as plt
import numpy as np
import json
from models import BatteryData, FileData, ModelOcv
from funcs import OCVfromSOCtemp
from pathlib import Path
# Parameters and Data
# ------------------------------------------------------------------------------
# temperatures for cell experiments
temps = np.array([-25, -15, -5, 5, 15, 25, 35, 45])
minV = 2.00 # minimum cell voltage, used for plotting results
maxV = 3.75 # maximum cell voltage, used for plotting results
SOC = np.arange(0, 1+0.005, 0.005).round(decimals=3) # range for state of charge
# initialize variables to store calculations
eta = np.zeros(len(temps)) # coulombic efficiency
Q = np.zeros(len(temps)) # apparent total capacity
# initialize array to store battery cell data
data = np.zeros(len(temps), dtype=object)
# load battery cell data for each temperature as objects then store in data array
for idx, temp in enumerate(temps):
    # negative temperatures use an 'N' prefix in the data file names, positive temperatures a 'P' prefix
    prefix = 'N' if temp < 0 else 'P'
    tempfmt = f'{abs(temp):02}'
    files = [Path(f'./ocv_data/A123_OCV_{prefix}{tempfmt}_S{script}.csv') for script in range(1, 5)]
    data[idx] = BatteryData(files)
# initialize array to store calculated data
filedata = np.zeros(len(temps), dtype=object)
# Process 25 degC data to find raw OCV relationship and eta25
# ------------------------------------------------------------------------------
k, = np.where(temps == 25)[0] # index where temperature is 25 degC
p25 = data[k]
# compute total discharge in ampere hours, Ah
totDisAh = p25.s1.disAh[-1] + p25.s2.disAh[-1] + p25.s3.disAh[-1] + p25.s4.disAh[-1]
# compute total charge in ampere hours, Ah
totChgAh = p25.s1.chgAh[-1] + p25.s2.chgAh[-1] + p25.s3.chgAh[-1] + p25.s4.chgAh[-1]
# the 25 degC coulombic efficiency
eta25 = totDisAh/totChgAh
eta[k] = eta25
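# Hedged numeric sketch (made-up values, not taken from the A123 data files):
# if the four scripts discharged a total of 2.10 Ah while charging 2.14 Ah,
# then eta25 = 2.10/2.14 ~= 0.981, i.e. roughly 1.9 % of the charge put into
# the cell at 25 degC is never recovered on discharge.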
# adjust charge Ah in all scripts per eta25
p25.s1.chgAh = p25.s1.chgAh * eta25
p25.s2.chgAh = p25.s2.chgAh * eta25
p25.s3.chgAh = p25.s3.chgAh * eta25
p25.s4.chgAh = p25.s4.chgAh * eta25
# compute cell capacity at 25 degC, should be essentially same at
# all temps, but we're computing them individually to check this
Q25 = p25.s1.disAh[-1] + p25.s2.disAh[-1] - p25.s1.chgAh[-1] - p25.s2.chgAh[-1]
Q[k] = Q25
# discharge
indD = np.where(p25.s1.step == 2)[0] # slow discharge step
IR1Da = p25.s1.voltage[indD[0]-1] - p25.s1.voltage[indD[0]] # the i*R voltage drop at beginning of discharge
IR2Da = p25.s1.voltage[indD[-1]+1] - p25.s1.voltage[indD[-1]] # the i*R voltage drop at end of discharge
# charge
indC = np.where(p25.s3.step == 2)[0] # slow charge step
IR1Ca = p25.s3.voltage[indC[0]] - p25.s3.voltage[indC[0]-1] # the i*R voltage rise at beginning of charge
IR2Ca = p25.s3.voltage[indC[-1]] - p25.s3.voltage[indC[-1]+1] # the i*R voltage rise at end of charge
# put bounds on R
IR1D = min(IR1Da, 2*IR2Ca)
IR2D = min(IR2Da, 2*IR1Ca)
IR1C = min(IR1Ca, 2*IR2Da)
IR2C = min(IR2Ca, 2*IR1Da)
# discharge
blendD = np.linspace(0, 1, len(indD)) # linear blending from 0 to 1 for discharge
IRblendD = IR1D + (IR2D - IR1D)*blendD # blend resistances for discharge
disV = p25.s1.voltage[indD] + IRblendD # approximate discharge voltage at each point
disZ = 1 - p25.s1.disAh[indD]/Q25 # approximate SOC at each point
disZ = disZ + (1 - disZ[0])
# charge
blendC = np.linspace(0, 1, len(indC)) # linear blending from 0 to 1 for charge
IRblendC = IR1C + (IR2C - IR1C)*blendC # blend resistances for charge
chgV = p25.s3.voltage[indC] - IRblendC # approximate charge voltage at each point
chgZ = p25.s3.chgAh[indC]/Q25 # approximate SOC at each point
chgZ = chgZ - chgZ[0]
# Compute the voltage difference between charge and discharge at 50% SOC, and
# force the i*R-compensated curve to pass halfway between the charge and
# discharge curves at that point. Note that the chgZ and disZ vectors must be
# increasing for np.interp to work correctly.
deltaV50 = np.interp(0.5, chgZ, chgV) - np.interp(0.5, disZ[::-1], disV[::-1])
ind = np.where(chgZ < 0.5)[0]
vChg = chgV[ind] - chgZ[ind]*deltaV50
zChg = chgZ[ind]
ind = np.where(disZ > 0.5)[0]
vDis = disV[ind] + (1 - disZ[ind])*deltaV50
zDis = disZ[ind]
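# (Sanity check of the shift above, added editorially: at z = 0.5 the adjusted
#  curves evaluate to chgV - 0.5*deltaV50 and disV + 0.5*deltaV50, which are
#  both exactly the midpoint of the charge and discharge voltages, so the
#  combined rawocv curve passes halfway between them at 50 % SOC.)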
# rawocv now has our best guess of true ocv at this temperature
rawocv = np.interp(SOC, np.concatenate([zChg, zDis[::-1]]), np.concatenate([vChg, vDis[::-1]]))
# store calculated data into filedata object
filedata[k] = FileData(p25.s1.voltage[indD], disZ, p25.s3.voltage[indC], chgZ, rawocv, temps[k])
# Process Other Temperatures to Find Raw OCV Relationship and Eta
# Everything that follows is the same as at 25 degC, except we need to compensate
# for different coulombic efficiencies eta at different temperatures.
# ------------------------------------------------------------------------------
not25, = np.where(temps != 25)
for k in not25:
# adjust charge Ah per eta25
data[k].s2.chgAh = data[k].s2.chgAh * eta25
data[k].s4.chgAh = data[k].s4.chgAh * eta25
# coulombic efficiency
eta[k] = ((data[k].s1.disAh[-1] + data[k].s2.disAh[-1] + data[k].s3.disAh[-1]
+ data[k].s4.disAh[-1] - data[k].s2.chgAh[-1] - data[k].s4.chgAh[-1])
/ (data[k].s1.chgAh[-1] + data[k].s3.chgAh[-1]))
# adjust charge Ah per eta at current temp
data[k].s1.chgAh = data[k].s1.chgAh * eta[k]
data[k].s3.chgAh = data[k].s3.chgAh * eta[k]
# compute cell capacity
Q[k] = data[k].s1.disAh[-1] + data[k].s2.disAh[-1] - data[k].s1.chgAh[-1] - data[k].s2.chgAh[-1]
# discharge
indD = np.where(data[k].s1.step == 2)[0] # slow discharge step
IR1D = data[k].s1.voltage[indD[0]-1] - data[k].s1.voltage[indD[0]] # the i*R voltage drop at beginning of discharge
IR2D = data[k].s1.voltage[indD[-1]+1] - data[k].s1.voltage[indD[-1]] # the i*R voltage drop at end of discharge
# charge
indC = np.where(data[k].s3.step == 2)[0] # slow charge step
IR1C = data[k].s3.voltage[indC[0]] - data[k].s3.voltage[indC[0]-1] # the i*R voltage rise at beginning of charge
IR2C = data[k].s3.voltage[indC[-1]] - data[k].s3.voltage[indC[-1]+1] # the i*R voltage rise at end of charge
# put bounds on R
IR1D = min(IR1D, 2*IR2C)
IR2D = min(IR2D, 2*IR1C)
IR1C = min(IR1C, 2*IR2D)
IR2C = min(IR2C, 2*IR1D)
# discharge
blend = np.linspace(0, 1, len(indD)) # linear blending from 0 to 1 for discharge
IRblend = IR1D + (IR2D - IR1D)*blend # blend resistances for discharge
disV = data[k].s1.voltage[indD] + IRblend # approximate discharge voltage at each point
disZ = 1 - data[k].s1.disAh[indD]/Q25 # approximate SOC at each point
disZ = disZ + (1 - disZ[0])
# charge
blend = np.linspace(0, 1, len(indC)) # linear blending from 0 to 1 for charge
IRblend = IR1C + (IR2C - IR1C)*blend # blend resistances for charge
chgV = data[k].s3.voltage[indC] - IRblend # approximate charge voltage at each point
chgZ = data[k].s3.chgAh[indC]/Q25 # approximate SOC at each point
chgZ = chgZ - chgZ[0]
    # Compute the voltage difference between charge and discharge at 50% SOC, and
    # force the i*R-compensated curve to pass halfway between the charge and
    # discharge curves at that point. Note that the chgZ and disZ vectors must be
    # increasing for np.interp to work correctly.
deltaV50 = np.interp(0.5, chgZ, chgV) - np.interp(0.5, disZ[::-1], disV[::-1])
ind = np.where(chgZ < 0.5)[0]
vChg = chgV[ind] - chgZ[ind]*deltaV50
zChg = chgZ[ind]
ind = np.where(disZ > 0.5)[0]
vDis = disV[ind] + (1 - disZ[ind])*deltaV50
zDis = disZ[ind]
# rawocv now has our best guess of true ocv at this temperature
rawocv = np.interp(SOC, np.concatenate([zChg, zDis[::-1]]), np.concatenate([vChg, vDis[::-1]]))
# store calculated data into filedata object
filedata[k] = FileData(data[k].s1.voltage[indD], disZ, data[k].s3.voltage[indC], chgZ, rawocv, temps[k])
# Use the SOC versus OCV data now available at each individual
# temperature to compute an OCV0 and OCVrel relationship
# ------------------------------------------------------------------------------
# compile the voltages and temperatures into single arrays rather than structures
postemps = temps[temps > 0] # temps > 0
numtempskept = len(postemps) # number of temps > 0
nocv = len(filedata[5].rawocv) # number of rawocv values based on 25 degC results
Vraw = np.zeros([numtempskept, nocv]) # initialize rawocv array
idxpos = np.where(temps > 0)[0] # indices of positive file temperatures
for k in range(numtempskept):
Vraw[k] = filedata[idxpos[k]].rawocv
# use linear least squares to determine best guess for OCV at 0 degC
# and then the per-degree OCV change
OCV0 = np.zeros(len(SOC))
OCVrel = np.zeros(len(SOC))
H = np.ones([numtempskept, 2])
H[:, 1] = postemps
for k in range(len(SOC)):
X = np.linalg.lstsq(H, Vraw[:, k], rcond=None)
OCV0[k] = X[0][0]
OCVrel[k] = X[0][1]
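# Editorial note on the fit above: each column of the lstsq solve implements the
# model assumption OCV(z, T) ~= OCV0(z) + T*OCVrel(z) (each row of H is [1, T]),
# which is the relationship OCVfromSOCtemp is expected to evaluate when the
# model is used below.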
modelocv = ModelOcv(OCV0, OCVrel, SOC, 0, 0, 0, 0, 0)
# Make SOC0 and SOCrel
# Do the same kind of analysis to find SOC as a function of OCV
# ------------------------------------------------------------------------------
z = np.arange(-0.1, 1.1, 0.01) # test soc vector
v = np.arange(minV-0.01, maxV+0.02, 0.01).round(decimals=2)
socs = np.zeros((len(temps), len(v)))
for k, _ in enumerate(temps):
T = temps[k]
v1 = OCVfromSOCtemp(z, T, modelocv)
socs[k, :] = np.interp(v, v1, z)
SOC0 = np.zeros(len(v))
SOCrel = np.zeros(len(v))  # separate array: assigning SOCrel = SOC0 would alias the two and overwrite SOC0
H = np.ones([len(temps), 2])
H[:, 1] = temps
for k in range(len(v)):
X = np.linalg.lstsq(H, socs[:, k], rcond=None) # fit SOC(v,T) = 1*SOC0(v) + T*SOCrel(v)
SOC0[k] = X[0][0]
SOCrel[k] = X[0][1]
# store ocv results in model object
# ------------------------------------------------------------------------------
modelocv = ModelOcv(OCV0, OCVrel, SOC, v, SOC0, SOCrel, eta, Q)
# Plot Results
# ------------------------------------------------------------------------------
plt.close('all')
plt.ion()
for k, _ in enumerate(temps):
err = filedata[k].rawocv - OCVfromSOCtemp(SOC, filedata[k].temp, modelocv)
rmserr = np.sqrt(np.mean(err**2))
plt.figure(k+1)
plt.plot(100*SOC, OCVfromSOCtemp(SOC, filedata[k].temp, modelocv), 'k', label='model')
plt.plot(100*SOC, filedata[k].rawocv, 'r', label='approx')
plt.plot(100*filedata[k].disZ, filedata[k].disV, 'g--', label='dis')
plt.plot(100*filedata[k].chgZ, filedata[k].chgV, 'b--', label='chg')
plt.text(2, maxV-0.15, f'RMS error = {rmserr*1000:.01f} mV')
plt.ylim(minV-0.2, maxV+0.2)
plt.title(f'A123 OCV relationship at temp = {temps[k]}')
plt.xlabel('SOC (%)')
plt.ylabel('OCV (V)')
plt.legend(numpoints=1, loc='lower right')
plt.grid()
# convert model object to dict, then save in JSON to disk
# ------------------------------------------------------------------------------
if True:
modelocv = {k:v.tolist() for k,v in modelocv.__dict__.items() if isinstance(v, np.ndarray)}
with open('modelocv.json', 'w') as json_file:
json.dump(modelocv, json_file, indent=4)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
import os
import shutil
import sys
import tempfile
import unittest as python_unittest
from haas.testing import unittest
from haas.tests import _test_cases, builder
from haas.tests.compat import mock
from haas.loader import Loader
from haas.module_import_error import ModuleImportError
from haas.suite import find_test_cases, TestSuite
from haas.utils import cd
from ..discoverer import (
Discoverer,
filter_test_suite,
find_module_by_name,
find_top_level_directory,
get_module_name,
)
class FilterTestCase(_test_cases.TestCase):
pass
class TestDiscoveryMixin(object):
def setUp(self):
self.tmpdir = os.path.abspath(tempfile.mkdtemp())
self.dirs = dirs = ['haas_test_package', 'tests']
path = self.tmpdir
for dir_ in dirs:
path = os.path.join(path, dir_)
os.makedirs(path)
with open(os.path.join(path, '__init__.py'), 'w'):
pass
destdir = os.path.join(self.tmpdir, *dirs)
base = os.path.splitext(_test_cases.__file__)[0]
srcfile = '{0}.py'.format(base)
shutil.copyfile(
srcfile, os.path.join(destdir, 'test_cases.py'))
def tearDown(self):
for key in list(sys.modules.keys()):
if key in sys.modules and key.startswith(self.dirs[0]):
del sys.modules[key]
if self.tmpdir in sys.path:
sys.path.remove(self.tmpdir)
shutil.rmtree(self.tmpdir)
def get_test_cases(self, suite):
for test in find_test_cases(suite):
yield test
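    # Editorial note (not in the original): find_test_cases walks an arbitrarily
    # nested TestSuite and yields the leaf TestCase instances, so the tests below
    # can count and inspect individual cases regardless of suite nesting.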
class TestFindTopLevelDirectory(TestDiscoveryMixin, unittest.TestCase):
def test_from_top_level_directory(self):
directory = find_top_level_directory(self.tmpdir)
self.assertEqual(directory, self.tmpdir)
def test_from_leaf_directory(self):
directory = find_top_level_directory(
os.path.join(self.tmpdir, *self.dirs))
self.assertEqual(directory, self.tmpdir)
def test_from_middle_directory(self):
directory = find_top_level_directory(
os.path.join(self.tmpdir, self.dirs[0]))
self.assertEqual(directory, self.tmpdir)
def test_from_nonpackage_directory(self):
nonpackage = os.path.join(self.tmpdir, self.dirs[0], 'nonpackage')
os.makedirs(nonpackage)
directory = find_top_level_directory(nonpackage)
self.assertEqual(directory, nonpackage)
def test_relative_directory(self):
relative = os.path.join(self.tmpdir, self.dirs[0], '..', *self.dirs)
directory = find_top_level_directory(relative)
self.assertEqual(directory, self.tmpdir)
def test_no_top_level(self):
os_path_dirname = os.path.dirname
def dirname(path):
if os.path.basename(os_path_dirname(path)) not in self.dirs:
return path
return os_path_dirname(path)
with mock.patch('os.path.dirname', dirname):
with self.assertRaises(ValueError):
find_top_level_directory(os.path.join(self.tmpdir, *self.dirs))
class TestGetModuleName(TestDiscoveryMixin, unittest.TestCase):
def test_module_in_project(self):
module_path = os.path.join(self.tmpdir, *self.dirs)
module_name = get_module_name(self.tmpdir, module_path)
self.assertEqual(module_name, '.'.join(self.dirs))
def test_module_not_in_project_deep(self):
module_path = os.path.join(self.tmpdir, *self.dirs)
with self.assertRaises(ValueError):
get_module_name(os.path.dirname(__file__), module_path)
def test_module_not_in_project_relpath(self):
module_path = os.path.abspath(
os.path.join(self.tmpdir, '..', *self.dirs))
with self.assertRaises(ValueError):
get_module_name(self.tmpdir, module_path)
class TestFindModuleByName(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path.remove(self.tmpdir)
TestDiscoveryMixin.tearDown(self)
def test_package_in_project(self):
module, case_attributes = find_module_by_name('.'.join(self.dirs))
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, '__init__')
self.assertEqual(os.path.splitext(module.__file__)[0], filename)
def test_missing_package_in_project(self):
module_name = '.'.join(self.dirs + ['missing'])
module, case_attributes = find_module_by_name(module_name)
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, '__init__')
self.assertEqual(os.path.splitext(module.__file__)[0], filename)
self.assertEqual(case_attributes, ['missing'])
def test_module_attribute_in_project(self):
module_name = '.'.join(self.dirs + ['test_cases'])
test_case_name = '.'.join([module_name, 'TestCase'])
try:
module, case_attributes = find_module_by_name(test_case_name)
module_file = module.__file__
finally:
del sys.modules[module_name]
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, 'test_cases')
self.assertEqual(os.path.splitext(module_file)[0], filename)
self.assertEqual(case_attributes, ['TestCase'])
def test_missing_top_level_package_in_project(self):
with self.assertRaises(ImportError):
find_module_by_name('no_module')
class TestFilterTestSuite(unittest.TestCase):
def setUp(self):
self.case_1 = _test_cases.TestCase(methodName='test_method')
self.case_2 = _test_cases.TestCase(methodName='_private_method')
self.case_3 = FilterTestCase(methodName='_private_method')
self.suite = TestSuite(
[
TestSuite(
[
self.case_1,
self.case_2,
],
),
TestSuite(
[
self.case_3,
],
),
],
)
def tearDown(self):
del self.suite
del self.case_3
del self.case_2
del self.case_1
def test_filter_by_method_name(self):
filtered_suite = filter_test_suite(self.suite, 'test_method')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_1)
def test_filter_by_class_name(self):
filtered_suite = filter_test_suite(self.suite, 'FilterTestCase')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_3)
def test_filter_by_module_name(self):
filtered_suite = filter_test_suite(self.suite, '_test_cases')
self.assertEqual(len(filtered_suite), 2)
test1, test2 = filtered_suite
self.assertIs(test1, self.case_1)
self.assertIs(test2, self.case_2)
def test_filter_by_package_name(self):
filtered_suite = filter_test_suite(self.suite, 'test_discoverer')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_3)
def test_filter_by_nonexistant_name(self):
filtered_suite = filter_test_suite(self.suite, 'nothing_called_this')
self.assertEqual(len(filtered_suite), 0)
def test_filter_by_class_and_test_name(self):
filtered_suite = filter_test_suite(
self.suite, 'TestCase.test_method')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_1)
def test_filter_by_module_and_class(self):
filtered_suite = filter_test_suite(
self.suite, '_test_cases.TestCase')
self.assertEqual(len(filtered_suite), 2)
test1, test2 = filtered_suite
self.assertIs(test1, self.case_1)
self.assertIs(test2, self.case_2)
def test_filter_by_module_and_class_and_test(self):
filtered_suite = filter_test_suite(
self.suite, '_test_cases.TestCase.test_method')
self.assertEqual(len(filtered_suite), 1)
test1, = filtered_suite
self.assertIs(test1, self.case_1)
class TestDiscoveryByPath(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def assertSuite(self, suite):
self.assertIsInstance(suite, TestSuite)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_from_top_level_directory(self):
suite = self.discoverer.discover(self.tmpdir)
self.assertSuite(suite)
def test_from_leaf_directory(self):
suite = self.discoverer.discover(os.path.join(self.tmpdir, *self.dirs))
self.assertSuite(suite)
def test_from_middle_directory(self):
suite = self.discoverer.discover(
os.path.join(self.tmpdir, self.dirs[0]))
self.assertSuite(suite)
def test_start_from_nonpackage_directory(self):
nonpackage = os.path.join(self.tmpdir, self.dirs[0], 'nonpackage')
os.makedirs(nonpackage)
suite = self.discoverer.discover(nonpackage)
self.assertEqual(len(list(suite)), 0)
def test_from_nested_nonpackage_directory(self):
"""
Regression test for #38
"""
# Given
nonpackage = os.path.join(self.tmpdir, 'nonpackage')
package = os.path.join(nonpackage, 'nonpackage', 'tests')
os.makedirs(package)
with open(os.path.join(package, '__init__.py'), 'w'):
pass
with open(os.path.join(package, 'test.py'), 'w'):
pass
# When
suite = self.discoverer.discover(nonpackage, nonpackage)
# Then
self.assertEqual(suite.countTestCases(), 0)
def test_relative_directory(self):
relative = os.path.join(self.tmpdir, self.dirs[0], '..', *self.dirs)
suite = self.discoverer.discover(relative)
self.assertSuite(suite)
def test_given_correct_top_level_directory(self):
suite = self.discoverer.discover(
self.tmpdir, top_level_directory=self.tmpdir)
self.assertSuite(suite)
def test_given_incorrect_top_level_directory(self):
with self.assertRaises(ImportError):
self.discoverer.discover(
self.tmpdir,
top_level_directory=os.path.dirname(self.tmpdir),
)
def test_top_level_directory_on_path(self):
sys.path.insert(0, self.tmpdir)
try:
suite = self.discoverer.discover(self.tmpdir)
finally:
sys.path.remove(self.tmpdir)
self.assertSuite(suite)
class TestDiscoveryByModule(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def test_discover_package(self):
suite = self.discoverer.discover(
'.'.join(self.dirs),
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_package_no_top_level(self):
suite = self.discoverer.discover('haas.tests')
tests = list(self.get_test_cases(suite))
self.assertGreater(len(tests), 1)
def test_discover_module(self):
module = '{0}.test_cases'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_case(self):
module = '{0}.test_cases.TestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_missing_case(self):
module = '{0}.test_cases.MissingTestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 0)
def test_discover_not_case(self):
module = '{0}.test_cases.NotTestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 0)
def test_discover_method(self):
module = '{0}.test_cases.TestCase.test_method'.format(
'.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_too_many_components(self):
module = '{0}.test_cases.TestCase.test_method.nothing'.format(
'.'.join(self.dirs))
with self.assertRaises(ValueError):
self.discoverer.discover(module, top_level_directory=self.tmpdir)
class TestDiscoverFilteredTests(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def test_discover_subpackage(self):
suite = self.discoverer.discover(
'tests',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_test_method(self):
suite = self.discoverer.discover(
'test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_class(self):
suite = self.discoverer.discover(
'TestCase',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_no_top_level(self):
getcwd = mock.Mock()
getcwd.return_value = self.tmpdir
with mock.patch.object(os, 'getcwd', getcwd):
suite = self.discoverer.discover(
'TestCase',
)
getcwd.assert_called_once_with()
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_class_and_method(self):
suite = self.discoverer.discover(
'TestCase.test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_module_and_class_and_method(self):
suite = self.discoverer.discover(
'test_cases.TestCase.test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_module_and_class(self):
suite = self.discoverer.discover(
'test_cases.TestCase',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
class TestDiscovererImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module1 = builder.Module('test_something.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
subpackage = builder.Package(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('package2', (module2,)),
),
)
package = builder.Package('package', (subpackage,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
module_path = os.path.join(
self.tempdir, fixture.name, package.name, subpackage.name,
module1.name)
with open(module_path, 'w') as fh:
fh.write('import haas.i_dont_exist\n')
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_creates_importerror_testcase(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(
self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 3)
case_names = [
type(case).__name__ for case in find_test_cases(suite)]
self.assertEqual(
case_names, ['ModuleImportError', 'TestSomething',
'TestSomething'])
def test_importerror_testcase(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(
self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 3)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.errors), 1)
class TestDiscovererNonPackageImport(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module1 = builder.Module('test_something.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
subpackage = builder.Directory(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('package2', (module2,)),
),
)
package = builder.Directory('package', (subpackage,))
fixture = builder.Directory('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_skips_non_packages(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 0)
class TestDiscovererDotInModuleName(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
expected_klass = builder.Class(
'TestExpected',
(
builder.Method('test_expected'),
),
)
module1 = builder.Module('test_some.thing.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
module3 = builder.Module('test_another_one.py', (expected_klass,))
subpackage = builder.Package(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('packa.ge2', (module2,)),
builder.Package('package3', (module3,)),
),
)
package = builder.Package('package', (subpackage,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(self.tempdir, self.tempdir)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestExpected')
self.assertEqual(case._testMethodName, 'test_expected')
class TestDiscovererNeverFilterModuleImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('ImportError', 'import haas.i_dont_exist')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (text, klass,))
package = builder.Package('package', (module,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
class TestDiscovererSelectiveFilterPackageImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('ImportError', 'from . import i_dont_exist')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (klass,))
fixture = builder.Directory(
'testing_package',
(
builder.Module('__init__.py', (text,)),
module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
class TestDiscovererFindTestsByFilePath(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_something.py', (klass,))
package = builder.Package('package', (module,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests_no_prefix_dot_slash(self):
# Given
start = 'fixture/package/test_something.py'
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(start, None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestSomething')
self.assertEqual(case._testMethodName, 'test_method')
def test_discover_tests_with_dot_slash(self):
# Given
start = './fixture/package/test_something.py'
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(start, None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestSomething')
self.assertEqual(case._testMethodName, 'test_method')
class TestDiscovererEmacsRecoveryFiles(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
        # create both the Emacs recovery file and the real test module so that
        # discovery actually has to skip the '.#' file
        recovery_module = builder.Module('.#test_module.py', (klass,))
        module = builder.Module('test_module.py', (klass,))
        fixture = builder.Package(
            'testing_package',
            (
                recovery_module,
                module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_error_emacs_recovery_file(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, unittest.TestCase)
self.assertEqual(case._testMethodName, 'test_method')
class TestDiscovererExceptionOnModuleImport(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('RuntimeError', 'raise RuntimeError("failed")')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (text, klass,))
fixture = builder.Package(
'testing_package',
(
module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests_runtime_error_on_import(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
|
|
__all__ = ['KanesMethod']
from sympy import Symbol, zeros, Matrix, diff, solve_linear_system_LU, eye
from sympy.utilities import default_sort_key
from sympy.physics.mechanics.essential import ReferenceFrame, dynamicsymbols
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.point import Point
from sympy.physics.mechanics.rigidbody import RigidBody
from sympy.physics.mechanics.functions import (inertia_of_point_mass,
partial_velocity)
class KanesMethod(object):
"""Kane's method object.
This object is used to do the "book-keeping" as you go through and form
equations of motion in the way Kane presents in:
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
The attributes are for equations in the form [M] udot = forcing.
Attributes
==========
auxiliary : Matrix
If applicable, the set of auxiliary Kane's
equations used to solve for non-contributing
forces.
mass_matrix : Matrix
The system's mass matrix
forcing : Matrix
The system's forcing vector
mass_matrix_full : Matrix
The "mass matrix" for the u's and q's
forcing_full : Matrix
The "forcing vector" for the u's and q's
Examples
========
This is a simple example for a one degree of freedom translational
spring-mass-damper.
In this example, we first need to do the kinematics.
This involves creating generalized speeds and coordinates and their
derivatives.
Then we create a point and set its velocity in a frame::
>>> from sympy import symbols
>>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.mechanics import Point, Particle, KanesMethod
>>> q, u = dynamicsymbols('q u')
>>> qd, ud = dynamicsymbols('q u', 1)
>>> m, c, k = symbols('m c k')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
Next we need to arrange/store information in the way that KanesMethod
requires. The kinematic differential equations need to be stored in a
dict. A list of forces/torques must be constructed, where each entry in
the list is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the
Vectors represent the Force or Torque.
Next a particle needs to be created, and it needs to have a point and mass
assigned to it.
Finally, a list of all bodies and particles needs to be created::
>>> kd = [qd - u]
>>> FL = [(P, (-k * q - c * u) * N.x)]
>>> pa = Particle('pa', P, m)
>>> BL = [pa]
Finally we can generate the equations of motion.
First we create the KanesMethod object and supply an inertial frame,
coordinates, generalized speeds, and the kinematic differential equations.
Additional quantities such as configuration and motion constraints,
dependent coordinates and speeds, and auxiliary speeds are also supplied
here (see the online documentation).
Next we form FR* and FR to complete: Fr + Fr* = 0.
We have the equations of motion at this point.
    It makes sense to rearrange them though, so we calculate the mass matrix and
the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is
the mass matrix, udot is a vector of the time derivatives of the
generalized speeds, and forcing is a vector representing "forcing" terms::
>>> KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd)
>>> (fr, frstar) = KM.kanes_equations(FL, BL)
>>> MM = KM.mass_matrix
>>> forcing = KM.forcing
>>> rhs = MM.inv() * forcing
>>> rhs
[(-c*u(t) - k*q(t))/m]
>>> KM.linearize()[0]
[ 0, 1]
[-k, -c]
Please look at the documentation pages for more information on how to
perform linearization and how to deal with dependent coordinates & speeds,
    and how to deal with bringing non-contributing forces into evidence.
"""
simp = True
def __init__(self, frame, q_ind, u_ind, kd_eqs=None, q_dependent=[],
configuration_constraints=[], u_dependent=[],
velocity_constraints=[], acceleration_constraints=None,
u_auxiliary=[]):
"""Please read the online documentation. """
# Big storage things
if not isinstance(frame, ReferenceFrame):
            raise TypeError('An inertial ReferenceFrame must be supplied')
self._inertial = frame
self._forcelist = None
self._bodylist = None
self._fr = None
self._frstar = None
self._rhs = None
self._aux_eq = None
# States
self._q = None
self._qdep = []
self._qdot = None
self._u = None
self._udep = []
self._udot = None
self._uaux = None
# Differential Equations Matrices
self._k_d = None
self._f_d = None
self._k_kqdot = None
self._k_ku = None
self._f_k = None
# Constraint Matrices
self._f_h = Matrix([])
self._k_nh = Matrix([])
self._f_nh = Matrix([])
self._k_dnh = Matrix([])
self._f_dnh = Matrix([])
self._coords(q_ind, q_dependent, configuration_constraints)
self._speeds(u_ind, u_dependent, velocity_constraints,
acceleration_constraints, u_auxiliary)
if kd_eqs is not None:
self._kindiffeq(kd_eqs)
def _find_dynamicsymbols(self, inlist, insyms=[]):
"""Finds all non-supplied dynamicsymbols in the expressions."""
from sympy.core.function import AppliedUndef, Derivative
t = dynamicsymbols._t
temp_f = set().union(*[i.atoms(AppliedUndef) for i in inlist])
temp_d = set().union(*[i.atoms(Derivative) for i in inlist])
set_f = set([a for a in temp_f if a.args == (t,)])
set_d = set([a for a in temp_d if ((a.args[0] in set_f) and all([i == t
for i in a.variables]))])
return list(set.union(set_f, set_d) - set(insyms))
def _find_othersymbols(self, inlist, insyms=[]):
"""Finds all non-dynamic symbols in the expressions."""
return list(reduce(set.union, [i.atoms(Symbol) for i in inlist]) -
set(insyms))
def _mat_inv_mul(self, A, B):
"""Internal Function
Computes A^-1 * B symbolically w/ substitution, where B is not
necessarily a vector, but can be a matrix.
"""
r1, c1 = A.shape
r2, c2 = B.shape
temp1 = Matrix(r1, c1, lambda i, j: Symbol('x' + str(j) + str(r1 * i)))
temp2 = Matrix(r2, c2, lambda i, j: Symbol('y' + str(j) + str(r2 * i)))
for i in range(len(temp1)):
if A[i] == 0:
temp1[i] = 0
for i in range(len(temp2)):
if B[i] == 0:
temp2[i] = 0
temp3 = []
for i in range(c2):
temp3.append(temp1.LDLsolve(temp2[:, i]))
temp3 = Matrix([i.T for i in temp3]).T
return temp3.subs(dict(zip(temp1, A))).subs(dict(zip(temp2, B)))
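    # Editorial note (not part of the original class): _mat_inv_mul swaps the
    # nonzero entries of A and B for plain placeholder Symbols before the
    # LDLsolve, so the triangular solve manipulates small symbols instead of
    # large expressions, and only the final .subs() maps the placeholders back.
    # A hedged sketch of the net effect:
    #     _mat_inv_mul(Matrix([[m, 0], [0, m]]), Matrix([f1, f2]))
    # agrees with Matrix([[m, 0], [0, m]]).inv() * Matrix([f1, f2]), i.e.
    # Matrix([f1/m, f2/m]), but scales better for large symbolic systems.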
def _coords(self, qind, qdep=[], coneqs=[]):
"""Supply all the generalized coordinates in a list.
If some coordinates are dependent, supply them as part of qdep. Their
dependent nature will only show up in the linearization process though.
Parameters
==========
qind : list
A list of independent generalized coords
qdep : list
List of dependent coordinates
coneq : list
List of expressions which are equal to zero; these are the
configuration constraint equations
"""
if not isinstance(qind, (list, tuple)):
raise TypeError('Generalized coords. must be supplied in a list.')
self._q = qind + qdep
self._qdot = [diff(i, dynamicsymbols._t) for i in self._q]
if not isinstance(qdep, (list, tuple)):
raise TypeError('Dependent coordinates and constraints must each be '
'provided in their own list.')
if len(qdep) != len(coneqs):
raise ValueError('There must be an equal number of dependent '
'coordinates and constraints.')
coneqs = Matrix(coneqs)
self._qdep = qdep
self._f_h = coneqs
def _speeds(self, uind, udep=[], coneqs=[], diffconeqs=None, u_auxiliary=[]):
"""Supply all the generalized speeds in a list.
        If there are motion constraints or auxiliary speeds, they are provided
        here as well.
Parameters
==========
uind : list
A list of independent generalized speeds
udep : list
Optional list of dependent speeds
coneqs : list
            Optional list of constraint expressions; these are expressions
            that are equal to zero and define a speed (motion) constraint.
diffconeqs : list
Optional, calculated automatically otherwise; list of constraint
equations; again equal to zero, but define an acceleration
constraint.
u_auxiliary : list
            An optional list of auxiliary speeds used for bringing
non-contributing forces into evidence
"""
if not hasattr(uind, '__iter__'):
raise TypeError('Supply generalized speeds in an iterable.')
self._u = uind + udep
self._udot = [diff(i, dynamicsymbols._t) for i in self._u]
self._uaux = u_auxiliary
if not hasattr(udep, '__iter__'):
raise TypeError('Supply dependent speeds in an iterable.')
if len(udep) != len(coneqs):
raise ValueError('There must be an equal number of dependent '
'speeds and constraints.')
if diffconeqs is not None:
if len(udep) != len(diffconeqs):
raise ValueError('There must be an equal number of dependent '
'speeds and constraints.')
if len(udep) != 0:
u = self._u
uzero = dict(zip(u, [0] * len(u)))
coneqs = Matrix(coneqs)
udot = self._udot
udotzero = dict(zip(udot, [0] * len(udot)))
self._udep = udep
self._f_nh = coneqs.subs(uzero)
self._k_nh = (coneqs - self._f_nh).jacobian(u)
# if no differentiated non holonomic constraints were given, calculate
if diffconeqs is None:
self._k_dnh = self._k_nh
self._f_dnh = (self._k_nh.diff(dynamicsymbols._t) * Matrix(u) +
self._f_nh.diff(dynamicsymbols._t))
else:
self._f_dnh = diffconeqs.subs(udotzero)
self._k_dnh = (diffconeqs - self._f_dnh).jacobian(udot)
o = len(u) # number of generalized speeds
m = len(udep) # number of motion constraints
p = o - m # number of independent speeds
            # As a reminder, the non-holonomic (motion) constraints have the form:
            # B u + C = 0
            B = self._k_nh[:, :]
            C = self._f_nh[:, 0]
            # We partition B into independent and dependent columns.
            # Ars is then -Bdep.inv() * Bind, and it relates dependent speeds to
            # independent speeds as: udep = Ars uind, neglecting the C term here.
self._depB = B
self._depC = C
mr1 = B[:, :p]
ml1 = B[:, p:o]
self._Ars = - self._mat_inv_mul(ml1, mr1)
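            # Hedged worked example (editorial): with a single constraint
            # u1 + 2*u2 = 0 and u2 chosen as the dependent speed, B = [1, 2],
            # so Ars = -(2)**-1 * 1 = -1/2 and udep = Ars*uind gives u2 = -u1/2.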
def kindiffdict(self):
"""Returns the qdot's in a dictionary. """
if self._k_kqdot is None:
raise ValueError('Kin. diff. eqs need to be supplied first.')
sub_dict = solve_linear_system_LU(Matrix([self._k_kqdot.T,
-(self._k_ku * Matrix(self._u) + self._f_k).T]).T, self._qdot)
return sub_dict
def _kindiffeq(self, kdeqs):
"""Supply all the kinematic differential equations in a list.
They should be in the form [Expr1, Expr2, ...] where Expri is equal to
zero
Parameters
==========
kdeqs : list (of Expr)
            The list of kinematic differential equations
"""
if len(self._q) != len(kdeqs):
raise ValueError('There must be an equal number of kinematic '
'differential equations and coordinates.')
uaux = self._uaux
# dictionary of auxiliary speeds which are equal to zero
uaz = dict(zip(uaux, [0] * len(uaux)))
kdeqs = Matrix(kdeqs).subs(uaz)
qdot = self._qdot
qdotzero = dict(zip(qdot, [0] * len(qdot)))
u = self._u
uzero = dict(zip(u, [0] * len(u)))
f_k = kdeqs.subs(uzero).subs(qdotzero)
k_kqdot = (kdeqs.subs(uzero) - f_k).jacobian(Matrix(qdot))
k_ku = (kdeqs.subs(qdotzero) - f_k).jacobian(Matrix(u))
self._k_ku = self._mat_inv_mul(k_kqdot, k_ku)
self._f_k = self._mat_inv_mul(k_kqdot, f_k)
self._k_kqdot = eye(len(qdot))
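        # Hedged worked example (editorial): for the spring-mass-damper case in
        # the class docstring, kd_eqs = [qd - u] gives f_k = 0, k_kqdot = [1]
        # and k_ku = [-1], so kindiffdict() later resolves to {qd: u}, i.e. q' = u.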
def _form_fr(self, fl):
"""Form the generalized active force.
        Computes the vector of generalized active forces.
Used to compute E.o.M. in the form Fr + Fr* = 0.
Parameters
==========
fl : list
Takes in a list of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
"""
if not hasattr(fl, '__iter__'):
raise TypeError('Force pairs must be supplied in an iterable.')
N = self._inertial
self._forcelist = fl[:]
u = self._u
o = len(u) # number of gen. speeds
b = len(fl) # number of forces
FR = zeros(o, 1)
# pull out relevant velocities for constructing partial velocities
vel_list = []
f_list = []
for i in fl:
if isinstance(i[0], ReferenceFrame):
vel_list += [i[0].ang_vel_in(N)]
elif isinstance(i[0], Point):
vel_list += [i[0].vel(N)]
else:
raise TypeError('First entry in pair must be point or frame.')
f_list += [i[1]]
partials = partial_velocity(vel_list, u, N)
# Fill Fr with dot product of partial velocities and forces
for i in range(o):
for j in range(b):
FR[i] -= partials[j][i] & f_list[j]
# In case there are dependent speeds
m = len(self._udep) # number of dependent speeds
if m != 0:
p = o - m
FRtilde = FR[:p, 0]
FRold = FR[p:o, 0]
FRtilde += self._Ars.T * FRold
FR = FRtilde
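            # Hedged continuation of the _speeds example (editorial): with
            # Ars = [-1/2], the constrained active force becomes
            # Ftilde_1 = F_1 - F_2/2, i.e. the dependent speed's contribution
            # is folded into the remaining independent equation.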
self._fr = FR
return FR
def _form_frstar(self, bl):
"""Form the generalized inertia force.
        Computes the vector of generalized inertia forces.
Used to compute E.o.M. in the form Fr + Fr* = 0.
Parameters
==========
bl : list
A list of all RigidBody's and Particle's in the system.
"""
if not hasattr(bl, '__iter__'):
raise TypeError('Bodies must be supplied in an iterable.')
t = dynamicsymbols._t
N = self._inertial
self._bodylist = bl
u = self._u # all speeds
udep = self._udep # dependent speeds
o = len(u)
m = len(udep)
p = o - m
udot = self._udot
udotzero = dict(zip(udot, [0] * o))
# auxiliary speeds
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds which are equal to zero
uaz = dict(zip(uaux, [0] * len(uaux)))
uadz = dict(zip(uauxdot, [0] * len(uauxdot)))
MM = zeros(o, o)
nonMM = zeros(o, 1)
partials = []
        # Fill up the list of partials: the result is a list with as many
        # elements as there are entries in the body list. Each element is
        # itself a list - of length 1 for the translational component of a
        # particle, or of length 2 for the translational and rotational
        # components of a rigid body. The innermost list holds the partial
        # velocities.
for v in bl:
if isinstance(v, RigidBody):
partials += [partial_velocity([v.masscenter.vel(N),
v.frame.ang_vel_in(N)], u, N)]
elif isinstance(v, Particle):
partials += [partial_velocity([v.point.vel(N)], u, N)]
else:
raise TypeError('The body list needs RigidBody or '
'Particle as list elements.')
# This section does 2 things - computes the parts of Fr* that are
# associated with the udots, and the parts that are not associated with
# the udots. This happens for RigidBody and Particle a little
# differently, but similar process overall.
for i, v in enumerate(bl):
if isinstance(v, RigidBody):
M = v.mass.subs(uaz).doit()
I, P = v.inertia
if P != v.masscenter:
# redefine I about the center of mass
# have I S/O, want I S/S*
# I S/O = I S/S* + I S*/O; I S/S* = I S/O - I S*/O
f = v.frame
d = v.masscenter.pos_from(P)
I -= inertia_of_point_mass(M, d, f)
I = I.subs(uaz).doit()
for j in range(o):
for k in range(o):
# translational
MM[j, k] += M * (partials[i][0][j].subs(uaz).doit() &
partials[i][0][k])
# rotational
temp = (I & partials[i][1][j].subs(uaz).doit())
MM[j, k] += (temp &
partials[i][1][k])
# translational components
nonMM[j] += ( (M.diff(t) *
v.masscenter.vel(N)).subs(uaz).doit() &
partials[i][0][j])
nonMM[j] += (M *
v.masscenter.acc(N).subs(udotzero).subs(uaz).doit()
& partials[i][0][j])
# rotational components
omega = v.frame.ang_vel_in(N).subs(uaz).doit()
nonMM[j] += ((I.dt(v.frame) & omega).subs(uaz).doit() &
partials[i][1][j])
nonMM[j] += ((I &
v.frame.ang_acc_in(N)).subs(udotzero).subs(uaz).doit()
& partials[i][1][j])
nonMM[j] += ((omega ^ (I & omega)).subs(uaz).doit() &
partials[i][1][j])
if isinstance(v, Particle):
M = v.mass.subs(uaz).doit()
for j in range(o):
for k in range(o):
MM[j, k] += M * (partials[i][0][j].subs(uaz).doit() &
partials[i][0][k])
nonMM[j] += M.diff(t) * (v.point.vel(N).subs(uaz).doit() &
partials[i][0][j])
nonMM[j] += (M *
v.point.acc(N).subs(udotzero).subs(uaz).doit() &
partials[i][0][j])
FRSTAR = MM * Matrix(udot).subs(uadz) + nonMM
# For motion constraints, m is the number of constraints
# Really, one should just look at Kane's book for descriptions of this
# process
if m != 0:
FRSTARtilde = FRSTAR[:p, 0]
FRSTARold = FRSTAR[p:o, 0]
FRSTARtilde += self._Ars.T * FRSTARold
FRSTAR = FRSTARtilde
MMi = MM[:p, :]
MMd = MM[p:o, :]
MM = MMi + self._Ars.T * MMd
self._frstar = FRSTAR
zeroeq = self._fr + self._frstar
zeroeq = zeroeq.subs(udotzero)
self._k_d = MM
self._f_d = zeroeq
return FRSTAR
def kanes_equations(self, FL, BL):
""" Method to form Kane's equations, Fr + Fr* = 0.
Returns (Fr, Fr*). In the case where auxiliary generalized speeds are
present (say, s auxiliary speeds, o generalized speeds, and m motion
constraints), the returned vectors will have length o - m + s. The first
o - m equations are the constrained Kane's equations, followed by the s
auxiliary Kane's equations. The auxiliary equations can be accessed via
the auxiliary_eqs property.
Parameters
==========
FL : list
Takes in a list of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
BL : list
A list of all RigidBody's and Particle's in the system.
"""
if (self._q is None) or (self._u is None):
raise ValueError('Speeds and coordinates must be supplied first.')
if (self._k_kqdot is None):
raise ValueError(
'Supply kinematic differential equations, please.')
fr = self._form_fr(FL)
frstar = self._form_frstar(BL)
if self._uaux != []:
if self._udep == []:
km = KanesMethod(self._inertial, self._q, self._uaux,
u_auxiliary=self._uaux)
else:
km = KanesMethod(self._inertial, self._q, self._uaux,
u_auxiliary=self._uaux, u_dependent=self._udep,
velocity_constraints=(self._k_nh * Matrix(self._u) + self._f_nh))
self._km = km
fraux = km._form_fr(FL)
frstaraux = km._form_frstar(BL)
self._aux_eq = fraux + frstaraux
self._fr = fr.col_join(fraux)
self._frstar = frstar.col_join(frstaraux)
return (self._fr, self._frstar)
else:
return (fr, frstar)
def linearize(self):
""" Method used to generate linearized equations.
Note that for linearization, it is assumed that time is not perturbed,
but only the coordinates and speeds. The "forcing" vector's jacobian is
computed with respect to the state vector in the form [Qi, Qd, Ui, Ud].
This is the "f_lin_A" matrix.
It also finds any non-state dynamicsymbols and computes the jacobian of
the "forcing" vector with respect to them. This is the "f_lin_B"
matrix; if this is empty, an empty matrix is created.
Consider the following:
If our equations are: [M]qudot = f, where [M] is the full mass matrix,
qudot is a vector of the derivatives of the coordinates and speeds, and
f is the full forcing vector, the linearization process is as follows:
[M]qudot = [f_lin_A]qu + [f_lin_B]y, where qu is the state vector,
f_lin_A is the jacobian of the full forcing vector with respect to the
state vector, f_lin_B is the jacobian of the full forcing vector with
respect to any non-speed/coordinate dynamicsymbols which show up in the
full forcing vector, and y is a vector of those dynamic symbols (each
column in f_lin_B corresponds to a row of the y vector, each of which
is a non-speed/coordinate dynamicsymbol).
To get the traditional state-space A and B matrix, you need to multiply
the f_lin_A and f_lin_B matrices by the inverse of the mass matrix.
Caution needs to be taken when inverting large symbolic matrices;
substituting in numerical values before inverting will work better.
A tuple of (f_lin_A, f_lin_B, other_dynamicsymbols) is returned.
"""
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
# Note that this is now unnecessary, and it should never be
# encountered; I still think it should be in here in case the user
# manually sets these matrices incorrectly.
for i in self._q:
if self._k_kqdot.diff(i) != 0 * self._k_kqdot:
raise ValueError('Matrix K_kqdot must not depend on any q.')
t = dynamicsymbols._t
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds & derivatives which are equal to zero
subdict = dict(zip(uaux + uauxdot, [0] * (len(uaux) + len(uauxdot))))
# Checking for dynamic symbols outside the dynamic differential
# equations; throws error if there is.
insyms = set(
self._q + self._qdot + self._u + self._udot + uaux + uauxdot)
if any(self._find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,
self._k_ku,
self._f_k,
self._k_dnh,
self._f_dnh,
self._k_d]):
raise ValueError('Cannot have dynamicsymbols outside dynamic '
'forcing vector.')
other_dyns = list(self._find_dynamicsymbols(self._f_d.subs(subdict),
insyms))
# make it canonically ordered so the jacobian is canonical
other_dyns.sort(key=default_sort_key)
for i in other_dyns:
if diff(i, dynamicsymbols._t) in other_dyns:
raise ValueError('Cannot have derivatives of specified '
'quantities when linearizing forcing terms.')
o = len(self._u) # number of speeds
n = len(self._q) # number of coordinates
l = len(self._qdep) # number of configuration constraints
m = len(self._udep) # number of motion constraints
qi = Matrix(self._q[: n - l]) # independent coords
qd = Matrix(self._q[n - l: n]) # dependent coords; could be empty
ui = Matrix(self._u[: o - m]) # independent speeds
ud = Matrix(self._u[o - m: o]) # dependent speeds; could be empty
qdot = Matrix(self._qdot) # time derivatives of coordinates
# with equations in the form MM udot = forcing, expand that to:
# MM_full [q,u].T = forcing_full. This combines coordinates and
# speeds together for the linearization, which is necessary for the
# linearization process, due to dependent coordinates. f1 is the rows
# from the kinematic differential equations, f2 is the rows from the
# dynamic differential equations (and differentiated non-holonomic
# constraints).
f1 = self._k_ku * Matrix(self._u) + self._f_k
f2 = self._f_d
# Only want to do this if these matrices have been filled in, which
# occurs when there are dependent speeds
if m != 0:
f2 = self._f_d.col_join(self._f_dnh)
fnh = self._f_nh + self._k_nh * Matrix(self._u)
f1 = f1.subs(subdict)
f2 = f2.subs(subdict)
fh = self._f_h.subs(subdict)
fku = (self._k_ku * Matrix(self._u)).subs(subdict)
fkf = self._f_k.subs(subdict)
# In the code below, we are applying the chain rule by hand on these
# things. All the matrices have been changed into vectors (by
# multiplying the dynamic symbols which it is paired with), so we can
# take the jacobian of them. The basic operation is take the jacobian
# of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of
# q, u, and t; f2 is a function of q, qdot, u, and t. In the code
# below, we are not considering perturbations in t. f1 is a function of
# the q's and u's, but some of those q's or u's may depend on others
# (qd's may depend on qi's; ud's may depend on ui's or qi's). So we take
# the jacobian of f1 wrt the qi's and qd's; the jacobian wrt the qd's is
# then multiplied by the jacobian of qd wrt qi, and the same is done for
# the ud's. dqd_dqi is computed by taking a taylor expansion of
# the holonomic constraint equations about q*, treating q* - q as dq,
# separating into dqd (dependent q's) and dqi (independent q's), and then
# rearranging for dqd/dqi. This is again extended for the speeds.
# First case: configuration and motion constraints
if (l != 0) and (m != 0):
fh_jac_qi = fh.jacobian(qi)
fh_jac_qd = fh.jacobian(qd)
fnh_jac_qi = fnh.jacobian(qi)
fnh_jac_qd = fnh.jacobian(qd)
fnh_jac_ui = fnh.jacobian(ui)
fnh_jac_ud = fnh.jacobian(ud)
fku_jac_qi = fku.jacobian(qi)
fku_jac_qd = fku.jacobian(qd)
fku_jac_ui = fku.jacobian(ui)
fku_jac_ud = fku.jacobian(ud)
fkf_jac_qi = fkf.jacobian(qi)
fkf_jac_qd = fkf.jacobian(qd)
f1_jac_qi = f1.jacobian(qi)
f1_jac_qd = f1.jacobian(qd)
f1_jac_ui = f1.jacobian(ui)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qi = f2.jacobian(qi)
f2_jac_qd = f2.jacobian(qd)
f2_jac_ui = f2.jacobian(ui)
f2_jac_ud = f2.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
dqd_dqi = - self._mat_inv_mul(fh_jac_qd, fh_jac_qi)
dud_dqi = self._mat_inv_mul(fnh_jac_ud, (fnh_jac_qd *
dqd_dqi - fnh_jac_qi))
dud_dui = - self._mat_inv_mul(fnh_jac_ud, fnh_jac_ui)
dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +
fku_jac_ud * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +
(fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)
f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi
f1_u = f1_jac_ui + f1_jac_ud * dud_dui
f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +
f2_jac_ud * dud_dqi)
f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui
# Second case: configuration constraints only
elif l != 0:
dqd_dqi = - self._mat_inv_mul(fh.jacobian(qd), fh.jacobian(qi))
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *
dqd_dqi)
f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +
f2_jac_qdot * dqdot_dqi)
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
# Third case: motion constraints only
elif m != 0:
dud_dqi = self._mat_inv_mul(fnh.jacobian(ud), - fnh.jacobian(qi))
dud_dui = - self._mat_inv_mul(fnh.jacobian(ud), fnh.jacobian(ui))
dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +
fku.jacobian(ud) * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
f2_jac_ud = f2.jacobian(ud)
f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi
f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui
f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud
* dud_dqi)
f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *
dqdot_dui)
# Fourth case: No constraints
else:
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi))
f1_q = f1.jacobian(qi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))
if other_dyns:
f1_oths = f1.jacobian(other_dyns)
f2_oths = f2.jacobian(other_dyns)
f_lin_B = -f1_oths.col_join(f2_oths)
else:
f_lin_B = Matrix([])
return (f_lin_A, f_lin_B, Matrix(other_dyns))
def rhs(self, inv_method=None):
""" Returns the system's equations of motion in first order form.
The output of this will be the right hand side of:
[qdot, udot].T = f(q, u, t)
Or, the equations of motion in first order form. The right hand side
is what is needed by most numerical ODE integrators.
Parameters
==========
inv_method : str
The specific sympy inverse matrix calculation method to use.
"""
if inv_method is None:
self._rhs = self._mat_inv_mul(self.mass_matrix_full,
self.forcing_full)
else:
self._rhs = (self.mass_matrix_full.inv(inv_method,
try_block_diag=True) * self.forcing_full)
return self._rhs
@property
def auxiliary_eqs(self):
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
if self._uaux == []:
raise ValueError('No auxiliary speeds have been declared.')
return self._aux_eq
@property
def mass_matrix(self):
# Returns the mass matrix, which is augmented by the differentiated non
# holonomic equations if necessary
if (self._frstar is None) & (self._fr is None):
raise ValueError('Need to compute Fr, Fr* first.')
return Matrix([self._k_d, self._k_dnh])
@property
def mass_matrix_full(self):
# Returns the mass matrix from above, augmented by kin diff's k_kqdot
if (self._frstar is None) & (self._fr is None):
raise ValueError('Need to compute Fr, Fr* first.')
o = len(self._u)
n = len(self._q)
return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o,
n)).row_join(self.mass_matrix))
@property
def forcing(self):
# Returns the forcing vector, which is augmented by the differentiated
# non holonomic equations if necessary
if (self._frstar is None) & (self._fr is None):
raise ValueError('Need to compute Fr, Fr* first.')
return -Matrix([self._f_d, self._f_dnh])
@property
def forcing_full(self):
# Returns the forcing vector, which is augmented by the differentiated
# non holonomic equations if necessary
if (self._frstar is None) & (self._fr is None):
raise ValueError('Need to compute Fr, Fr* first.')
f1 = self._k_ku * Matrix(self._u) + self._f_k
return -Matrix([f1, self._f_d, self._f_dnh])
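# The sketch below is a minimal, illustrative use of the class above: a one
# degree of freedom mass-spring-damper, following the usual
# sympy.physics.mechanics workflow. The symbol names are local to this sketch
# and the import path is an assumption about the surrounding package.
def _spring_mass_damper_example():
    from sympy import symbols
    from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame,
                                         Point, Particle, KanesMethod)
    q, u = dynamicsymbols('q u')
    qd = dynamicsymbols('q', 1)
    m, c, k = symbols('m c k')
    N = ReferenceFrame('N')
    P = Point('P')
    P.set_vel(N, u * N.x)
    kd = [qd - u]                            # kinematic differential equation
    FL = [(P, (-k * q - c * u) * N.x)]       # force list: spring + damper
    BL = [Particle('pa', P, m)]              # body list: a single particle
    KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd)
    fr, frstar = KM.kanes_equations(FL, BL)  # Fr + Fr* = 0
    rhs = KM.rhs()                           # [qdot, udot] = f(q, u, t)
    f_lin_A, f_lin_B, _ = KM.linearize()     # jacobians of the forcing vector
    return fr, frstar, rhs, f_lin_A, f_lin_B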
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""ResNets, implemented in Gluon."""
__all__ = ['ResNetV1', 'ResNetV2',
'BasicBlockV1', 'BasicBlockV2',
'BottleneckV1', 'BottleneckV2',
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'get_resnet']
import os
from ....device import cpu
from ...block import HybridBlock
from ... import nn
from .... import base
from ....util import use_np, wrap_ctx_to_device_func
from .... import npx
# Helpers
def _conv3x3(channels, stride, in_channels):
return nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
use_bias=False, in_channels=in_channels)
# Blocks
@use_np
class BasicBlockV1(HybridBlock):
r"""BasicBlock V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV1, self).__init__(**kwargs)
self.body = nn.HybridSequential()
self.body.add(_conv3x3(channels, stride, in_channels))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels, 1, channels))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential()
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def forward(self, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = npx.activation(residual+x, act_type='relu')
return x
@use_np
class BottleneckV1(HybridBlock):
r"""Bottleneck V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV1, self).__init__(**kwargs)
self.body = nn.HybridSequential()
self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels//4, 1, channels//4))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential()
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def forward(self, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = npx.activation(x + residual, act_type='relu')
return x
@use_np
class BasicBlockV2(HybridBlock):
r"""BasicBlock V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = _conv3x3(channels, stride, in_channels)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels, 1, channels)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def forward(self, x):
residual = x
x = self.bn1(x)
x = npx.activation(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = npx.activation(x, act_type='relu')
x = self.conv2(x)
return x + residual
@use_np
class BottleneckV2(HybridBlock):
r"""Bottleneck V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = nn.Conv2D(channels//4, kernel_size=1, strides=1, use_bias=False)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels//4, stride, channels//4)
self.bn3 = nn.BatchNorm()
self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def forward(self, x):
residual = x
x = self.bn1(x)
x = npx.activation(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = npx.activation(x, act_type='relu')
x = self.conv2(x)
x = self.bn3(x)
x = npx.activation(x, act_type='relu')
x = self.conv3(x)
return x + residual
# Nets
@use_np
class ResNetV1(HybridBlock):
r"""ResNet V1 model from
`"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
block : gluon.HybridBlock
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV1, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
self.features = nn.HybridSequential()
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, in_channels=channels[i]))
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.Dense(classes, in_units=channels[-1])
def _make_layer(self, block, layers, channels, stride, in_channels=0):
layer = nn.HybridSequential()
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels))
return layer
def forward(self, x):
x = self.features(x)
x = self.output(x)
return x
@use_np
class ResNetV2(HybridBlock):
r"""ResNet V2 model from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
block : gluon.HybridBlock
Class for the residual block. Options are BasicBlockV2, BottleneckV2.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV2, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
self.features = nn.HybridSequential()
self.features.add(nn.BatchNorm(scale=False, center=False))
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
in_channels = channels[0]
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, in_channels=in_channels))
in_channels = channels[i+1]
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.GlobalAvgPool2D())
self.features.add(nn.Flatten())
self.output = nn.Dense(classes, in_units=in_channels)
def _make_layer(self, block, layers, channels, stride, in_channels=0):
layer = nn.HybridSequential()
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels))
return layer
def forward(self, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
resnet_spec = {18: ('basic_block', [2, 2, 2, 2], [64, 64, 128, 256, 512]),
34: ('basic_block', [3, 4, 6, 3], [64, 64, 128, 256, 512]),
50: ('bottle_neck', [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
101: ('bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
152: ('bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}
resnet_net_versions = [ResNetV1, ResNetV2]
resnet_block_versions = [{'basic_block': BasicBlockV1, 'bottle_neck': BottleneckV1},
{'basic_block': BasicBlockV2, 'bottle_neck': BottleneckV2}]
# Constructor
@wrap_ctx_to_device_func
def get_resnet(version, num_layers, pretrained=False, device=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
version : int
Version of ResNet. Options are 1, 2.
num_layers : int
Numbers of layers. Options are 18, 34, 50, 101, 152.
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
assert num_layers in resnet_spec, \
"Invalid number of layers: %d. Options are %s"%(
num_layers, str(resnet_spec.keys()))
block_type, layers, channels = resnet_spec[num_layers]
assert version >= 1 and version <= 2, \
"Invalid resnet version: %d. Options are 1 and 2."%version
resnet_class = resnet_net_versions[version-1]
block_class = resnet_block_versions[version-1][block_type]
net = resnet_class(block_class, layers, channels, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('resnet%d_v%d'%(num_layers, version),
root=root), device=device)
return net
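# A minimal usage sketch for the constructor above. It assumes a working MXNet
# installation exposing the numpy-style array API (`mxnet.np`); the input
# shape (1, 3, 224, 224) is just the conventional ImageNet size, used here
# for illustration only.
def _get_resnet_example():
    import mxnet as mx
    net = get_resnet(1, 18)              # randomly initialized ResNet-18 V1
    net.initialize()
    x = mx.np.zeros((1, 3, 224, 224))    # batch of one RGB image
    return net(x).shape                  # expected to be (1, 1000)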
@wrap_ctx_to_device_func
def resnet18_v1(**kwargs):
r"""ResNet-18 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 18, **kwargs)
@wrap_ctx_to_device_func
def resnet34_v1(**kwargs):
r"""ResNet-34 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 34, **kwargs)
@wrap_ctx_to_device_func
def resnet50_v1(**kwargs):
r"""ResNet-50 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 50, **kwargs)
@wrap_ctx_to_device_func
def resnet101_v1(**kwargs):
r"""ResNet-101 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 101, **kwargs)
@wrap_ctx_to_device_func
def resnet152_v1(**kwargs):
r"""ResNet-152 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 152, **kwargs)
@wrap_ctx_to_device_func
def resnet18_v2(**kwargs):
r"""ResNet-18 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 18, **kwargs)
@wrap_ctx_to_device_func
def resnet34_v2(**kwargs):
r"""ResNet-34 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 34, **kwargs)
@wrap_ctx_to_device_func
def resnet50_v2(**kwargs):
r"""ResNet-50 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 50, **kwargs)
@wrap_ctx_to_device_func
def resnet101_v2(**kwargs):
r"""ResNet-101 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 101, **kwargs)
@wrap_ctx_to_device_func
def resnet152_v2(**kwargs):
r"""ResNet-152 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
device : Device, default CPU
The device in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 152, **kwargs)
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
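# A short, illustrative check of the conversion above (names are local to
# this sketch): integer input comes back as float64, while float input is
# returned unchanged when copy=False.
def _as_float_array_example():
    X_int = np.arange(6).reshape(3, 2)
    X_float = as_float_array(X_int)
    assert X_float.dtype == np.float64
    assert as_float_array(X_float, copy=False) is X_float
    return X_float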
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
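# A brief, illustrative use of check_array (variable names are only for this
# sketch): a nested list becomes a 2d float array, and sparse input is only
# accepted when an allowed format is named explicitly.
def _check_array_example():
    X = check_array([[1, 2], [3, 4]], dtype=np.float64)
    assert X.shape == (2, 2) and X.dtype == np.float64
    X_sparse = check_array(sp.csr_matrix(X), accept_sparse='csr')
    assert sp.issparse(X_sparse) and X_sparse.format == 'csr'
    return X, X_sparse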
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
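# A small illustrative call (names local to this sketch): X is validated as a
# 2d array and y as a 1d vector of consistent length.
def _check_X_y_example():
    X, y = check_X_y([[0., 1.], [1., 0.], [2., 2.]], [0, 1, 1])
    assert X.shape == (3, 2) and y.shape == (3,)
    return X, y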
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
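# Illustrative behaviour of check_random_state: an int seeds a fresh
# RandomState, while an existing RandomState instance is passed through as-is.
def _check_random_state_example():
    rng = check_random_state(0)
    assert isinstance(rng, np.random.RandomState)
    assert check_random_state(rng) is rng
    return rng.rand(3)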
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
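# Illustrative use of check_symmetric (names local to this sketch): a
# non-symmetric matrix is replaced by the average of itself and its
# transpose; the warning is silenced here only to keep the example quiet.
def _check_symmetric_example():
    A = np.array([[0., 1.], [2., 0.]])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        A_sym = check_symmetric(A)
    assert np.allclose(A_sym, [[0., 1.5], [1.5, 0.]])
    return A_sym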
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
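# Illustrative use of check_is_fitted with a throwaway estimator class defined
# only for this sketch: before fit() the check raises NotFittedError, and it
# passes silently afterwards.
def _check_is_fitted_example():
    class _Dummy(object):
        def fit(self, X, y=None):
            self.coef_ = np.zeros(2)
            return self
    est = _Dummy()
    try:
        check_is_fitted(est, 'coef_')
    except _NotFittedError:
        pass  # expected: the estimator has not been fitted yet
    check_is_fitted(est.fit(None), 'coef_')  # no error once coef_ exists
    return est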
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
|
from lxml import etree
from numbers import Number
from troveclient.compat import exceptions
from troveclient.compat.client import TroveHTTPClient
XML_NS = {None: "http://docs.openstack.org/database/api/v1.0"}
# If an XML element's tag is listed here, its ancestors are searched to
# decide whether the element should be converted to a list.
LISTIFY = {
"accounts": [[]],
"databases": [[]],
"flavors": [[]],
"instances": [[]],
"links": [[]],
"hosts": [[]],
"devices": [[]],
"users": [[]],
"versions": [[]],
"attachments": [[]],
"limits": [[]],
"security_groups": [[]],
"backups": [[]]
}
class IntDict(object):
pass
TYPE_MAP = {
"instance": {
"volume": {
"used": float,
"size": int,
},
"deleted": bool,
"server": {
"local_id": int,
"deleted": bool,
},
},
"instances": {
"deleted": bool,
},
"deleted": bool,
"flavor": {
"ram": int,
},
"diagnostics": {
"vmHwm": int,
"vmPeak": int,
"vmSize": int,
"threads": int,
"vmRss": int,
"fdSize": int,
},
"security_group_rule": {
"from_port": int,
"to_port": int,
},
"quotas": IntDict,
}
TYPE_MAP["flavors"] = TYPE_MAP["flavor"]
REQUEST_AS_LIST = set(['databases', 'users'])
def element_ancestors_match_list(element, list):
"""
For an element <root> nested as <foo><blah><root/></blah></foo>, this
matches against the ancestor list ["blah", "foo"].
"""
itr_elem = element.getparent()
for name in list:
if itr_elem is None:
break
if name != normalize_tag(itr_elem):
return False
itr_elem = itr_elem.getparent()
return True
def element_must_be_list(parent_element, name):
"""Determines if an element to be created should be a dict or list."""
if name in LISTIFY:
list_of_lists = LISTIFY[name]
for tag_list in list_of_lists:
if element_ancestors_match_list(parent_element, tag_list):
return True
return False
def element_to_json(name, element):
if element_must_be_list(element, name):
return element_to_list(element)
else:
return element_to_dict(element)
def root_element_to_json(name, element):
"""Returns a tuple of the root JSON value, plus the links if found."""
if name == "rootEnabled": # Why oh why were we inconsistent here? :'(
if element.text.strip() == "False":
return False, None
elif element.text.strip() == "True":
return True, None
if element_must_be_list(element, name):
return element_to_list(element, True)
else:
return element_to_dict(element), None
def element_to_list(element, check_for_links=False):
"""
For element "foo" in <foos><foo/><foo/></foos>
Returns [{}, {}]
"""
links = None
result = []
for child_element in element:
# The "links" element gets jammed into the root element.
if check_for_links and normalize_tag(child_element) == "links":
links = element_to_list(child_element)
else:
result.append(element_to_dict(child_element))
if check_for_links:
return result, links
else:
return result
def element_to_dict(element):
result = {}
for name, value in element.items():
result[name] = value
for child_element in element:
name = normalize_tag(child_element)
result[name] = element_to_json(name, child_element)
if len(result) == 0 and element.text:
string_value = element.text.strip()
if len(string_value):
if string_value == 'None':
return None
return string_value
return result
def standardize_json_lists(json_dict):
"""
In XML, we might see something like {'instances':{'instances':[...]}},
which we must change to just {'instances':[...]} to be compatible with
the true JSON format.
If any items are dictionaries with only one item which is a list,
simply remove the dictionary and insert its list directly.
"""
found_items = []
for key, value in json_dict.items():
value = json_dict[key]
if isinstance(value, dict):
if len(value) == 1 and isinstance(value.values()[0], list):
found_items.append(key)
else:
standardize_json_lists(value)
for key in found_items:
json_dict[key] = json_dict[key].values()[0]
def normalize_tag(elem):
"""Given an element, returns the tag minus the XMLNS junk.
IOW, .tag may sometimes return the XML namespace at the start of the
string. This gets rid of that.
"""
try:
prefix = "{" + elem.nsmap[None] + "}"
if elem.tag.startswith(prefix):
return elem.tag[len(prefix):]
except KeyError:
pass
return elem.tag
def create_root_xml_element(name, value):
"""Create the first element using a name and a dictionary."""
element = etree.Element(name, nsmap=XML_NS)
if name in REQUEST_AS_LIST:
add_subelements_from_list(element, name, value)
else:
populate_element_from_dict(element, value)
return element
def create_subelement(parent_element, name, value):
"""Attaches a new element onto the parent element."""
if isinstance(value, dict):
create_subelement_from_dict(parent_element, name, value)
elif isinstance(value, list):
create_subelement_from_list(parent_element, name, value)
else:
raise TypeError("Can't handle type %s." % type(value))
def create_subelement_from_dict(parent_element, name, dict):
element = etree.SubElement(parent_element, name)
populate_element_from_dict(element, dict)
def create_subelement_from_list(parent_element, name, list):
element = etree.SubElement(parent_element, name)
add_subelements_from_list(element, name, list)
def add_subelements_from_list(element, name, list):
if name.endswith("s"):
item_name = name[:len(name) - 1]
else:
item_name = name
for item in list:
create_subelement(element, item_name, item)
def populate_element_from_dict(element, dict):
for key, value in dict.items():
if isinstance(value, basestring):
element.set(key, value)
elif isinstance(value, Number):
element.set(key, str(value))
elif isinstance(value, None.__class__):
element.set(key, '')
else:
create_subelement(element, key, value)
def modify_response_types(value, type_translator):
"""
    This will convert some strings in the response dictionary to ints or bools
    so that our response is compatible with code expecting JSON-style responses.
"""
if isinstance(value, str):
if value == 'True':
return True
elif value == 'False':
return False
else:
return type_translator(value)
elif isinstance(value, dict):
for k, v in value.iteritems():
if type_translator is not IntDict:
if v.__class__ is dict and v.__len__() == 0:
value[k] = None
elif k in type_translator:
value[k] = modify_response_types(value[k],
type_translator[k])
else:
value[k] = int(value[k])
return value
elif isinstance(value, list):
return [modify_response_types(element, type_translator)
for element in value]
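# Illustrative sketch (added; not part of the original client). In
# morph_response_body below the translator is the module's TYPE_MAP; with a
# scalar translator such as the built-in int, the coercion looks like:
#
#     modify_response_types('5', int)         # -> 5
#     modify_response_types('True', int)      # -> True
#     modify_response_types(['1', '2'], int)  # -> [1, 2]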
class TroveXmlClient(TroveHTTPClient):
@classmethod
def morph_request(self, kwargs):
kwargs['headers']['Accept'] = 'application/xml'
kwargs['headers']['Content-Type'] = 'application/xml'
if 'body' in kwargs:
body = kwargs['body']
root_name = body.keys()[0]
xml = create_root_xml_element(root_name, body[root_name])
xml_string = etree.tostring(xml, pretty_print=True)
kwargs['body'] = xml_string
@classmethod
def morph_response_body(self, body_string):
# The root XML element always becomes a dictionary with a single
# field, which has the same key as the elements name.
result = {}
try:
root_element = etree.XML(body_string)
except etree.XMLSyntaxError:
raise exceptions.ResponseFormatError()
root_name = normalize_tag(root_element)
root_value, links = root_element_to_json(root_name, root_element)
result = {root_name: root_value}
if links:
result['links'] = links
modify_response_types(result, TYPE_MAP)
return result
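# Illustrative round-trip sketch (added; not part of the original client),
# assuming `etree`, `XML_NS`, `REQUEST_AS_LIST` and `TYPE_MAP` are defined
# earlier in this module and that TYPE_MAP covers the 'instance' key:
#
#     kwargs = {'headers': {}, 'body': {'instance': {'name': 'db1'}}}
#     TroveXmlClient.morph_request(kwargs)
#     # kwargs['body'] is now an XML string along the lines of
#     #     <instance xmlns="..." name="db1"/>
#     # and morph_response_body() maps such a document back to
#     #     {'instance': {'name': 'db1'}}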
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Toeplitz matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorToeplitz",]
@tf_export("linalg.LinearOperatorToeplitz")
class LinearOperatorToeplitz(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of toeplitz matrices.
This operator acts like a [batch] Toeplitz matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
  #### Description in terms of Toeplitz matrices
Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated
with two vectors. One represents the first column of the matrix, and the
other represents the first row.
Below is a 4 x 4 example:
```
A = |a b c d|
|e a b c|
|f e a b|
|g f e a|
```
#### Example of a Toeplitz operator.
```python
# Create a 3 x 3 Toeplitz operator.
col = [1., 2., 3.]
row = [1., 4., -9.]
operator = LinearOperatorToeplitz(col, row)
operator.to_dense()
==> [[1., 4., -9.],
[2., 1., 4.],
[3., 2., 1.]]
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
  ==> Shape [3, 4] Tensor
  ```
#### Shape compatibility
  This operator acts on [batch] matrices with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
col,
row,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorToeplitz"):
r"""Initialize a `LinearOperatorToeplitz`.
Args:
col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first column of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`col` is assumed to be the same as the first entry of `row`.
row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first row of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`row` is assumed to be the same as the first entry of `col`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. For a real Toeplitz operator this is the case exactly
        when `row` equals `col`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
"""
with ops.name_scope(name, values=[row, col]):
self._row = ops.convert_to_tensor(row, name="row")
self._col = ops.convert_to_tensor(col, name="col")
self._check_row_col(self._row, self._col)
circulant_col = array_ops.concat(
[self._col,
array_ops.zeros_like(self._col[..., 0:1]),
array_ops.reverse(self._row[..., 1:], axis=[-1])], axis=-1)
# To be used for matmul.
self._circulant = linear_operator_circulant.LinearOperatorCirculant(
fft_ops.fft(_to_complex(circulant_col)),
input_output_dtype=self._row.dtype)
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("Only square Toeplitz operators currently supported.")
is_square = True
super(LinearOperatorToeplitz, self).__init__(
dtype=self._row.dtype,
graph_parents=[self._row, self._col],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_row_col(self, row, col):
"""Static check of row and column."""
for name, tensor in [["row", row], ["col", col]]:
if tensor.get_shape().ndims is not None and tensor.get_shape().ndims < 1:
raise ValueError("Argument {} must have at least 1 dimension. "
"Found: {}".format(name, tensor))
if row.get_shape()[-1] is not None and col.get_shape()[-1] is not None:
if row.get_shape()[-1] != col.get_shape()[-1]:
raise ValueError(
"Expected square matrix, got row and col with mismatched "
"dimensions.")
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
v_shape = array_ops.broadcast_static_shape(
self.row.shape, self.col.shape)
return v_shape.concatenate(v_shape[-1:])
def _shape_tensor(self):
v_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(self.row),
array_ops.shape(self.col))
k = v_shape[-1]
return array_ops.concat((v_shape, [k]), 0)
def _assert_self_adjoint(self):
return check_ops.assert_equal(
self.row,
self.col,
message=("row and col are not the same, and "
"so this operator is not self-adjoint."))
# TODO(srvasude): Add efficient solver and determinant calculations to this
# class (based on Levinson recursion.)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a Toeplitz matrix, we can embed it in a Circulant matrix to perform
# efficient matrix multiplications. Given a Toeplitz matrix with first row
    # [t_0, t_1, ..., t_{n-1}] and first column [t_0, t_{-1}, ..., t_{-(n-1)}],
    # let C be the circulant matrix with first column [t_0, t_{-1}, ...,
    # t_{-(n-1)}, 0, t_{n-1}, ..., t_1]. Also adjoin to our input vector `x`
# `n` zeros, to make it a vector of length `2n` (call it y). It can be shown
# that if we take the first n entries of `Cy`, this is equal to the Toeplitz
# multiplication. See:
# http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf
# for more details.
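    # Concrete 3 x 3 illustration (added; not in the original comment): with
    # col = [1., 2., 3.] and row = [1., 4., -9.] as in the class docstring,
    # the circulant embedding built in __init__ has first column
    # [1., 2., 3., 0., -9., 4.]; padding a length-3 input with three zeros and
    # keeping the first three rows of the circulant product reproduces the
    # Toeplitz matmul.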
x = linalg.adjoint(x) if adjoint_arg else x
expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2)
result = self._circulant.matmul(
expanded_x, adjoint=adjoint, adjoint_arg=False)
return math_ops.cast(
result[..., :self.domain_dimension_tensor(), :],
self.dtype)
def _trace(self):
return math_ops.cast(
self.domain_dimension_tensor(),
dtype=self.dtype) * self.col[..., 0]
def _diag_part(self):
diag_entry = self.col[..., 0:1]
return diag_entry * array_ops.ones(
[self.domain_dimension_tensor()], self.dtype)
@property
def col(self):
return self._col
@property
def row(self):
return self._row
def _to_complex(x):
dtype = dtypes.complex64
if x.dtype in [dtypes.float64, dtypes.complex128]:
dtype = dtypes.complex128
return math_ops.cast(x, dtype)
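if __name__ == "__main__":  # pragma: no cover
  # Minimal smoke-test sketch (added for illustration; not part of the
  # original module). It mirrors the class docstring example and assumes a
  # TensorFlow build where this module imports cleanly; under eager execution
  # the prints show concrete values, under graph mode they show symbolic
  # tensors.
  col = [1., 2., 3.]
  row = [1., 4., -9.]
  operator = LinearOperatorToeplitz(col, row)
  print(operator.shape)        # (3, 3)
  print(operator.to_dense())   # dense 3 x 3 Toeplitz matrix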
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
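# Illustrative sketch (added; not part of the original module): a CustomHelper
# that greedily feeds back embedded argmax ids could be wired up roughly like
#
#     helper = CustomHelper(
#         initialize_fn=lambda: (tf.tile([False], [batch]), start_inputs),
#         sample_fn=lambda time, outputs, state: tf.argmax(
#             outputs, axis=-1, output_type=tf.int32),
#         next_inputs_fn=lambda time, outputs, state, sample_ids: (
#             tf.equal(sample_ids, end_token), embed(sample_ids), state))
#
# where `batch`, `start_inputs`, `end_token` and `embed` are placeholders for
# user-supplied values.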
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sampler = bernoulli.Bernoulli(
probs=self._sampling_probability, dtype=dtypes.bool)
select_sample = select_sampler.sample(
sample_shape=self.batch_size, seed=self._scheduling_seed)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
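# Worked illustration (added; not part of the original module): with
# sampling_probability = 0.25 and batch_size = 4, sample() might return
# e.g. [7, -1, -1, 3]; next_inputs() then embeds ids 7 and 3 via
# self._embedding_fn and scatters them back into the positions that sampled,
# while positions 1 and 2 keep the ground-truth inputs read from the input
# TensorArrays.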
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return sampler.sample(sample_shape=self.batch_size, seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_id_sampler = categorical.Categorical(logits=logits)
sample_ids = sample_id_sampler.sample(seed=self._seed)
return sample_ids
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
        the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
from . import core
from .core import InconsistentTableError, DefaultSplitter
from . import basic
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
"""
delimiter_pad = ''
bookend = False
delimiter = '|'
def __call__(self, lines):
for line in lines:
vals = [line[x.start:x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ''
delimiter = self.delimiter or ''
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ''
bookend_right = ''
vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
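# Illustrative sketch (added; not part of the original module): for a column
# with col.start = 2 and col.end = 8, the splitter yields line[2:8] for every
# data line, so fixed-width values are recovered purely by position rather
# than by delimiter.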
class FixedWidthHeaderSplitter(DefaultSplitter):
'''Splitter class that splits on ``|``.'''
delimiter = '|'
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError('No header line found in table')
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(self.position_line, self.process_lines(lines))
# If start_line is none then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError("Cannot set position_line without also setting header_start")
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
'No data lines found so cannot autogenerate column names')
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i)
for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - set([self.splitter.delimiter, ' '])) != 1:
raise InconsistentTableError(
'Position line should only contain delimiters and '
'one other character, e.g. "--- ------- ---".')
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
set([self.splitter.delimiter, ' ']))
if not set(line).issubset(charset):
raise InconsistentTableError(
f'Characters in position line must be part of {charset}')
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the header column names and column positions
line = self.get_line(lines, start_line)
vals, starts, ends = self.get_fixedwidth_params(line)
self.names = vals
self._set_cols_from_names()
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError('Fixed width col_starts and col_ends must have the same length')
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError('Error parsing fixed width header')
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
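        # Worked example (added; not in the original source) for the
        # delimiter-based branch above: with delimiter '|' and
        # line = "| col1 || col2 | col3 |", the null column from '||' is
        # dropped and the method returns
        #     vals   = ['col1', 'col2', 'col3']
        #     starts = [1, 9, 16]
        #     ends   = [7, 15, 22]
        # i.e. line[1:7], line[9:15] and line[16:22] are the padded values.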
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
class FixedWidthData(basic.BasicData):
"""
Base table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
def write(self, lines):
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max([len(vals[i]) for vals in vals_list])
if self.header.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
if self.header.start_line is not None:
lines.append(self.splitter.join([col.info.name for col in self.cols],
widths))
if self.header.position_line is not None:
char = self.header.position_char
if len(char) != 1:
raise ValueError(f'Position_char="{char}" must be a single character')
vals = [char * col.width for col in self.cols]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
See the :ref:`fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width'
_description = 'Fixed width'
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
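# Illustrative usage sketch (added; not part of the original module). This
# reader is normally reached through astropy.io.ascii, e.g.:
#
#     from astropy.io import ascii
#     lines = ['| Col1 |    Col2     |',
#              '|  1.2 | hello there |',
#              '|  2.4 | many words  |']
#     table = ascii.read(lines, format='fixed_width')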
class FixedWidthNoHeaderHeader(FixedWidthHeader):
    '''Header reader for fixed width tables with no header line'''
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
'''Data reader for fixed width tables with no header line'''
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
    (``col_starts`` and ``col_ends`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width_no_header'
_description = 'Fixed width with no header'
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__(col_starts, col_ends, delimiter_pad=delimiter_pad,
bookend=bookend)
class FixedWidthTwoLineHeader(FixedWidthHeader):
'''Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
'''
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
'''Splitter for fixed width tables splitting on ``' '``.'''
delimiter = ' '
class FixedWidthTwoLineData(FixedWidthData):
    '''Data reader for fixed width tables with two header lines.'''
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width_two_line'
_description = 'Fixed width with second header line'
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False):
super().__init__(delimiter_pad=delimiter_pad, bookend=bookend)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
|
|
"""Metadata read/write support for bup."""
# Copyright (C) 2010 Rob Browning
#
# This code is covered under the terms of the GNU Library General
# Public License as described in the bup LICENSE file.
import errno, os, sys, stat, pwd, grp, struct, re
from cStringIO import StringIO
from bup import vint, xstat
from bup.drecurse import recursive_dirlist
from bup.helpers import add_error, mkdirp, log, is_superuser
from bup.xstat import utime, lutime, lstat
import bup._helpers as _helpers
try:
import xattr
except ImportError:
log('Warning: Linux xattr support missing; install python-pyxattr.\n')
xattr = None
if xattr:
try:
xattr.get_all
except AttributeError:
log('Warning: python-xattr module is too old; '
'install python-pyxattr instead.\n')
xattr = None
try:
import posix1e
except ImportError:
log('Warning: POSIX ACL support missing; install python-pylibacl.\n')
posix1e = None
try:
from bup._helpers import get_linux_file_attr, set_linux_file_attr
except ImportError:
# No need for a warning here; the only reason they won't exist is that we're
# not on Linux, in which case files don't have any linux attrs anyway, so
# lacking the functions isn't a problem.
get_linux_file_attr = set_linux_file_attr = None
# WARNING: the metadata encoding is *not* stable yet. Caveat emptor!
# Q: Consider hardlink support?
# Q: Is it OK to store raw linux attr (chattr) flags?
# Q: Can anything other than S_ISREG(x) or S_ISDIR(x) support posix1e ACLs?
# Q: Is the application of posix1e has_extended() correct?
# Q: Is one global --numeric-ids argument sufficient?
# Q: Do nfsv4 acls trump posix1e acls? (seems likely)
# Q: Add support for crtime -- ntfs, and (only internally?) ext*?
# FIXME: Fix relative/abs path detection/stripping wrt other platforms.
# FIXME: Add nfsv4 acl handling - see nfs4-acl-tools.
# FIXME: Consider other entries mentioned in stat(2) (S_IFDOOR, etc.).
# FIXME: Consider pack('vvvvsss', ...) optimization.
# FIXME: Consider caching users/groups.
## FS notes:
#
# osx (varies between hfs and hfs+):
# type - regular dir char block fifo socket ...
# perms - rwxrwxrwxsgt
# times - ctime atime mtime
# uid
# gid
# hard-link-info (hfs+ only)
# link-target
# device-major/minor
# attributes-osx see chflags
# content-type
# content-creator
# forks
#
# ntfs
# type - regular dir ...
# times - creation, modification, posix change, access
# hard-link-info
# link-target
# attributes - see attrib
# ACLs
# forks (alternate data streams)
# crtime?
#
# fat
# type - regular dir ...
# perms - rwxrwxrwx (maybe - see wikipedia)
# times - creation, modification, access
# attributes - see attrib
verbose = 0
_have_lchmod = hasattr(os, 'lchmod')
def _clean_up_path_for_archive(p):
# Not the most efficient approach.
result = p
# Take everything after any '/../'.
pos = result.rfind('/../')
if pos != -1:
result = result[result.rfind('/../') + 4:]
# Take everything after any remaining '../'.
if result.startswith("../"):
result = result[3:]
# Remove any '/./' sequences.
pos = result.find('/./')
while pos != -1:
result = result[0:pos] + '/' + result[pos + 3:]
pos = result.find('/./')
# Remove any leading '/'s.
result = result.lstrip('/')
# Replace '//' with '/' everywhere.
pos = result.find('//')
while pos != -1:
result = result[0:pos] + '/' + result[pos + 2:]
pos = result.find('//')
# Take everything after any remaining './'.
if result.startswith('./'):
result = result[2:]
# Take everything before any remaining '/.'.
if result.endswith('/.'):
result = result[:-2]
if result == '' or result.endswith('/..'):
result = '.'
return result
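# A few concrete cases (added for illustration; not in the original source):
#
#     _clean_up_path_for_archive('/usr/local/bin')    # -> 'usr/local/bin'
#     _clean_up_path_for_archive('x/../etc/passwd')   # -> 'etc/passwd'
#     _clean_up_path_for_archive('./a//b/.')          # -> 'a/b'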
def _risky_path(p):
if p.startswith('/'):
return True
if p.find('/../') != -1:
return True
if p.startswith('../'):
return True
if p.endswith('/..'):
return True
return False
def _clean_up_extract_path(p):
result = p.lstrip('/')
if result == '':
return '.'
elif _risky_path(result):
return None
else:
return result
# These tags are currently conceptually private to Metadata, and they
# must be unique, and must *never* be changed.
_rec_tag_end = 0
_rec_tag_path = 1
_rec_tag_common = 2 # times, owner, group, type, perms, etc.
_rec_tag_symlink_target = 3
_rec_tag_posix1e_acl = 4 # getfacl(1), setfacl(1), etc.
_rec_tag_nfsv4_acl = 5 # intended to supplant posix1e acls?
_rec_tag_linux_attr = 6 # lsattr(1) chattr(1)
_rec_tag_linux_xattr = 7 # getfattr(1) setfattr(1)
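# Record stream sketch (added for illustration; not part of the original
# source): Metadata.write() below emits each entry as a series of
# (vuint tag, bvec payload) pairs, e.g.
#     [_rec_tag_path][path rec][_rec_tag_common][common rec]...[_rec_tag_end]
# and Metadata.read() consumes tags until it sees _rec_tag_end.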
class ApplyError(Exception):
# Thrown when unable to apply any given bit of metadata to a path.
pass
class Metadata:
# Metadata is stored as a sequence of tagged binary records. Each
# record will have some subset of add, encode, load, create, and
# apply methods, i.e. _add_foo...
## Common records
# Timestamps are (sec, ns), relative to 1970-01-01 00:00:00, ns
# must be non-negative and < 10**9.
def _add_common(self, path, st):
self.mode = st.st_mode
self.uid = st.st_uid
self.gid = st.st_gid
self.rdev = st.st_rdev
self.atime = st.st_atime
self.mtime = st.st_mtime
self.ctime = st.st_ctime
self.owner = self.group = ''
try:
self.owner = pwd.getpwuid(st.st_uid)[0]
except KeyError, e:
add_error("no user name for id %s '%s'" % (st.st_gid, path))
try:
self.group = grp.getgrgid(st.st_gid)[0]
except KeyError, e:
add_error("no group name for id %s '%s'" % (st.st_gid, path))
def _encode_common(self):
atime = xstat.nsecs_to_timespec(self.atime)
mtime = xstat.nsecs_to_timespec(self.mtime)
ctime = xstat.nsecs_to_timespec(self.ctime)
result = vint.pack('VVsVsVvVvVvV',
self.mode,
self.uid,
self.owner,
self.gid,
self.group,
self.rdev,
atime[0],
atime[1],
mtime[0],
mtime[1],
ctime[0],
ctime[1])
return result
def _load_common_rec(self, port):
data = vint.read_bvec(port)
(self.mode,
self.uid,
self.owner,
self.gid,
self.group,
self.rdev,
self.atime,
atime_ns,
self.mtime,
mtime_ns,
self.ctime,
ctime_ns) = vint.unpack('VVsVsVvVvVvV', data)
self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))
def _recognized_file_type(self):
return stat.S_ISREG(self.mode) \
or stat.S_ISDIR(self.mode) \
or stat.S_ISCHR(self.mode) \
or stat.S_ISBLK(self.mode) \
or stat.S_ISFIFO(self.mode) \
or stat.S_ISSOCK(self.mode) \
or stat.S_ISLNK(self.mode)
def _create_via_common_rec(self, path, create_symlinks=True):
# If the path already exists and is a dir, try rmdir.
# If the path already exists and is anything else, try unlink.
st = None
try:
st = xstat.lstat(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if st:
if stat.S_ISDIR(st.st_mode):
try:
os.rmdir(path)
except OSError, e:
if e.errno == errno.ENOTEMPTY:
                        msg = 'refusing to overwrite non-empty dir ' + path
raise Exception(msg)
raise
else:
os.unlink(path)
if stat.S_ISREG(self.mode):
assert(self._recognized_file_type())
fd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, 0600)
os.close(fd)
elif stat.S_ISDIR(self.mode):
assert(self._recognized_file_type())
os.mkdir(path, 0700)
elif stat.S_ISCHR(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFCHR, self.rdev)
elif stat.S_ISBLK(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFBLK, self.rdev)
elif stat.S_ISFIFO(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFIFO)
elif stat.S_ISSOCK(self.mode):
os.mknod(path, 0600 | stat.S_IFSOCK)
elif stat.S_ISLNK(self.mode):
assert(self._recognized_file_type())
if self.symlink_target and create_symlinks:
# on MacOS, symlink() permissions depend on umask, and there's
# no way to chown a symlink after creating it, so we have to
# be careful here!
oldumask = os.umask((self.mode & 0777) ^ 0777)
try:
os.symlink(self.symlink_target, path)
finally:
os.umask(oldumask)
# FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
else:
assert(not self._recognized_file_type())
add_error('not creating "%s" with unrecognized mode "0x%x"\n'
% (path, self.mode))
def _apply_common_rec(self, path, restore_numeric_ids=False):
# FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
# EACCES errors at this stage are fatal for the current path.
if lutime and stat.S_ISLNK(self.mode):
try:
lutime(path, (self.atime, self.mtime))
except OSError, e:
if e.errno == errno.EACCES:
raise ApplyError('lutime: %s' % e)
else:
raise
else:
try:
utime(path, (self.atime, self.mtime))
except OSError, e:
if e.errno == errno.EACCES:
raise ApplyError('utime: %s' % e)
else:
raise
# Don't try to restore owner unless we're root, and even
# if asked, don't try to restore the owner or group if
# it doesn't exist in the system db.
uid = self.uid
gid = self.gid
if not restore_numeric_ids:
if not self.owner:
uid = -1
add_error('ignoring missing owner for "%s"\n' % path)
else:
if not is_superuser():
uid = -1 # Not root; assume we can't change owner.
else:
try:
uid = pwd.getpwnam(self.owner)[2]
except KeyError:
uid = -1
fmt = 'ignoring unknown owner %s for "%s"\n'
add_error(fmt % (self.owner, path))
if not self.group:
gid = -1
add_error('ignoring missing group for "%s"\n' % path)
else:
try:
gid = grp.getgrnam(self.group)[2]
except KeyError:
gid = -1
add_error('ignoring unknown group %s for "%s"\n'
% (self.group, path))
try:
os.lchown(path, uid, gid)
except OSError, e:
if e.errno == errno.EPERM:
add_error('lchown: %s' % e)
else:
raise
if _have_lchmod:
os.lchmod(path, stat.S_IMODE(self.mode))
elif not stat.S_ISLNK(self.mode):
os.chmod(path, stat.S_IMODE(self.mode))
## Path records
def _encode_path(self):
if self.path:
return vint.pack('s', self.path)
else:
return None
def _load_path_rec(self, port):
self.path = vint.unpack('s', vint.read_bvec(port))[0]
## Symlink targets
def _add_symlink_target(self, path, st):
try:
if stat.S_ISLNK(st.st_mode):
self.symlink_target = os.readlink(path)
except OSError, e:
            add_error('readlink: %s' % e)
def _encode_symlink_target(self):
return self.symlink_target
def _load_symlink_target_rec(self, port):
self.symlink_target = vint.read_bvec(port)
## POSIX1e ACL records
# Recorded as a list:
# [txt_id_acl, num_id_acl]
# or, if a directory:
# [txt_id_acl, num_id_acl, txt_id_default_acl, num_id_default_acl]
# The numeric/text distinction only matters when reading/restoring
# a stored record.
def _add_posix1e_acl(self, path, st):
if not posix1e: return
if not stat.S_ISLNK(st.st_mode):
try:
if posix1e.has_extended(path):
acl = posix1e.ACL(file=path)
self.posix1e_acl = [acl, acl] # txt and num are the same
if stat.S_ISDIR(st.st_mode):
acl = posix1e.ACL(filedef=path)
self.posix1e_acl.extend([acl, acl])
except EnvironmentError, e:
if e.errno != errno.EOPNOTSUPP:
raise
def _encode_posix1e_acl(self):
# Encode as two strings (w/default ACL string possibly empty).
if self.posix1e_acl:
acls = self.posix1e_acl
txt_flags = posix1e.TEXT_ABBREVIATE
num_flags = posix1e.TEXT_ABBREVIATE | posix1e.TEXT_NUMERIC_IDS
acl_reps = [acls[0].to_any_text('', '\n', txt_flags),
acls[1].to_any_text('', '\n', num_flags)]
if len(acls) < 3:
acl_reps += ['', '']
else:
acl_reps.append(acls[2].to_any_text('', '\n', txt_flags))
acl_reps.append(acls[3].to_any_text('', '\n', num_flags))
return vint.pack('ssss',
acl_reps[0], acl_reps[1], acl_reps[2], acl_reps[3])
else:
return None
def _load_posix1e_acl_rec(self, port):
data = vint.read_bvec(port)
acl_reps = vint.unpack('ssss', data)
if acl_reps[2] == '':
acl_reps = acl_reps[:2]
self.posix1e_acl = [posix1e.ACL(text=x) for x in acl_reps]
def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False):
if not posix1e:
if self.posix1e_acl:
add_error("%s: can't restore ACLs; posix1e support missing.\n"
% path)
return
if self.posix1e_acl:
acls = self.posix1e_acl
if len(acls) > 2:
if restore_numeric_ids:
acls[3].applyto(path, posix1e.ACL_TYPE_DEFAULT)
else:
acls[2].applyto(path, posix1e.ACL_TYPE_DEFAULT)
if restore_numeric_ids:
acls[1].applyto(path, posix1e.ACL_TYPE_ACCESS)
else:
acls[0].applyto(path, posix1e.ACL_TYPE_ACCESS)
## Linux attributes (lsattr(1), chattr(1))
def _add_linux_attr(self, path, st):
if not get_linux_file_attr: return
if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
try:
attr = get_linux_file_attr(path)
if attr != 0:
self.linux_attr = attr
except OSError, e:
if e.errno == errno.EACCES:
add_error('read Linux attr: %s' % e)
elif e.errno == errno.ENOTTY: # Inappropriate ioctl for device.
add_error('read Linux attr: %s' % e)
else:
raise
def _encode_linux_attr(self):
if self.linux_attr:
return vint.pack('V', self.linux_attr)
else:
return None
def _load_linux_attr_rec(self, port):
data = vint.read_bvec(port)
self.linux_attr = vint.unpack('V', data)[0]
def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
if self.linux_attr:
if not set_linux_file_attr:
add_error("%s: can't restore linuxattrs: "
"linuxattr support missing.\n" % path)
return
set_linux_file_attr(path, self.linux_attr)
## Linux extended attributes (getfattr(1), setfattr(1))
def _add_linux_xattr(self, path, st):
if not xattr: return
try:
self.linux_xattr = xattr.get_all(path, nofollow=True)
except EnvironmentError, e:
if e.errno != errno.EOPNOTSUPP:
raise
def _encode_linux_xattr(self):
if self.linux_xattr:
result = vint.pack('V', len(self.linux_xattr))
for name, value in self.linux_xattr:
result += vint.pack('ss', name, value)
return result
else:
return None
def _load_linux_xattr_rec(self, file):
data = vint.read_bvec(file)
memfile = StringIO(data)
result = []
for i in range(vint.read_vuint(memfile)):
key = vint.read_bvec(memfile)
value = vint.read_bvec(memfile)
result.append((key, value))
self.linux_xattr = result
def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
if not xattr:
if self.linux_xattr:
add_error("%s: can't restore xattr; xattr support missing.\n"
% path)
return
existing_xattrs = set(xattr.list(path, nofollow=True))
if self.linux_xattr:
for k, v in self.linux_xattr:
if k not in existing_xattrs \
or v != xattr.get(path, k, nofollow=True):
try:
xattr.set(path, k, v, nofollow=True)
except IOError, e:
if e.errno == errno.EPERM:
raise ApplyError('xattr.set: %s' % e)
else:
raise
existing_xattrs -= frozenset([k])
for k in existing_xattrs:
try:
xattr.remove(path, k, nofollow=True)
except IOError, e:
if e.errno == errno.EPERM:
raise ApplyError('xattr.remove: %s' % e)
else:
raise
def __init__(self):
# optional members
self.path = None
self.symlink_target = None
self.linux_attr = None
self.linux_xattr = None
self.posix1e_acl = None
self.posix1e_acl_default = None
def write(self, port, include_path=True):
records = include_path and [(_rec_tag_path, self._encode_path())] or []
records.extend([(_rec_tag_common, self._encode_common()),
(_rec_tag_symlink_target, self._encode_symlink_target()),
(_rec_tag_posix1e_acl, self._encode_posix1e_acl()),
(_rec_tag_linux_attr, self._encode_linux_attr()),
(_rec_tag_linux_xattr, self._encode_linux_xattr())])
for tag, data in records:
if data:
vint.write_vuint(port, tag)
vint.write_bvec(port, data)
vint.write_vuint(port, _rec_tag_end)
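    # Illustrative stream layout produced by write() above (a sketch, not a
    # byte-accurate dump): for every record whose encoder returned data, a
    # vuint tag followed by a length-prefixed bvec payload, terminated by a
    # bare _rec_tag_end tag with no payload.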
@staticmethod
def read(port):
# This method should either: return a valid Metadata object;
# throw EOFError if there was nothing at all to read; throw an
# Exception if a valid object could not be read completely.
tag = vint.read_vuint(port)
try: # From here on, EOF is an error.
result = Metadata()
while True: # only exit is error (exception) or _rec_tag_end
if tag == _rec_tag_path:
result._load_path_rec(port)
elif tag == _rec_tag_common:
result._load_common_rec(port)
elif tag == _rec_tag_symlink_target:
result._load_symlink_target_rec(port)
elif tag == _rec_tag_posix1e_acl:
result._load_posix1e_acl_rec(port)
                elif tag == _rec_tag_nfsv4_acl:
result._load_nfsv4_acl_rec(port)
elif tag == _rec_tag_linux_attr:
result._load_linux_attr_rec(port)
elif tag == _rec_tag_linux_xattr:
result._load_linux_xattr_rec(port)
elif tag == _rec_tag_end:
return result
else: # unknown record
vint.skip_bvec(port)
tag = vint.read_vuint(port)
except EOFError:
raise Exception("EOF while reading Metadata")
def isdir(self):
return stat.S_ISDIR(self.mode)
def create_path(self, path, create_symlinks=True):
self._create_via_common_rec(path, create_symlinks=create_symlinks)
def apply_to_path(self, path=None, restore_numeric_ids=False):
# apply metadata to path -- file must exist
if not path:
path = self.path
if not path:
            raise Exception('Metadata.apply_to_path() called with no path')
if not self._recognized_file_type():
add_error('not applying metadata to "%s"' % path
+ ' with unrecognized mode "0x%x"\n' % self.mode)
return
num_ids = restore_numeric_ids
try:
self._apply_common_rec(path, restore_numeric_ids=num_ids)
self._apply_posix1e_acl_rec(path, restore_numeric_ids=num_ids)
self._apply_linux_attr_rec(path, restore_numeric_ids=num_ids)
self._apply_linux_xattr_rec(path, restore_numeric_ids=num_ids)
except ApplyError, e:
add_error(e)
def from_path(path, statinfo=None, archive_path=None, save_symlinks=True):
result = Metadata()
result.path = archive_path
st = statinfo or xstat.lstat(path)
result._add_common(path, st)
if save_symlinks:
result._add_symlink_target(path, st)
result._add_posix1e_acl(path, st)
result._add_linux_attr(path, st)
result._add_linux_xattr(path, st)
return result
def save_tree(output_file, paths,
recurse=False,
write_paths=True,
save_symlinks=True,
xdev=False):
# Issue top-level rewrite warnings.
for path in paths:
safe_path = _clean_up_path_for_archive(path)
if safe_path != path:
log('archiving "%s" as "%s"\n' % (path, safe_path))
start_dir = os.getcwd()
try:
for (p, st) in recursive_dirlist(paths, xdev=xdev):
dirlist_dir = os.getcwd()
os.chdir(start_dir)
safe_path = _clean_up_path_for_archive(p)
m = from_path(p, statinfo=st, archive_path=safe_path,
save_symlinks=save_symlinks)
if verbose:
print >> sys.stderr, m.path
m.write(output_file, include_path=write_paths)
os.chdir(dirlist_dir)
finally:
os.chdir(start_dir)
def _set_up_path(meta, create_symlinks=True):
# Allow directories to exist as a special case -- might have
# been created by an earlier longer path.
if meta.isdir():
mkdirp(meta.path)
else:
parent = os.path.dirname(meta.path)
if parent:
mkdirp(parent)
meta.create_path(meta.path, create_symlinks=create_symlinks)
class _ArchiveIterator:
def next(self):
try:
return Metadata.read(self._file)
except EOFError:
raise StopIteration()
def __iter__(self):
return self
def __init__(self, file):
self._file = file
def display_archive(file):
for meta in _ArchiveIterator(file):
if verbose:
print meta.path # FIXME
else:
print meta.path
def start_extract(file, create_symlinks=True):
for meta in _ArchiveIterator(file):
if verbose:
print >> sys.stderr, meta.path
xpath = _clean_up_extract_path(meta.path)
if not xpath:
add_error(Exception('skipping risky path "%s"' % meta.path))
else:
meta.path = xpath
_set_up_path(meta, create_symlinks=create_symlinks)
def finish_extract(file, restore_numeric_ids=False):
all_dirs = []
for meta in _ArchiveIterator(file):
xpath = _clean_up_extract_path(meta.path)
if not xpath:
            add_error(Exception('skipping risky path "%s"' % meta.path))
else:
if os.path.isdir(meta.path):
all_dirs.append(meta)
else:
if verbose:
print >> sys.stderr, meta.path
meta.apply_to_path(path=xpath,
restore_numeric_ids=restore_numeric_ids)
all_dirs.sort(key = lambda x : len(x.path), reverse=True)
for dir in all_dirs:
# Don't need to check xpath -- won't be in all_dirs if not OK.
xpath = _clean_up_extract_path(dir.path)
if verbose:
print >> sys.stderr, dir.path
dir.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids)
def extract(file, restore_numeric_ids=False, create_symlinks=True):
# For now, just store all the directories and handle them last,
# longest first.
all_dirs = []
for meta in _ArchiveIterator(file):
xpath = _clean_up_extract_path(meta.path)
if not xpath:
add_error(Exception('skipping risky path "%s"' % meta.path))
else:
meta.path = xpath
if verbose:
print >> sys.stderr, '+', meta.path
_set_up_path(meta, create_symlinks=create_symlinks)
if os.path.isdir(meta.path):
all_dirs.append(meta)
else:
if verbose:
print >> sys.stderr, '=', meta.path
meta.apply_to_path(restore_numeric_ids=restore_numeric_ids)
all_dirs.sort(key = lambda x : len(x.path), reverse=True)
for dir in all_dirs:
# Don't need to check xpath -- won't be in all_dirs if not OK.
xpath = _clean_up_extract_path(dir.path)
if verbose:
print >> sys.stderr, '=', xpath
# Shouldn't have to check for risky paths here (omitted above).
dir.apply_to_path(path=dir.path,
restore_numeric_ids=restore_numeric_ids)
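def _example_metadata_roundtrip(paths, archive_name='metadata.example'):
    # Illustrative usage sketch only (not part of the original module): write
    # metadata records for the given paths into a scratch archive, then read
    # every record back with _ArchiveIterator.  The archive name is made up,
    # and the module's existing sys import is assumed.
    f = open(archive_name, 'w+b')
    try:
        save_tree(f, paths, write_paths=True, save_symlinks=True)
        f.seek(0)
        for meta in _ArchiveIterator(f):
            print >> sys.stderr, 'stored metadata for %s' % meta.path
    finally:
        f.close()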
|
|
# testing/requirements.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
from . import exclusions
from .. import util
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage + python 3.3
"""
return exclusions.skip_if(
lambda config: util.py3k and config.options.has_coverage,
"Stability issues with coverage + py3k"
)
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as Postgresql notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled"
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
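# Illustrative sketch (not part of SQLAlchemy itself): an external dialect's
# test suite would typically subclass SuiteRequirements and open or close
# individual rules; the backend capabilities assumed below are hypothetical.
class _ExampleDialectRequirements(SuiteRequirements):
    @property
    def window_functions(self):
        """This hypothetical backend supports window functions."""
        return exclusions.open()
    @property
    def temporary_views(self):
        """...but does not support temporary views."""
        return exclusions.closed()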
|
|
#
# flightlines_to_DTM_and_DSM.py
#
# (c) 2014, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# This LAStools pipeline turns a folder of very large
# LAS or LAZ flightlines into DTM and DSM rasters by
# operating with a tile-based multi-core pipeline. The
# input files are first tiled using lastile with the
# specified tile size. The specified buffer is used to
# avoid edge artifacts. All tiles are then ground
# classified using lasground, marking points as ground
# (class 2) and non-ground (class 1). Next las2dem
# rasters a DTM from the ground points and a DSM from
# the first returns of each tile with the requested
# step size. Finally the buffers are removed from the
# ground-classified point tiles with lastile and all
# temporary files are deleted.
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# LiDAR output: LAS/LAZ/BIN/TXT
#
# for licensing see http://lastools.org/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
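### example (illustrative only): each step below builds a command list,
### reports it, and then runs it through check_output in the same way, e.g.
###   returncode,output = check_output(command, False)
###   if returncode != 0: report the failure and exit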
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting flightlines_to_DTM_and_DSM ...")
### define positions of arguments in argv array
arg_input_folder = 1
arg_tile_size = 2
arg_buffer = 3
arg_terrain_type = 4
arg_step = 5
arg_cores = 6
arg_empty_temp_dir = 7
arg_output_dir = 8
arg_output_base_name = 9
arg_output_rformat = 10
arg_output_pformat = 11
arg_verbose = 12
arg_count_needed = 13
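### example argument list (illustrative values only, in the order above):
###   C:\lidar\strips 1000 25 wilderness 0.5 4 C:\lidar\temp C:\lidar\out tile tif laz true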
### get number of arguments
argc = len(sys.argv)
### make sure we have right number of arguments
if argc != arg_count_needed:
gp.AddMessage("Error. Wrong number of arguments. Got " + str(argc) + " expected " + str(arg_count_needed))
sys.exit(1)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get selected arguments
empty_temp_dir = sys.argv[arg_empty_temp_dir]
output_base_name = sys.argv[arg_output_base_name]
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### make sure the path does not contain open or closing brackets
if (lastools_path.count("(") > 0) or (lastools_path.count(")") > 0):
gp.AddMessage("Error. Path to .\\lastools installation contains brackets.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lastile executable
lastile_path = lastools_path+"\\lastile.exe"
### check if the lastile executable exists
if os.path.exists(lastile_path) == False:
gp.AddMessage("Cannot find lastile.exe at " + lastile_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastile_path + " ...")
### create the full path to the lasground executable
lasground_path = lastools_path+"\\lasground.exe"
### check if the lasground executable exists
if os.path.exists(lasground_path) == False:
gp.AddMessage("Cannot find lasground.exe at " + lasground_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasground_path + " ...")
### create the full path to the las2dem executable
las2dem_path = lastools_path+"\\las2dem.exe"
### check if the las2dem executable exists
if os.path.exists(las2dem_path) == False:
gp.AddMessage("Cannot find las2dem.exe at " + las2dem_path)
sys.exit(1)
else:
gp.AddMessage("Found " + las2dem_path + " ...")
### check if the empty temp directory exists
if os.path.exists(empty_temp_dir) == False:
gp.AddMessage("Cannot find empty temp dir " + empty_temp_dir)
sys.exit(1)
else:
gp.AddMessage("Found " + empty_temp_dir + " ...")
### make sure the empty temp directory is empty
if os.listdir(empty_temp_dir) != []:
gp.AddMessage("Empty temp directory '" + empty_temp_dir + "' is not empty")
sys.exit(1)
else:
gp.AddMessage("And it's empty ...")
###################################################
### first step: tile folder of flightlines
###################################################
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+sys.argv[arg_input_folder]+'\\*.las"')
command.append("-i")
command.append('"'+sys.argv[arg_input_folder]+'\\*.laz"')
### they are flight lines
command.append("-files_are_flightlines")
### maybe use a user-defined tile size
if sys.argv[arg_tile_size] != "1000":
command.append("-tile_size")
command.append(sys.argv[arg_tile_size].replace(",","."))
### maybe create a buffer around the tiles
if sys.argv[arg_buffer] != "0":
command.append("-buffer")
command.append(sys.argv[arg_buffer].replace(",","."))
### an empty temp directory must have been selected
if empty_temp_dir != "#":
command.append("-odir")
command.append('"'+empty_temp_dir+'"')
else:
gp.AddMessage("Error. no empty temp directory was specified.")
sys.exit(1)
### use default if base name not given
if output_base_name == "#":
output_base_name = "tile"
### give tiles a base name
command.append("-o")
command.append('"' + output_base_name + '.laz"')
### store temporary tiles in compressed format
command.append("-olaz")
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lastile
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in lastile step.")
sys.exit(1)
### report success
gp.AddMessage("lastile step done.")
###################################################
### second step: ground classify each tile
###################################################
### create the command string for lasground.exe
command = ['"'+lasground_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"' + empty_temp_dir + "\\" + output_base_name + "*.laz" + '"')
### what type of terrain do we have
if sys.argv[arg_terrain_type] == "wilderness":
command.append("-wilderness")
elif sys.argv[arg_terrain_type] == "city or warehouses":
command.append("-city")
command.append("-extra_fine")
elif sys.argv[arg_terrain_type] == "towns or flats":
command.append("-town")
command.append("-fine")
elif sys.argv[arg_terrain_type] == "metropolis":
command.append("-metro")
command.append("-ultra_fine")
### give ground-classified tiles a meaningful appendix
command.append("-odix")
command.append("_g")
### store ground-classified tiles in compressed format
command.append("-olaz")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasground
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in lasground step.")
sys.exit(1)
### report success
gp.AddMessage("lasground step done.")
###################################################
### third step: raster the DTMs
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"' + empty_temp_dir + "\\" + output_base_name + "*_g.laz" + '"')
### filter ground points
command.append("-keep_class")
command.append("2")
### do an extra pass for smaller memory footprints
command.append("-extra_pass")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in requested directory
command.append("-odir")
command.append(sys.argv[arg_output_dir])
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("2")
command.append("-odix")
command.append("_dtm")
### store rastered tiles in requested format
command.append("-o" + sys.argv[arg_output_rformat])
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in las2dem (DTM) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (DTM) step done.")
###################################################
### fourth step: raster the DSMs
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"' + empty_temp_dir + "\\" + output_base_name + "*_g.laz" + '"')
### filter first returns
command.append("-first_only")
### do an extra pass for smaller memory footprints
command.append("-extra_pass")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in requested directory
command.append("-odir")
command.append(sys.argv[arg_output_dir])
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("2")
command.append("-odix")
command.append("_dsm")
### store rastered tiles in requested format
command.append("-o" + sys.argv[arg_output_rformat])
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in las2dem (DSM) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (DSM) step done.")
###################################################
### fifth step: remove buffers from tiles
###################################################
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"' + empty_temp_dir + "\\" + output_base_name + "*_g.laz" + '"')
### use mode remove buffer
command.append("-remove_buffer")
### store rastered tiles in requested directory
command.append("-odir")
command.append(sys.argv[arg_output_dir])
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("2")
### store classified tiles in requested format
command.append("-o" + sys.argv[arg_output_pformat])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lastile (remove)
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in lastile (remove) step.")
sys.exit(1)
### report success
gp.AddMessage("lastile (remove) step done.")
###################################################
### final step: clean-up all temporary files
###################################################
### create the command string for clean-up
command = ["del"]
### add temporary files wildcard
command.append('"' + empty_temp_dir + "\\" + output_base_name + "*.laz" + '"')
### report command string
gp.AddMessage("clean-up command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of clean-up
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_DTM_and_DSM failed in clean-up step.")
sys.exit(1)
### report success
gp.AddMessage("clean-up step done.")
### report happy end
gp.AddMessage("Success. flightlines_to_DTM_and_DSM done.")
|
|
#!/usr/bin/env python
'''
Copyright 2012 the original author or authors.
See the NOTICE file distributed with this work for additional
information regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import rospilot
import rospy
import rospilot.msg
import rospilot.srv
from serial.serialutil import SerialException
from pymavlink import mavutil
from geometry_msgs.msg import Vector3
from optparse import OptionParser
from time import time
from glob import glob
class MavlinkNode:
def __init__(self, device, baudrate, export_host, allow_control):
self.export_conn = None
self.rate = 10
if export_host:
self.export_conn = mavutil.mavlink_connection(
"udp:" + export_host, input=False)
if device == "auto":
candidates = glob("/dev/ardupilot_*")
if len(candidates) != 1:
raise SerialException("Cannot find Ardupilot device")
device = candidates[0]
baudrate = int(device.split("_")[1])
self.conn = mavutil.mavlink_connection(device, baud=baudrate)
self.pub_battery = rospy.Publisher('battery', rospilot.msg.Battery, queue_size=1)
self.pub_attitude = rospy.Publisher('attitude', rospilot.msg.Attitude, queue_size=1)
self.pub_rcstate = rospy.Publisher('rcstate', rospilot.msg.RCState, queue_size=1)
self.pub_gpsraw = rospy.Publisher('gpsraw', rospilot.msg.GPSRaw, queue_size=1)
self.pub_imuraw = rospy.Publisher('imuraw', rospilot.msg.IMURaw, queue_size=1)
self.pub_basic_status = rospy.Publisher('basic_status',
rospilot.msg.BasicStatus, queue_size=1)
self.pub_waypoints = rospy.Publisher('waypoints',
rospilot.msg.Waypoints, queue_size=1)
rospy.Subscriber("set_rc", rospilot.msg.RCState,
self.handle_set_rc)
rospy.Service('set_waypoints',
rospilot.srv.SetWaypoints,
self.handle_set_waypoints)
rospy.Service('set_mode',
rospilot.srv.SetBasicMode,
self.handle_set_mode)
self.allow_control = allow_control.lower() in ["true", "1"]
self.enable_control = False
# Safety, in case radio has control enabled on start-up
self.enable_control_has_been_false = False
# Waypoints are read and written using a stateful API
# this buffer stores the queued writes/partial reads
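        # Illustrative summary of the stateful exchange handled in run() below:
        #   read:  MISSION_REQUEST_LIST -> MISSION_COUNT -> MISSION_REQUEST(i)
        #          -> MISSION_ITEM(i) ... -> MISSION_ACK
        #   write: MISSION_COUNT -> MISSION_REQUEST(i) -> MISSION_ITEM(i) ...
        #          -> MISSION_ACK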
self.waypoint_buffer = []
self.num_waypoints = 0
self.waypoint_read_in_progress = False
self.waypoint_write_in_progress = False
self.last_waypoint_message_time = 0
self.last_vibration_message_time = 0
self.vibration_vector = Vector3(0, 0, 0)
self.accelerometer_clipping_counts = [0, 0, 0]
def reset_rc_override(self):
# Send 0 to reset the channel
self.conn.mav.rc_channels_override_send(
self.conn.target_system, self.conn.target_component,
0, 0, 0, 0, 0, 0, 0, 0)
def handle_set_waypoints(self, message):
if self.waypoint_read_in_progress or self.waypoint_write_in_progress:
rospy.logwarn("Can't write waypoints because a read/write is already in progress")
return rospilot.srv.SetWaypointsResponse()
if message.waypoints:
self.waypoint_write_in_progress = True
# XXX: APM seems to overwrite index 0, so insert the first waypoint
# twice
self.waypoint_buffer = [message.waypoints[0]] + message.waypoints
self.last_waypoint_message_time = time()
self.conn.mav.mission_count_send(
self.conn.target_system,
self.conn.target_component,
len(self.waypoint_buffer))
return rospilot.srv.SetWaypointsResponse()
def handle_set_rc(self, message):
if self.allow_control and self.enable_control and \
self.enable_control_has_been_false:
# channel 8 is ignored, since that's the enable control channel
self.conn.mav.rc_channels_override_send(
self.conn.target_system, self.conn.target_component,
message.channel[0], message.channel[1],
message.channel[2], message.channel[3],
message.channel[4], message.channel[5],
message.channel[6], 0)
def handle_set_mode(self, data):
# XXX: This code should work,
# but the APM doesn't seem to listen to set_mode messages :(
# See MAV_MODE_FLAG in pymavlink.mavlinkv10
# self.conn.mav.set_mode_send(self.conn.target_system,
# 209 if data.armed else 81, 0)
        # So instead we fake the transmitter signals
self.conn.mav.rc_channels_override_send(
self.conn.target_system,
self.conn.target_component, 0, 0,
1000, # throttle to zero
# yaw full right to arm, left to disarm
2000 if data.armed else 1000,
0, 0, 0, 0)
rospy.sleep(5)
self.conn.mav.rc_channels_override_send(
self.conn.target_system,
self.conn.target_component, 0, 0, 0, 0, 0, 0, 0, 0)
return rospilot.srv.SetBasicModeResponse()
def request_waypoints(self):
if self.waypoint_read_in_progress or self.waypoint_write_in_progress:
return
self.last_waypoint_message_time = time()
self.conn.mav.mission_request_list_send(
self.conn.target_system,
self.conn.target_component)
self.waypoint_read_in_progress = True
def run(self):
rospy.loginfo("Waiting for heartbeat")
try:
while not self.conn.wait_heartbeat(blocking=False) and not rospy.is_shutdown():
pass
if rospy.is_shutdown():
return
except SerialException as e:
# Ignore since we're shutting down
return
rospy.loginfo("Got heartbeat. Waiting 10secs for APM to be ready")
rospy.sleep(10)
self.conn.mav.request_data_stream_send(
self.conn.target_system,
self.conn.target_component, mavutil.mavlink.MAV_DATA_STREAM_ALL,
self.rate, 1)
# Send request to read waypoints
self.request_waypoints()
last_waypoint_read = time()
while not rospy.is_shutdown():
rospy.sleep(0.001)
msg = self.conn.recv_match(blocking=True)
if time() - self.last_waypoint_message_time > 5:
self.waypoint_read_in_progress = False
self.waypoint_write_in_progress = False
if time() - last_waypoint_read > 10:
last_waypoint_read = time()
self.request_waypoints()
if not msg:
continue
msg_type = msg.get_type()
if msg_type == "BAD_DATA":
rospy.logwarn("Got bad data")
continue
if self.export_conn:
self.export_conn.mav.send(msg)
if msg_type == "ATTITUDE":
self.pub_attitude.publish(
msg.roll, msg.pitch, msg.yaw,
msg.rollspeed, msg.pitchspeed, msg.yawspeed)
elif msg_type == "RC_CHANNELS_RAW":
self.pub_rcstate.publish([
msg.chan1_raw, msg.chan2_raw,
msg.chan3_raw, msg.chan4_raw, msg.chan5_raw, msg.chan6_raw,
msg.chan7_raw, msg.chan8_raw])
self.enable_control = msg.chan8_raw > 1700
if not self.enable_control:
self.enable_control_has_been_false = True
self.reset_rc_override()
elif msg_type == "RC_CHANNELS_SCALED":
pass
elif msg_type == "HEARTBEAT":
self.pub_basic_status.publish(
msg.base_mode & mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED,
mavutil.mode_string_v10(msg))
elif msg_type == "GPS_RAW_INT":
self.pub_gpsraw.publish(
msg.time_usec, msg.fix_type,
msg.lat / float(10 * 1000 * 1000),
msg.lon / float(10 * 1000 * 1000),
msg.alt / float(1000), msg.satellites_visible)
elif msg_type == "RAW_IMU":
if msg.time_usec - self.last_vibration_message_time > 10*1000*1000:
# reset, since data is stale
self.vibration_vector = Vector3(0, 0, 0)
self.accelerometer_clipping_counts = [0, 0, 0]
self.pub_imuraw.publish(
msg.time_usec,
Vector3(msg.xgyro / 100.0, msg.ygyro / 100.0, msg.zgyro / 100.0),
Vector3(msg.xacc / 100.0, msg.yacc / 100.0, msg.zacc / 100.0),
Vector3(msg.xmag / 100.0, msg.ymag / 100.0, msg.zmag / 100.0),
self.vibration_vector,
self.accelerometer_clipping_counts)
elif msg_type == "MISSION_COUNT":
if not self.waypoint_read_in_progress:
rospy.logwarn("Did not expect MISSION_COUNT message")
else:
self.num_waypoints = msg.count
self.waypoint_buffer = []
# Ignore the first one, because it's some magic waypoint
if msg.count > 1:
# Request the first waypoint
self.last_waypoint_message_time = time()
self.conn.mav.mission_request_send(
self.conn.target_system,
self.conn.target_component,
1)
else:
self.waypoint_read_in_progress = False
elif msg_type == "MISSION_REQUEST":
if not self.waypoint_write_in_progress:
rospy.logwarn("Waypoint write not in progress, but received a request for a waypoint")
else:
waypoint = self.waypoint_buffer[msg.seq]
frame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
if msg.seq == 0:
# Waypoint zero seems to be special, and uses the
# GLOBAL frame. It also is magically reset in the
# firmware, so this probably doesn't matter.
frame = mavutil.mavlink.MAV_FRAME_GLOBAL
self.last_waypoint_message_time = time()
self.conn.mav.mission_item_send(
self.conn.target_system,
self.conn.target_component,
msg.seq,
frame,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
1 if msg.seq == 1 else 0, # Set current
1, # Auto continue after this waypoint
1.0, # "reached waypoint" is +/- 1.0m
5.0, # Stay for 5 secs then move on
1.0, # Stay within 1.0m for LOITER
0, # Face north on arrival
waypoint.latitude, # Latitude
waypoint.longitude, # Longitude
waypoint.altitude) # Altitude
elif msg_type == "MISSION_ACK":
if not self.waypoint_write_in_progress:
rospy.logwarn("Did not expect MISSION_ACK no write in progress")
# NOTE: APM is suppose to return MAV_CMD_ACK_OK, but it seems
# to return 0
elif msg.type not in (0, mavutil.mavlink.MAV_CMD_ACK_OK):
rospy.logerr("Bad MISSION_ACK: %d", msg.type)
self.waypoint_write_in_progress = False
else:
# All waypoints have been sent, read them back
self.waypoint_write_in_progress = False
self.last_waypoint_message_time = time()
self.conn.mav.mission_request_list_send(
self.conn.target_system,
self.conn.target_component)
self.waypoint_read_in_progress = True
elif msg_type == "MISSION_ITEM":
if not self.waypoint_read_in_progress:
rospy.logwarn("Did not expect MISSION_ITEM, no read in progress")
else:
self.waypoint_buffer.append(rospilot.msg.Waypoint(msg.x, msg.y, msg.z))
if self.num_waypoints == msg.seq + 1:
self.conn.mav.mission_ack_send(
self.conn.target_system,
self.conn.target_component,
mavutil.mavlink.MAV_CMD_ACK_OK)
self.pub_waypoints.publish(self.waypoint_buffer)
self.waypoint_read_in_progress = False
else:
self.last_waypoint_message_time = time()
self.conn.mav.mission_request_send(
self.conn.target_system,
self.conn.target_component,
msg.seq + 1)
elif msg_type == "SYS_STATUS":
self.pub_battery.publish(msg.voltage_battery / 1000.0)
elif msg_type == "VIBRATION":
self.last_vibration_message_time = msg.time_usec
self.vibration_vector = Vector3(
msg.vibration_x,
msg.vibration_y,
msg.vibration_z)
self.accelerometer_clipping_counts = [
msg.clipping_0,
msg.clipping_1,
msg.clipping_2]
if __name__ == '__main__':
parser = OptionParser("rospilot.py <options>")
parser.add_option(
"--baudrate", dest="baudrate",
type='int', help="serial port baud rate", default=115200)
parser.add_option(
"--allow-control", dest="allow_control",
help="allow sending control signals to autopilot", default="false")
parser.add_option(
"--device", dest="device",
default="auto", help="serial device")
parser.add_option(
"--udp-export", dest="export_host",
default=None, help="UDP host/port to send copy of MAVLink data to")
(opts, args) = parser.parse_args()
rospy.init_node('rospilot_mavlink')
node = None
while not rospy.is_shutdown() and node is None:
try:
node = MavlinkNode(
device=opts.device, baudrate=opts.baudrate,
export_host=opts.export_host, allow_control=opts.allow_control)
except SerialException as e:
rospy.logerr("Failed to initialize mavlink node: " + str(e))
rospy.sleep(5)
if node:
node.run()
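# Example invocation (illustrative; the device path and UDP endpoint below
# are assumptions, not defaults):
#   rospilot.py --device=/dev/ttyACM0 --baudrate=115200 \
#       --udp-export=192.168.1.5:14550 --allow-control=false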
|
|
#####################################################################
##### IMPORT STANDARD MODULES
#####################################################################
#Python 3 support:
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pandas as pd
import numpy as np
from scipy.stats import mode
import warnings
import collections
from .base_class import BaseClass
from .data import DataBlock
#####################################################################
##### DEFINE PREPROCESS CLASS
#####################################################################
class PreProcess(BaseClass):
""" A preprocessing modules which helps you preprocess you data
using the easy-to-use built-in modules. The preprocessing is
done on all 3 of the dataframes in datablock simultaneously.
Parameters
----------
data_block : object of type DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
Attributes
----------
datablock : object of type DataBlock
The pre-processed data containing all the modifications made
using the module's methods
"""
def __init__(self, data_block):
#check if data element is of class datablock
self.check_datatype(data_block,'data_block',DataBlock)
self.datablock = data_block
        # though redundant, this makes the code more readable
self.target = self.datablock.target
#get tuple of available data
self.dp = self.datablock.data_present().values()
def check_missing(
self, subset=None, printResult=True, returnResult=False):
""" Checks the missing values in the all dataframes.
The target column in the predict variable will not be
checked here because it is assumed to have all missing values.
Parameters
__________
subset : list of str or None, default=None
A list specifying the subset of columns in which the missing
values are to be checked.
If None, all columns checked
printResult : bool, default=True
if True, the result will be printed
returnResult : bool, default=False
if True, the function will return a dictionary with keys as the
name of the dataset (train/test/predict) and values as a series
object with index as column name and values as the number of
missing values in that column. It will contain all columns even
if they have 0 missing values.
        Returns
        _______
        check_dict : dict
            A dictionary with keys as the name of the dataset
            (train/test/predict) and values as a pandas Series with index as
            the column name and values as the number of missing values in
            that column. All columns are included, even those with 0 missing
            values.
            Returned only if the returnResult argument is True.
        """
#Check if subset is actually a subset of the parameters or not:
if subset:
self.subset_check(subset)
else:
subset = self.datablock.columns
check_dict = {}
for key,data in self.datablock.data_present().items():
miss_val = data[subset].apply(lambda x: sum(x.isnull()))
if key=='predict':
#Remove target index if present:
if self.target in miss_val:
miss_val.drop(self.target,inplace=True)
check_dict[key] = miss_val
if printResult:
for df,miss in check_dict.items():
if sum(miss)==0:
print('\nNo missing value found in %s dataframe'%df)
else:
print('''\nTotal %d missing values found in %s dataframe
in following columns:'''%(sum(miss),df))
print(pd.DataFrame(
miss[miss>0],
columns=['Num Missing Values'])
)
if returnResult:
return check_dict
def imputation(
self, column, metric=None, groupby=None, constant=None,
inplace=True, suffix='_imputed'):
""" Used to performs imputation on a column. The imputation is
performed on all dataframes together. For instance, if a median
imputation is performed, then median of the train dataframe is
determined and the same value used to impute the test and predict
dataframes as well.
Parameters
__________
column : str
The name of the column to be imputed.
metric : str or None, default=None
The metric to be used for imputation. Possible options:
- mean: impute with column mean (only for numeric columns)
- median: impute with column median; default for numeric data
(only for numeric columns)
            - mode: impute with column mode; default for non-numeric data
(only for non-numeric columns)
groupby : str or list of str or None, default=None
The list of columns by which the metric is to be grouped for
            imputation. Note that these columns should not have any missing
values.
constant : no constraint, default=None
To be used if a constant user-defined value is to be used for
            imputation. If specified, the metric argument is ignored.
inplace : bool, default=True
If True, then the original column in the data will be imputed.
If False, then a new column will be created with a suffix as
specified by suffix parameter. This can be used to test
different imputation metrics.
suffix : str, default='_imputed'
If inplace argument is False, then this is the suffix applied to
            the column name to create a new column.
Note that if such a column already exists, it will be overwritten
"""
#Perform checks:
self.check_datatype(column,'column',basestring)
if metric:
self.check_value_in_range(
metric,
                ['mean','median','mode'],
'The metric can only be "mean","median" or "mode", found %s'%metric
)
self.check_datatype(inplace,'inplace',bool)
self.check_datatype(suffix,'suffix',basestring)
self.subset_check(column)
if groupby:
for col in groupby:
self.subset_check(col)
if sum([sum(x[col].isnull()) for x in self.dp]):
raise ValueError('The groupby column %s contains missing values. Please impute that first.'%col)
if inplace:
new_var = column
else:
new_var = "".join([column,suffix])
for data in self.dp:
data[new_var] = data[column]
self.datablock.update_column_list()
# If constant value passed, then exit the function with that
# imputation
        if constant is not None:
            warnings.warn('Missing values are being imputed with the user-defined constant value; the metric argument is ignored')
            for data in self.dp:
                data[new_var].fillna(constant,inplace=True)
return
#Define a function to impute by groups if such selected
def fill_grps(impute_grps):
for data in self.dp:
for i, row in data.loc[
data[column].isnull(),
[column]+groupby
].iterrows():
x = tuple(row.loc[groupby])
data.loc[i,new_var] = impute_grps.loc[x]
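        # Illustrative shape of impute_grps (group labels below are made up):
        # for groupby=['Sex','Pclass'] the pivot is indexed by tuples such as
        # ('female', 1) or ('male', 3), with the per-group statistic as the
        # value that fill_grps() looks up for each missing row.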
#Case1: continuous column
if column in self.datablock.numeric_columns:
if metric is not None:
if metric not in ['mean','median']:
raise ValueError('metric can only be mean or median for numeric column')
else:
metric = 'median'
#check constant input:
#Impute by mean:
if metric == "mean":
if groupby is not None:
#impute groups to be determined on basis of train data
#only not any other data
impute_grps = self.datablock.train.pivot_table(
values=column, index=groupby, aggfunc=np.mean,
fill_value=self.datablock.train[column].median()
)
fill_grps(impute_grps)
else:
impute_val =self.datablock.train[column].mean()
for data in self.dp:
data[new_var].fillna(impute_val,inplace=True)
#Impute by median:
elif metric == "median":
if groupby is not None:
#impute groups to be determined on basis of train data
#only not any other data
impute_grps = self.datablock.train.pivot_table(
values=column, index=groupby, aggfunc=np.median,
fill_value=self.datablock.train[column].median()
)
fill_grps(impute_grps)
else:
impute_val =self.datablock.train[column].median()
for data in self.dp:
data[new_var].fillna(impute_val,inplace=True)
#Case2: Categorical variable:
if column in self.datablock.other_columns:
if metric is not None:
if metric not in ['mode']:
raise ValueError('metric can only be mode for non-numeric column')
else:
metric = 'mode'
            #Define the custom function to determine the mode using scipy's
#mode function
def cust_mode(x):
return mode(x).mode[0]
#Impute by mode:
if metric == "mode":
if groupby is not None:
#impute groups to be determined on basis of train data
#only not any other data
impute_grps = self.datablock.train.pivot_table(
values=column, index=groupby, aggfunc=cust_mode,
fill_value=cust_mode(self.datablock.train[column])
)
fill_grps(impute_grps)
else:
impute_val = cust_mode(self.datablock.train[column])
for data in self.dp:
data[new_var].fillna(impute_val,inplace=True)
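    # Hypothetical usage sketch for imputation(); the object name `prep` and
    # the column names are assumptions used only for illustration:
    #   prep.imputation('Age', metric='median')                   # plain median
    #   prep.imputation('Age', metric='median', groupby=['Sex'])  # per-group median
    #   prep.imputation('Cabin', metric='mode', inplace=False)    # adds 'Cabin_imputed'
    #   prep.imputation('Fare', constant=0)                       # constant fill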
def scale(self,subset,scale_range=(0,1),inplace=True,suffix='_scaled'):
""" Used to scale the data within a fixed range of values.
Parameters
__________
subset : str or list of str
This represents the columns to be scaled. 2 options:
- str: a single column to be scaled
- list of str: list of multiple columns to be scaled
scale_range : tuple or dictionary, default=(0,1)
This represents the range to which the data is to be scaled.
2 options:
            - tuple (min,max): fixed range for all columns mentioned in subset
- dictionary : a dictionary with keys as columns mentioned in
subset list and values as the range to which that column is to
be scaled. Note that this works only if subset is entered as a
list of strings
inplace : bool, default=True
If True, the dataframes will be modified and columns scaled.
If False, new columns will be created with suffix as specified
in suffix parameter
suffix : str, default='_scaled'
If inplace argument is False, then this is the suffix applied to
            the column name to create a new column.
Note that if such a column already exists, it will be overwritten
"""
#check:
self.check_datatype2(subset,'subset',(basestring,list))
self.check_datatype2(scale_range,'scale_range',(tuple,dict))
self.check_datatype(inplace,'inplace',bool)
self.check_datatype(suffix,'suffix',basestring) #basestring works for both python2 and python3
self.subset_check(subset)
if isinstance(subset,str):
subset = [subset]
#Iterate over all columns and scale them:
for column in subset:
if isinstance(scale_range,tuple):
r = scale_range
else:
if column not in scale_range:
raise KeyError("%s not found in the dictionary range"%column)
r = scale_range[column]
#check each tuple of size 2:
if len(r)!=2:
raise InvalidInput("range should contain tuples of fixed size 2. tuple of size %d found"%len(r))
#check second element always greater than the first
if r[0]>=r[1]:
raise InvalidInput("each range tuple should be of form (min,max) where min<max")
#
if inplace:
new_var = column
else:
new_var = "".join([column,suffix])
for data in self.dp:
data[new_var] = data[column]
#Get min and max values:
min_val = min([data[column].min() for data in self.dp])
max_val = max([data[column].max() for data in self.dp])
for data in self.dp:
data[new_var] = data[column] - min_val
data[new_var] = data[new_var] / (max_val - min_val)
data[new_var] = data[new_var]*(r[1]-r[0]) + r[0]
#Update the list of columns if inplace False
if not inplace:
self.datablock.update_column_list()
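    # Hypothetical usage sketch for scale(); `prep` and the column names are
    # assumptions. Note that the min/max used for scaling are taken across all
    # dataframes combined:
    #   prep.scale('Fare')                                 # scale to (0, 1)
    #   prep.scale(['Age', 'Fare'], scale_range=(-1, 1))   # same range for both
    #   prep.scale(['Age', 'Fare'],
    #              scale_range={'Age': (0, 1), 'Fare': (0, 100)})  # per-column ranges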
def normalize(self,subset,norm='l2',inplace=True,suffix='_norm'):
""" Used to normalize the data using an l1 or l2 normalization.
Parameters
__________
subset : list of str
This represents the columns to be normalized. Input should be a
list of columns.
norm : str ('l1' or 'l2'), default='l2'
            This specifies the type of normalization: l1 or l2
inplace : bool, default=True
If True, the dataframes will be modified and columns normalized.
If False, new columns will be created with suffix as specified
in suffix parameter
suffix : str, default='_norm'
If inplace argument is False, then this is the suffix applied to
            the column name to create a new column.
Note that if such a column already exists, it will be overwritten
"""
#check:
self.check_datatype(subset,'subset',list)
self.check_datatype(norm,'norm',basestring) #basestring works for both python2 and python3
self.check_datatype(inplace,'inplace',bool)
self.check_datatype(suffix,'suffix',basestring) #basestring works for both python2 and python3
if norm not in ['l1','l2']:
raise self.InvalidInput("norm can only take values 'l1' or 'l2', found %s"%norm)
self.subset_check(subset)
#Iterate over all columns and scale them:
for column in subset:
#Check if column contains a missing value
if sum(sum(data[column].isnull()) for data in self.dp)>0:
raise self.InvalidInput("The %s column contains missing values, please impute first!"%column)
if inplace:
new_var = column
else:
new_var = "".join([column,suffix])
for data in self.dp:
data[new_var] = data[column]
#Get min and max values:
if norm=='l1':
divisor = sum([sum([abs(x) for x in data[column]]) for data in self.dp])
else:
divisor = np.sqrt(sum([sum([x**2 for x in data[column]]) for data in self.dp]))
for data in self.dp:
data[new_var] = data[new_var] / divisor
#Update the list of columns if inplace False
if not inplace:
self.datablock.update_column_list()
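    # Hypothetical usage sketch for normalize(); `prep` and the column names
    # are assumptions. The l1/l2 norm is computed over the column values of
    # all dataframes together:
    #   prep.normalize(['Fare'], norm='l2')                # divide by sqrt(sum(x**2))
    #   prep.normalize(['Age'], norm='l1', inplace=False)  # adds 'Age_norm'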
def apply(
self, column, func, rowwise=True, inplace=True,
suffix='_modified', combine_data=False):
""" Used to apply any function on the data to allow flexibility in
preprocessing part by incorporating functions not directly included
in this package. Note that your function should be able to handle
missing values in data if they exist.
Parameters
__________
column : str
This represents the column on which the function is to be applied
func : pre-defined function
A function which can be of 2 types:
1. Takes a single value as input and returns a single value.
This function will be applied to each observation independently
and applicable if rowwise=True
2. Takes a list of numbers as input and returns a list of same
size. This will be applied to entire column at same time and
applicable if rowwise=False
rowwise : bool, default=True
if True, a function of type1 should be passed else function of
type2
inplace : bool, default=True
If True, the dataframes will be modified.
If False, new columns will be created with suffix as specified
in suffix parameter
suffix : str, default='_modified'
If inplace argument is False, then this is the suffix applied to
            the column name to create a new column.
Note that if such a column already exists, it will be overwritten
        combine_data : bool, default=False
            Works only in the case of rowwise=False. If True, the type 2
            function will be applied on the combined dataset, i.e. a vector
            with the observations of all 3 datasets concatenated will be
            passed to it, and it should return a vector with numbers in the
            same order so that they can be mapped back.
"""
#check:
self.check_datatype(column,'column',basestring)
if not callable(func):
raise self.InvalidInput("The func parameter should be a callable function")
self.check_datatype(rowwise,'rowwise',bool)
self.check_datatype(inplace,'inplace',bool)
self.check_datatype(suffix,'suffix',basestring)
self.subset_check(column)
if inplace:
new_var = column
else:
new_var = "".join([column,suffix])
for data in self.dp:
data[new_var] = data[column]
#function to apply func as per rowwise:
def applyfunc(df,col,rowwise):
if rowwise:
return df[col].apply(func)
else:
return func(list(df[col].values))
if combine_data:
            #apply the function on the combined data of all available dataframes
result = applyfunc(self.datablock.combined_data(),column,rowwise)
print(result)
ind=0
for data in self.dp:
data[new_var] = result[ind:ind+data.shape[0]]
                ind += data.shape[0]
else:
for data in self.dp:
data[new_var] = applyfunc(data,column,rowwise)
#Update the list of columns if inplace False
if not inplace:
self.datablock.update_column_list()
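    # Hypothetical usage sketch for apply(); `prep` and the functions shown are
    # assumptions used only to illustrate the two supported function types:
    #   prep.apply('Fare', np.log1p)                               # rowwise (type 1)
    #   prep.apply('Age', lambda col: list(pd.Series(col).rank()),
    #              rowwise=False, combine_data=True)               # whole-column (type 2)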
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Nova."""
import copy
import re
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from nova import exception
from nova.i18n import _LE, _LW
from nova import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ENFORCER = None
# This list is about the resources which support user based policy enforcement.
# Avoid sending deprecation warning for those resources.
USER_BASED_RESOURCES = ['os-keypairs']
# oslo_policy will re-read the policy configuration file when the file is
# changed at runtime, so the old policy rules are saved to saved_file_rules
# and compared with the new rules to determine whether the rules were updated.
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
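# For example, KEY_EXPR.findall("user_id:%(user_id)s") returns ['user_id'];
# this is how _warning_for_deprecated_user_based_rules below detects
# user_id-based rules.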
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
"""Init an Enforcer class.
:param policy_file: Custom policy file to use, if none is specified,
`CONF.policy_file` will be used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from config file.
"""
global _ENFORCER
global saved_file_rules
if not _ENFORCER:
_ENFORCER = policy.Enforcer(CONF,
policy_file=policy_file,
rules=rules,
default_rule=default_rule,
use_conf=use_conf)
register_rules(_ENFORCER)
_ENFORCER.load_rules()
# Only the rules which are loaded from file may be changed.
current_file_rules = _ENFORCER.file_rules
current_file_rules = _serialize_rules(current_file_rules)
# Checks whether the rules are updated in the runtime
if saved_file_rules != current_file_rules:
_warning_for_deprecated_user_based_rules(current_file_rules)
saved_file_rules = copy.deepcopy(current_file_rules)
def _serialize_rules(rules):
"""Serialize all the Rule object as string which is used to compare the
rules list.
"""
result = [(rule_name, str(rule))
for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0])
def _warning_for_deprecated_user_based_rules(rules):
"""Warning user based policy enforcement used in the rule but the rule
doesn't support it.
"""
for rule in rules:
# We will skip the warning for the resources which support user based
# policy enforcement.
if [resource for resource in USER_BASED_RESOURCES
if resource in rule[0]]:
continue
if 'user_id' in KEY_EXPR.findall(rule[1]):
LOG.warning(_LW("The user_id attribute isn't supported in the "
"rule '%s'. All the user_id based policy "
"enforcement will be removed in the "
"future."), rule[0])
def set_rules(rules, overwrite=True, use_conf=False):
"""Set rules based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from config file.
"""
init(use_conf=False)
_ENFORCER.set_rules(rules, overwrite, use_conf)
def authorize(context, action, target, do_raise=True, exc=None):
"""Verifies that the action is valid on the target in this context.
:param context: nova context
:param action: string representing the action to be checked
this should be colon separated for clarity.
i.e. ``compute:create_instance``,
``compute:attach_volume``,
``volume:attach_volume``
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param do_raise: if True (the default), raises PolicyNotAuthorized;
if False, returns False
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to :meth:`authorize` (both
positional and keyword arguments) will be passed to
the exception class. If not specified,
:class:`PolicyNotAuthorized` will be used.
:raises nova.exception.PolicyNotAuthorized: if verification fails
and do_raise is True. Or if 'exc' is specified it will raise an
exception of that type.
:return: returns a non-False value (not necessarily "True") if
authorized, and the exact value False if not authorized and
do_raise is False.
"""
init()
credentials = context.to_policy_values()
if not exc:
exc = exception.PolicyNotAuthorized
try:
result = _ENFORCER.authorize(action, target, credentials,
do_raise=do_raise, exc=exc, action=action)
except policy.PolicyNotRegistered:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy not registered'))
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug('Policy check for %(action)s failed with credentials '
'%(credentials)s',
{'action': action, 'credentials': credentials})
return result
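# A minimal usage sketch (the action name below is illustrative; real callers
# pass an action registered in nova.policies):
#   from nova import policy
#   policy.authorize(context, 'os_compute_api:servers:create',
#                    {'project_id': context.project_id})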
def check_is_admin(context):
"""Whether or not roles contains 'admin' role according to policy setting.
"""
init()
# the target is user-self
credentials = context.to_policy_values()
target = credentials
return _ENFORCER.authorize('context_is_admin', target, credentials)
@policy.register('is_admin')
class IsAdminCheck(policy.Check):
"""An explicit check for is_admin."""
def __init__(self, kind, match):
"""Initialize the check."""
self.expected = (match.lower() == 'true')
super(IsAdminCheck, self).__init__(kind, str(self.expected))
def __call__(self, target, creds, enforcer):
"""Determine whether is_admin matches the requested value."""
return creds['is_admin'] == self.expected
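# For example, a rule string such as "is_admin:True" in the policy file is
# parsed by oslo.policy into IsAdminCheck('is_admin', 'True'), and the check
# above then compares creds['is_admin'] against the expected boolean.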
def get_rules():
if _ENFORCER:
return _ENFORCER.rules
def register_rules(enforcer):
enforcer.register_defaults(policies.list_rules())
def get_enforcer():
# This method is for use by oslopolicy CLI scripts. Those scripts need the
# 'output-file' and 'namespace' options, but having those in sys.argv means
# loading the Nova config options will fail as those are not expected to
# be present. So we pass in an arg list with those stripped out.
conf_args = []
# Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:]
i = 1
while i < len(sys.argv):
if sys.argv[i].strip('-') in ['namespace', 'output-file']:
i += 2
continue
conf_args.append(sys.argv[i])
i += 1
cfg.CONF(conf_args, project='nova')
init()
return _ENFORCER
def verify_deprecated_policy(old_policy, new_policy, default_rule, context):
"""Check the rule of the deprecated policy action
If the current rule of the deprecated policy action is set to a non-default
value, then a warning message is logged stating that the new policy
action should be used to dictate permissions as the old policy action is
being deprecated.
:param old_policy: policy action that is being deprecated
:param new_policy: policy action that is replacing old_policy
:param default_rule: the old_policy action default rule value
:param context: the nova context
"""
if _ENFORCER:
current_rule = str(_ENFORCER.rules[old_policy])
else:
current_rule = None
if current_rule != default_rule:
LOG.warning("Start using the new action '{0}'. The existing "
"action '{1}' is being deprecated and will be "
"removed in future release.".format(new_policy,
old_policy))
context.can(old_policy)
return True
else:
return False
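# A minimal usage sketch (the policy action names below are illustrative, not
# actual Nova policies):
#   if verify_deprecated_policy('os_compute_api:old-action',
#                               'os_compute_api:new-action',
#                               'rule:admin_api', context):
#       pass  # the caller was authorized via the deprecated action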
|
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import requests
from six.moves import http_client
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_response(status=http_client.OK, content=b"", headers={}):
response = requests.Response()
response.status_code = status
response._content = content
response.headers = headers
response.request = requests.Request()
return response
def _make_requests_session(responses):
session = mock.create_autospec(requests.Session, instance=True)
session.request.side_effect = responses
return session
class TestMIMEApplicationHTTP(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.batch import MIMEApplicationHTTP
return MIMEApplicationHTTP
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_body_None(self):
METHOD = "DELETE"
PATH = "/path/to/api"
LINES = ["DELETE /path/to/api HTTP/1.1", ""]
mah = self._make_one(METHOD, PATH, {}, None)
self.assertEqual(mah.get_content_type(), "application/http")
self.assertEqual(mah.get_payload().splitlines(), LINES)
def test_ctor_body_str(self):
METHOD = "GET"
PATH = "/path/to/api"
BODY = "ABC"
HEADERS = {"Content-Length": len(BODY), "Content-Type": "text/plain"}
LINES = [
"GET /path/to/api HTTP/1.1",
"Content-Length: 3",
"Content-Type: text/plain",
"",
"ABC",
]
mah = self._make_one(METHOD, PATH, HEADERS, BODY)
self.assertEqual(mah.get_payload().splitlines(), LINES)
def test_ctor_body_dict(self):
METHOD = "GET"
PATH = "/path/to/api"
BODY = {"foo": "bar"}
HEADERS = {}
LINES = [
"GET /path/to/api HTTP/1.1",
"Content-Length: 14",
"Content-Type: application/json",
"",
'{"foo": "bar"}',
]
mah = self._make_one(METHOD, PATH, HEADERS, BODY)
self.assertEqual(mah.get_payload().splitlines(), LINES)
class TestBatch(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.batch import Batch
return Batch
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
http = _make_requests_session([])
connection = _Connection(http=http)
client = _Client(connection)
batch = self._make_one(client)
self.assertIs(batch._client, client)
self.assertEqual(len(batch._requests), 0)
self.assertEqual(len(batch._target_objects), 0)
def test_current(self):
from google.cloud.storage.client import Client
project = "PROJECT"
credentials = _make_credentials()
client = Client(project=project, credentials=credentials)
batch1 = self._make_one(client)
self.assertIsNone(batch1.current())
client._push_batch(batch1)
self.assertIs(batch1.current(), batch1)
batch2 = self._make_one(client)
client._push_batch(batch2)
self.assertIs(batch1.current(), batch2)
def test__make_request_GET_normal(self):
from google.cloud.storage.batch import _FutureDict
url = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
target = _MockObject()
response = batch._make_request("GET", url, target_object=target)
        # Check the response
self.assertEqual(response.status_code, 204)
self.assertIsInstance(response.json(), _FutureDict)
self.assertIsInstance(response.content, _FutureDict)
self.assertIs(target._properties, response.content)
# The real http request should not have been called yet.
http.request.assert_not_called()
# Check the queued request
self.assertEqual(len(batch._requests), 1)
request = batch._requests[0]
request_method, request_url, _, request_data, _ = request
self.assertEqual(request_method, "GET")
self.assertEqual(request_url, url)
self.assertIsNone(request_data)
def test__make_request_POST_normal(self):
from google.cloud.storage.batch import _FutureDict
url = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
data = {"foo": 1}
target = _MockObject()
response = batch._make_request(
"POST", url, data={"foo": 1}, target_object=target
)
self.assertEqual(response.status_code, 204)
self.assertIsInstance(response.content, _FutureDict)
self.assertIs(target._properties, response.content)
# The real http request should not have been called yet.
http.request.assert_not_called()
request = batch._requests[0]
request_method, request_url, _, request_data, _ = request
self.assertEqual(request_method, "POST")
self.assertEqual(request_url, url)
self.assertEqual(request_data, data)
def test__make_request_PATCH_normal(self):
from google.cloud.storage.batch import _FutureDict
url = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
data = {"foo": 1}
target = _MockObject()
response = batch._make_request(
"PATCH", url, data={"foo": 1}, target_object=target
)
self.assertEqual(response.status_code, 204)
self.assertIsInstance(response.content, _FutureDict)
self.assertIs(target._properties, response.content)
# The real http request should not have been called yet.
http.request.assert_not_called()
request = batch._requests[0]
request_method, request_url, _, request_data, _ = request
self.assertEqual(request_method, "PATCH")
self.assertEqual(request_url, url)
self.assertEqual(request_data, data)
def test__make_request_DELETE_normal(self):
from google.cloud.storage.batch import _FutureDict
url = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
target = _MockObject()
response = batch._make_request("DELETE", url, target_object=target)
        # Check the response
self.assertEqual(response.status_code, 204)
self.assertIsInstance(response.content, _FutureDict)
self.assertIs(target._properties, response.content)
# The real http request should not have been called yet.
http.request.assert_not_called()
# Check the queued request
self.assertEqual(len(batch._requests), 1)
request = batch._requests[0]
request_method, request_url, _, request_data, _ = request
self.assertEqual(request_method, "DELETE")
self.assertEqual(request_url, url)
self.assertIsNone(request_data)
def test__make_request_POST_too_many_requests(self):
url = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
batch._MAX_BATCH_SIZE = 1
batch._requests.append(("POST", url, {}, {"bar": 2}))
with self.assertRaises(ValueError):
batch._make_request("POST", url, data={"foo": 1})
def test_finish_empty(self):
http = _make_requests_session([])
connection = _Connection(http=http)
batch = self._make_one(connection)
with self.assertRaises(ValueError):
batch.finish()
def _get_payload_chunks(self, boundary, payload):
divider = "--" + boundary[len('boundary="') : -1]
chunks = payload.split(divider)[1:-1] # discard prolog / epilog
return chunks
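    # For example, with boundary == 'boundary="==abc=="' the divider becomes
    # "--==abc==", and splitting the multipart body on it yields
    # [prolog, chunk1, chunk2, ..., epilog]; the [1:-1] slice keeps the chunks.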
def _check_subrequest_no_payload(self, chunk, method, url):
lines = chunk.splitlines()
# blank + 2 headers + blank + request + blank + blank
self.assertEqual(len(lines), 7)
self.assertEqual(lines[0], "")
self.assertEqual(lines[1], "Content-Type: application/http")
self.assertEqual(lines[2], "MIME-Version: 1.0")
self.assertEqual(lines[3], "")
self.assertEqual(lines[4], "%s %s HTTP/1.1" % (method, url))
self.assertEqual(lines[5], "")
self.assertEqual(lines[6], "")
def _check_subrequest_payload(self, chunk, method, url, payload):
import json
lines = chunk.splitlines()
# blank + 2 headers + blank + request + 2 headers + blank + body
payload_str = json.dumps(payload)
self.assertEqual(lines[0], "")
self.assertEqual(lines[1], "Content-Type: application/http")
self.assertEqual(lines[2], "MIME-Version: 1.0")
self.assertEqual(lines[3], "")
self.assertEqual(lines[4], "%s %s HTTP/1.1" % (method, url))
if method == "GET":
self.assertEqual(len(lines), 7)
self.assertEqual(lines[5], "")
self.assertEqual(lines[6], "")
else:
self.assertEqual(len(lines), 9)
self.assertEqual(lines[5], "Content-Length: %d" % len(payload_str))
self.assertEqual(lines[6], "Content-Type: application/json")
self.assertEqual(lines[7], "")
self.assertEqual(json.loads(lines[8]), payload)
    def _get_multipart_request(self, http):
request_call = http.request.mock_calls[0][2]
request_headers = request_call["headers"]
request_body = request_call["data"]
content_type, boundary = [
value.strip() for value in request_headers["Content-Type"].split(";")
]
return request_headers, request_body, content_type, boundary
def test_finish_nonempty(self):
url = "http://api.example.com/other_api"
expected_response = _make_response(
content=_THREE_PART_MIME_RESPONSE,
headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'},
)
http = _make_requests_session([expected_response])
connection = _Connection(http=http)
client = _Client(connection)
batch = self._make_one(client)
batch.API_BASE_URL = "http://api.example.com"
batch._do_request("POST", url, {}, {"foo": 1, "bar": 2}, None)
batch._do_request("PATCH", url, {}, {"bar": 3}, None)
batch._do_request("DELETE", url, {}, None, None)
result = batch.finish()
self.assertEqual(len(result), len(batch._requests))
response1, response2, response3 = result
self.assertEqual(
response1.headers,
{"Content-Length": "20", "Content-Type": "application/json; charset=UTF-8"},
)
self.assertEqual(response1.json(), {"foo": 1, "bar": 2})
self.assertEqual(
response2.headers,
{"Content-Length": "20", "Content-Type": "application/json; charset=UTF-8"},
)
self.assertEqual(response2.json(), {"foo": 1, "bar": 3})
self.assertEqual(response3.headers, {"Content-Length": "0"})
self.assertEqual(response3.status_code, http_client.NO_CONTENT)
expected_url = "{}/batch/storage/v1".format(batch.API_BASE_URL)
http.request.assert_called_once_with(
method="POST",
url=expected_url,
headers=mock.ANY,
data=mock.ANY,
timeout=mock.ANY,
)
        request_info = self._get_multipart_request(http)
request_headers, request_body, content_type, boundary = request_info
self.assertEqual(content_type, "multipart/mixed")
self.assertTrue(boundary.startswith('boundary="=='))
self.assertTrue(boundary.endswith('=="'))
self.assertEqual(request_headers["MIME-Version"], "1.0")
chunks = self._get_payload_chunks(boundary, request_body)
self.assertEqual(len(chunks), 3)
self._check_subrequest_payload(chunks[0], "POST", url, {"foo": 1, "bar": 2})
self._check_subrequest_payload(chunks[1], "PATCH", url, {"bar": 3})
self._check_subrequest_no_payload(chunks[2], "DELETE", url)
def test_finish_responses_mismatch(self):
url = "http://api.example.com/other_api"
expected_response = _make_response(
content=_TWO_PART_MIME_RESPONSE_WITH_FAIL,
headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'},
)
http = _make_requests_session([expected_response])
connection = _Connection(http=http)
client = _Client(connection)
batch = self._make_one(client)
batch.API_BASE_URL = "http://api.example.com"
batch._requests.append(("GET", url, {}, None))
with self.assertRaises(ValueError):
batch.finish()
def test_finish_nonempty_with_status_failure(self):
from google.cloud.exceptions import NotFound
url = "http://api.example.com/other_api"
expected_response = _make_response(
content=_TWO_PART_MIME_RESPONSE_WITH_FAIL,
headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'},
)
http = _make_requests_session([expected_response])
connection = _Connection(http=http)
client = _Client(connection)
batch = self._make_one(client)
batch.API_BASE_URL = "http://api.example.com"
target1 = _MockObject()
target2 = _MockObject()
batch._do_request("GET", url, {}, None, target1)
batch._do_request("GET", url, {}, None, target2)
# Make sure futures are not populated.
self.assertEqual(
[future for future in batch._target_objects], [target1, target2]
)
target2_future_before = target2._properties
with self.assertRaises(NotFound):
batch.finish()
self.assertEqual(target1._properties, {"foo": 1, "bar": 2})
self.assertIs(target2._properties, target2_future_before)
expected_url = "{}/batch/storage/v1".format(batch.API_BASE_URL)
http.request.assert_called_once_with(
method="POST",
url=expected_url,
headers=mock.ANY,
data=mock.ANY,
timeout=mock.ANY,
)
        _, request_body, _, boundary = self._get_multipart_request(http)
chunks = self._get_payload_chunks(boundary, request_body)
self.assertEqual(len(chunks), 2)
self._check_subrequest_payload(chunks[0], "GET", url, {})
self._check_subrequest_payload(chunks[1], "GET", url, {})
def test_finish_nonempty_non_multipart_response(self):
url = "http://api.example.com/other_api"
http = _make_requests_session([_make_response()])
connection = _Connection(http=http)
client = _Client(connection)
batch = self._make_one(client)
batch._requests.append(("POST", url, {}, {"foo": 1, "bar": 2}))
with self.assertRaises(ValueError):
batch.finish()
def test_as_context_mgr_wo_error(self):
from google.cloud.storage.client import Client
url = "http://example.com/api"
expected_response = _make_response(
content=_THREE_PART_MIME_RESPONSE,
headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'},
)
http = _make_requests_session([expected_response])
project = "PROJECT"
credentials = _make_credentials()
client = Client(project=project, credentials=credentials)
client._http_internal = http
self.assertEqual(list(client._batch_stack), [])
target1 = _MockObject()
target2 = _MockObject()
target3 = _MockObject()
with self._make_one(client) as batch:
self.assertEqual(list(client._batch_stack), [batch])
batch._make_request(
"POST", url, {"foo": 1, "bar": 2}, target_object=target1
)
batch._make_request("PATCH", url, {"bar": 3}, target_object=target2)
batch._make_request("DELETE", url, target_object=target3)
self.assertEqual(list(client._batch_stack), [])
self.assertEqual(len(batch._requests), 3)
self.assertEqual(batch._requests[0][0], "POST")
self.assertEqual(batch._requests[1][0], "PATCH")
self.assertEqual(batch._requests[2][0], "DELETE")
self.assertEqual(batch._target_objects, [target1, target2, target3])
self.assertEqual(target1._properties, {"foo": 1, "bar": 2})
self.assertEqual(target2._properties, {"foo": 1, "bar": 3})
self.assertEqual(target3._properties, b"")
def test_as_context_mgr_w_error(self):
from google.cloud.storage.batch import _FutureDict
from google.cloud.storage.client import Client
URL = "http://example.com/api"
http = _make_requests_session([])
connection = _Connection(http=http)
project = "PROJECT"
credentials = _make_credentials()
client = Client(project=project, credentials=credentials)
client._base_connection = connection
self.assertEqual(list(client._batch_stack), [])
target1 = _MockObject()
target2 = _MockObject()
target3 = _MockObject()
try:
with self._make_one(client) as batch:
self.assertEqual(list(client._batch_stack), [batch])
batch._make_request(
"POST", URL, {"foo": 1, "bar": 2}, target_object=target1
)
batch._make_request("PATCH", URL, {"bar": 3}, target_object=target2)
batch._make_request("DELETE", URL, target_object=target3)
raise ValueError()
except ValueError:
pass
http.request.assert_not_called()
self.assertEqual(list(client._batch_stack), [])
self.assertEqual(len(batch._requests), 3)
self.assertEqual(batch._target_objects, [target1, target2, target3])
# Since the context manager fails, finish will not get called and
# the _properties will still be futures.
self.assertIsInstance(target1._properties, _FutureDict)
self.assertIsInstance(target2._properties, _FutureDict)
self.assertIsInstance(target3._properties, _FutureDict)
class Test__unpack_batch_response(unittest.TestCase):
def _call_fut(self, headers, content):
from google.cloud.storage.batch import _unpack_batch_response
response = _make_response(content=content, headers=headers)
return _unpack_batch_response(response)
def _unpack_helper(self, response, content):
result = list(self._call_fut(response, content))
self.assertEqual(len(result), 3)
self.assertEqual(result[0].status_code, http_client.OK)
self.assertEqual(result[0].json(), {u"bar": 2, u"foo": 1})
self.assertEqual(result[1].status_code, http_client.OK)
self.assertEqual(result[1].json(), {u"foo": 1, u"bar": 3})
self.assertEqual(result[2].status_code, http_client.NO_CONTENT)
def test_bytes_headers(self):
RESPONSE = {"content-type": b'multipart/mixed; boundary="DEADBEEF="'}
CONTENT = _THREE_PART_MIME_RESPONSE
self._unpack_helper(RESPONSE, CONTENT)
def test_unicode_headers(self):
RESPONSE = {"content-type": u'multipart/mixed; boundary="DEADBEEF="'}
CONTENT = _THREE_PART_MIME_RESPONSE
self._unpack_helper(RESPONSE, CONTENT)
_TWO_PART_MIME_RESPONSE_WITH_FAIL = b"""\
--DEADBEEF=
Content-Type: application/json
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+1>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 2}
--DEADBEEF=
Content-Type: application/json
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+2>
HTTP/1.1 404 Not Found
Content-Type: application/json; charset=UTF-8
Content-Length: 35
{"error": {"message": "Not Found"}}
--DEADBEEF=--
"""
_THREE_PART_MIME_RESPONSE = b"""\
--DEADBEEF=
Content-Type: application/json
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+1>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 2}
--DEADBEEF=
Content-Type: application/json
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+2>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 3}
--DEADBEEF=
Content-Type: text/plain
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+3>
HTTP/1.1 204 No Content
Content-Length: 0
--DEADBEEF=--
"""
class Test__FutureDict(unittest.TestCase):
def _make_one(self, *args, **kw):
from google.cloud.storage.batch import _FutureDict
return _FutureDict(*args, **kw)
def test_get(self):
future = self._make_one()
self.assertRaises(KeyError, future.get, None)
def test___getitem__(self):
future = self._make_one()
value = orig_value = object()
with self.assertRaises(KeyError):
value = future[None]
self.assertIs(value, orig_value)
def test___setitem__(self):
future = self._make_one()
with self.assertRaises(KeyError):
future[None] = None
class _Connection(object):
project = "TESTING"
def __init__(self, **kw):
self.__dict__.update(kw)
def _make_request(self, method, url, data=None, headers=None, timeout=None):
return self.http.request(
url=url, method=method, headers=headers, data=data, timeout=timeout
)
class _MockObject(object):
pass
class _Client(object):
def __init__(self, connection):
self._base_connection = connection
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 12:52:42 2015
@author: jdh
"""
import os
import logging
from glob import glob
import re
import gdal
from gdal import gdalconst
import osr
from datetime import datetime
import numpy
from EOtools.DatasetDrivers import SceneDataset
from EOtools.execute import execute
import xml.etree.ElementTree as ET
from agdc.cube_util import DatasetError
from agdc.abstract_ingester import AbstractDataset
from agdc.landsat_ingester.landsat_bandstack import LandsatBandstack
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class LS5Dataset(AbstractDataset):
def __init__(self, dataset_path):
"""Opens the dataset and extracts metadata.
Most of the metadata is kept in self._ds which is
a EOtools.DatasetDrivers.SceneDataset object. Some extra metadata is
extracted and kept the instance attributes.
"""
self._dataset_path = dataset_path
LOGGER.info('Opening Dataset %s', self._dataset_path)
#self._ds = SceneDataset(default_metadata_required=False, utm_fix=True)
self._Open(self.get_dataset_path())
#
# Cache extra metadata in instance attributes.
#
self._dataset_size = 0
#get the rest of our metadata for implementing abstracted methods
self._Collect_Metadata(self.get_dataset_path())
if self.get_processing_level() in ['ORTHO', 'L1T', 'MAP']:
LOGGER.debug('Dataset %s is Level 1', self.get_dataset_path())
self._gcp_count = self._get_gcp_count()
self._mtl_text = self._get_mtl_text()
else:
self._gcp_count = None
self._mtl_text = None
self._xml_text = self.get_xml_text()
AbstractDataset.__init__(self)
def _Open(self, dataset_path,eAccessLevel=gdalconst.GA_ReadOnly):
"""Determine if this is pqa or nbar data.
"""
self.band_dict = {}
self._eAccess=eAccessLevel
filelist = [filename for filename in os.listdir(self._dataset_path)
if re.match('\w*_PQA_\w*.tif', filename)]
nodata_value = -9999 #TODO: Make this pull from database
if len(filelist) > 0:
#this is a pqa dataset
print "PQA dataset found."
nodata_value = None
self._isPQA = True
self._eAccess = eAccessLevel
#self.band_dict = {'file': filelist[0], 'band_file_number':1,'type':'Derived','dataset':dataset_path,'rasterIndex':1}
self.band_dict[0] = {'file_pattern':filelist[0],'nodata_value':nodata_value,'tile_layer':1, 'band_name':'Pixel Quality Assurance','resampling_method':'near','level_name':'PQA'}
return
self._isPQA=False
"""Open the directory and get the info we need
this info is bands, scene start time, scene end time, scene center time, bounds
"""
#find files matching *B#.tif
file_number = 0
band_names = ['Visible Blue', 'Visible Green', 'Visible Red', 'Near Infrared', 'Middle Infrared 1','Middle Infrared 2']
for bandNumber in [10,20,30,40,50,70]:
print file_number
filePattern = '\w*[B]'+str(bandNumber)+'.TIF'
tilelayer = 1 #I have no idea
bandinfo = {'file_pattern':filePattern,'nodata_value':nodata_value,'tile_layer':tilelayer, 'band_name':band_names[file_number],'resampling_method':'bilinear','nodata_val':-999}
self.band_dict[file_number]=bandinfo
print "band added"
file_number+=1
"""
Generate the provenance band data info
"""
def _Collect_Metadata(self, dataset_path):
"""
This method collects the various pieces of information needed to ingest the dataset
"""
RootBand = None
#open the first band and get size
# if self._isPQA == False:
RootBand = gdal.Open(self.find_band_file(self.band_dict[0]['file_pattern']),self._eAccess)
#else:
# RootBand = gdal.Open(self.find_band_file(self.band_dict['file']),self._eAccess)
self._x_size = RootBand.RasterXSize
self._y_size = RootBand.RasterYSize
#get the width and height
self._geo_transform = RootBand.GetGeoTransform()
self._projection = RootBand.GetProjection()
self._projectionRef = RootBand.GetProjectionRef()
self._spatial_ref = osr.SpatialReference()
self._spatial_ref.ImportFromWkt(RootBand.GetProjection())
#read in the xml file
filelist = [filename for filename in os.listdir(self._dataset_path)
if re.match('\w*.xml', filename)]
print "XML File: "+os.path.join(self._dataset_path,filelist[0])
xmlRoot = ET.parse(os.path.join(self._dataset_path,filelist[0])).getroot()
print "Root element: "+xmlRoot.tag
for child in xmlRoot:
print child.tag, child.attrib
self._global_metadata = xmlRoot[0]
print "Child node: "+self._global_metadata.tag
#get the relevant info. Note: global metadata must also have scene_Start_time and scene_end_time
satellite_tag_list = {'LANDSAT_5':'LS5'}
sensor_tag_list = {'TM':'TM'}
satellite_name = self._global_metadata.findall('{http://espa.cr.usgs.gov/v1.2}satellite')[0].text
self._satellite_tag = satellite_tag_list[satellite_name]
satellite_sensor = self._global_metadata.find('{http://espa.cr.usgs.gov/v1.2}instrument').text
self._sensor_tag = sensor_tag_list[satellite_sensor]
self._acquisition_date = self._global_metadata.find('{http://espa.cr.usgs.gov/v1.2}acquisition_date').text
self._scene_center_time = datetime.strptime(self._acquisition_date+'T'+self._global_metadata.find('{http://espa.cr.usgs.gov/v1.2}scene_center_time').text,"%Y-%m-%dT%H:%M:%S.%fZ")
#self._scene_start_time = datetime.strptime(self._global_metadata.find('{http://espa.cr.usgs.gov/v1.1}scene_start_time').text,"%Y-%m-%d\\T%H:%M:%S\\Z")
#self._scene_end_time = datetime.strptime(self._global_metadata.find('{http://espa.cr.usgs.gov/v1.1}scene_end_time').text,"%Y-%m-%d\\T%H:%M:%S\\Z")
self._scene_start_time = self._scene_center_time
self._scene_end_time = self._scene_center_time
self._scene_processed_time = datetime.now()
self._bounding_box = self._global_metadata.find('{http://espa.cr.usgs.gov/v1.2}bounding_coordinates')
self._north = float(self._bounding_box.find('{http://espa.cr.usgs.gov/v1.2}north').text)
self._south = float(self._bounding_box.find('{http://espa.cr.usgs.gov/v1.2}south').text)
self._east = float(self._bounding_box.find('{http://espa.cr.usgs.gov/v1.2}east').text)
self._west = float(self._bounding_box.find('{http://espa.cr.usgs.gov/v1.2}west').text)
self._wrs = self._global_metadata.find('{http://espa.cr.usgs.gov/v1.2}wrs').attrib
self._path = int(self._wrs['path'])
self._row = int(self._wrs['row'])
if self._satellite_tag == 'LS5' and self._isPQA==False:
self._processing_level = 'NBAR' #placeholder
elif self._isPQA == True:
self._processing_level = 'PQA'
#get lon lat box
self._ll_lon = self._west
self._ll_lat = self._south
self._ul_lon = self._west
self._ul_lat = self._north
self._lr_lon = self._east
self._lr_lat = self._south
self._ur_lon = self._east
self._ur_lat = self._north
#get x y box
self._ll_x=None
self._ll_y=None
self._lr_x=None
self._lr_y=None
self._ul_x=None
self._ul_y=None
self._ur_x=None
self._ur_y=None
self._spatial_ref_geo = self._spatial_ref.CloneGeogCS()
self._cxform_to_geo = osr.CoordinateTransformation(self._spatial_ref, self._spatial_ref_geo)
self._cxform_from_geo = osr.CoordinateTransformation(self._spatial_ref_geo, self._spatial_ref)
extents = self.GetExtent()
array_extents = numpy.array(extents)
centre_x = float(numpy.mean(array_extents[:,0]))
centre_y = float(numpy.mean(array_extents[:,1]))
extents.append([centre_x,centre_y])
if self._spatial_ref.IsGeographic():
self._lonlats = {
'CENTRE' : (extents[4][0], extents[4][1]),
'UL' : (extents[0][0], extents[0][1]),
'UR' : (extents[2][0], extents[2][1]),
'LL' : (extents[1][0], extents[1][1]),
'LR' : (extents[3][0], extents[3][1])
}
# If the scene is natively in geographics, we shouldn't need to
# project the co-ordinates to UTM.
# Set the georeferenced coordinates of the corner points if we don't already have them.
# These generally only get set when the product is FAST-EQR when they're forced to None
if not (self._ul_x and self._ul_y):
self._ul_x, self._ul_y = self._lonlats['UL']
if not (self._ur_x and self._ur_y):
self._ur_x, self._ur_y = self._lonlats['UR']
if not (self._ll_x and self._ll_y):
self._ll_x, self._ll_y = self._lonlats['LL']
if not (self._lr_x and self._lr_y):
self._lr_x, self._lr_y = self._lonlats['LR']
self._scene_centre_x, self._scene_centre_y = self._lonlats['CENTRE']
else:
self._coords = {
'CENTRE' : (extents[4][0], extents[4][1]),
'UL' : (extents[0][0], extents[0][1]),
'UR' : (extents[2][0], extents[2][1]),
'LL' : (extents[1][0], extents[1][1]),
'LR' : (extents[3][0], extents[3][1])
}
re_prj_extents=[]
for x,y in extents:
new_x, new_y, new_z = self._cxform_to_geo.TransformPoint(x,y)
re_prj_extents.append([new_x,new_y])
self._lonlats = {
'CENTRE' : (re_prj_extents[4][0], re_prj_extents[4][1]),
'UL' : (re_prj_extents[0][0], re_prj_extents[0][1]),
'UR' : (re_prj_extents[2][0], re_prj_extents[2][1]),
'LL' : (re_prj_extents[1][0], re_prj_extents[1][1]),
'LR' : (re_prj_extents[3][0], re_prj_extents[3][1])
}
if not (self._ul_x and self._ul_y):
self._ul_x, self._ul_y = self._lonlats['UL']
if not (self._ur_x and self._ur_y):
self._ur_x, self._ur_y = self._lonlats['UR']
if not (self._ll_x and self._ll_y):
self._ll_x, self._ll_y = self._lonlats['LL']
if not (self._lr_x and self._lr_y):
self._lr_x, self._lr_y = self._lonlats['LR']
self._scene_centre_x, self._scene_centre_y = self._lonlats['CENTRE']
self.metadata_dict = {}
self.metadata_dict['x-ref']=self.get_x_ref()
self.metadata_dict['y-ref']=self.get_y_ref()
def GetExtent(self):
"""Better optimized than the gdal one
"""
gt = self._geo_transform
extents = []
x_array = [0,self._x_size]
y_array = [0,self._y_size]
for px in x_array:
for py in y_array:
x = gt[0]+(px*gt[1])+(py*gt[2])
y = gt[3]+(px*gt[4])+(py*gt[5])
extents.append([x,y])
return extents
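    # Worked example of the affine mapping above (the geotransform values are
    # illustrative): with gt = (300000.0, 25.0, 0.0, 7000000.0, 0.0, -25.0)
    # and a 100 x 100 pixel image, the corner pixels map to
    #   (px, py) = (0, 0)     -> (300000.0, 7000000.0)
    #   (px, py) = (100, 100) -> (300000.0 + 25*100, 7000000.0 - 25*100)
    #                          = (302500.0, 6997500.0)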
    def find_band_file(self, file_pattern):
        """Find the file in dataset_dir matching file_pattern and check
        uniqueness.
        Returns the path to the file if found, raises a DatasetError
        otherwise."""
        print "Looking for file based on file pattern"
dataset_dir = self._dataset_path
if not os.path.isdir(dataset_dir):
raise DatasetError('%s is not a valid directory' % dataset_dir)
print "File pattern: "+file_pattern
filelist = [filename for filename in os.listdir(dataset_dir)
if re.match(file_pattern, filename)]
if not len(filelist) == 1:
raise DatasetError('Unable to find unique match ' +
'for file pattern %s' % file_pattern)
return os.path.join(dataset_dir, filelist[0])
def get_dataset_path(self):
"""The path to the dataset on disk."""
return self._dataset_path
def get_satellite_tag(self):
"""A short unique string identifying the satellite."""
return self._satellite_tag
def get_sensor_name(self):
"""A short string identifying the sensor.
The combination of satellite_tag and sensor_name must be unique.
"""
return self._sensor_tag
def get_processing_level(self):
"""A short string identifying the processing level or product.
The processing level must be unique for each satellite and sensor
combination.
"""
return self._processing_level
def get_x_ref(self):
"""The x (East-West axis) reference number for the dataset.
In whatever numbering scheme is used for this satellite.
"""
return self._path
def get_y_ref(self):
"""The y (North-South axis) reference number for the dataset.
In whatever numbering scheme is used for this satellite.
"""
return self._row
def get_start_datetime(self):
"""The start of the acquisition.
This is a datetime without timezone in UTC.
"""
return self._scene_start_time
def get_end_datetime(self):
"""The end of the acquisition.
        This is a datetime without timezone in UTC.
"""
return self._scene_end_time
def get_datetime_processed(self):
"""The date and time when the dataset was processed or created.
This is used to determine if that dataset is newer than one
already in the database, and so should replace it.
It is a datetime without timezone in UTC.
"""
return self._scene_processed_time
def get_dataset_size(self):
"""The size of the dataset in kilobytes as an integer."""
command = "du -sk %s | cut -f1" % self.get_dataset_path()
LOGGER.debug('executing "%s"', command)
result = execute(command)
if result['returncode'] != 0:
raise DatasetError('Unable to calculate directory size: ' +
'"%s" failed: %s' % (command, result['stderr']))
LOGGER.debug('stdout = %s', result['stdout'])
return int(result['stdout'])
def get_ll_lon(self):
"""The longitude of the lower left corner of the coverage area."""
return self._ll_lon
def get_ll_lat(self):
"""The lattitude of the lower left corner of the coverage area."""
return self._ll_lat
def get_lr_lon(self):
"""The longitude of the lower right corner of the coverage area."""
return self._lr_lon
def get_lr_lat(self):
"""The lattitude of the lower right corner of the coverage area."""
return self._lr_lat
def get_ul_lon(self):
"""The longitude of the upper left corner of the coverage area."""
return self._ul_lon
def get_ul_lat(self):
"""The lattitude of the upper left corner of the coverage area."""
return self._ul_lat
def get_ur_lon(self):
"""The longitude of the upper right corner of the coverage area."""
return self._ur_lon
def get_ur_lat(self):
"""The lattitude of the upper right corner of the coverage area."""
return self._ur_lat
def get_projection(self):
"""The coordinate refererence system of the image data."""
return self._projection
def GetProjectionRef(self):
return self._projectionRef
def get_ll_x(self):
"""The x coordinate of the lower left corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ll_x
def get_ll_y(self):
"""The y coordinate of the lower left corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ll_y
def get_lr_x(self):
"""The x coordinate of the lower right corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._lr_x
def get_lr_y(self):
"""The y coordinate of the lower right corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._lr_y
def get_ul_x(self):
"""The x coordinate of the upper left corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ul_x
def get_ul_y(self):
"""The y coordinate of the upper left corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ul_y
def get_ur_x(self):
"""The x coordinate of the upper right corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ur_x
def get_ur_y(self):
"""The y coordinate of the upper right corner of the coverage area.
This is according to the projection returned by get_projection.
"""
return self._ur_y
def get_x_pixels(self):
"""The width of the dataset in pixels."""
return self._x_size
def get_y_pixels(self):
"""The height of the dataset in pixels."""
return self._y_size
def get_gcp_count(self):
"""The number of ground control points?"""
return 0
def get_mtl_text(self):
"""Text information?"""
return ''
def get_cloud_cover(self):
"""Percentage cloud cover of the aquisition if available."""
return 0.0
def get_xml_text(self):
"""XML metadata text for the dataset if available."""
return ''
#
# Methods used for tiling
#
def get_geo_transform(self):
"""The affine transform between pixel and geographic coordinates.
This is a list of six numbers describing a transformation between
the pixel x and y coordinates and the geographic x and y coordinates
in dataset's coordinate reference system.
See http://www.gdal.org/gdal_datamodel for details.
"""
return self._geo_transform
def stack_bands(self, band_list):
"""Creates and returns a band_stack object from the dataset.
band_list: a list of band numbers describing the bands to
be included in the stack.
PRE: The numbers in the band list must refer to bands present
in the dataset. This method (or things that it calls) should
raise an exception otherwise.
POST: The object returned supports the band_stack interface
        (described below), allowing the datacube to chop the relevant
bands into tiles.
"""
return LandsatBandstack(self, self.band_dict)
|
|
"""
GRUB configuration - files `/boot/grub/grub2.cfg` and `/boot/grub.conf`
=======================================================================
This parser reads the configuration of the GRand Unified Bootloader, versions
1 or 2.
This is currently a fairly simple parsing process. Data read from the file
is put into roughly three categories:
* **configs**: lines read from the file that aren't boot options (i.e.
excluding lines that go in the *title* and *menuentry* sections). These
are split into pairs on the first '=' sign.
* **title**: (GRUB v1 only) lines prefixed by the word 'title'. All following
lines up to the next title line are folded together.
* **menuentry**: (GRUB v2 only) lines prefixed by the word 'menuentry'. All
following lines up to the line starting with '}' are treated as part of one
menu entry.
Each of these categories is (currently) stored as a simple list of tuples.
* For the list of **configs**, the tuples are (key, value) pairs based on
the line, split on the first '=' character. If nothing is found after the
'=' character, then the value is ``None``.
* For the **title** list, there will be exactly two items in this list:
* The first item will be a tuple of two items: 'title_name' and the
title of the boot option.
* The second item will be a tuple of two items: 'kernel' and the entire
rest of the kernel boot line as if it had been given all on one line.
* For the **menuentry** list:
* the first item will be a tuple of two items: 'menuentry_name' and the
full text between 'menuentry' and '{'.
* the rest of the items will be tuples of that line in the menu entry
configuration, split on the first space. If no space is found after the
first word, the value will be ``None``. So ``load_video`` will be stored
as ``('load_video', None)`` and ``set root='hd0,msdos1'`` will be stored
as ``('set', "root='hd0,msdos1'")``.
Note:
    For GRUB version 2, all lines between ``if`` and ``fi`` are ignored
    because we cannot analyze the result of the bash conditions.
There are several helper functions for dealing with the Intel IOMMU and for
extracting the kernel and initrd configurations available.
"""
from .. import Parser, parser, get_active_lines, defaults, LegacyItemAccess, AttributeDict
IOMMU = "intel_iommu=on"
GRUB_KERNELS = 'grub_kernels'
GRUB_INITRDS = 'grub_initrds'
class GrubConfig(LegacyItemAccess, Parser):
"""
Parser for configuration for both GRUB versions 1 and 2.
"""
def __init__(self, *args, **kwargs):
self._boot_entries = []
super(GrubConfig, self).__init__(*args, **kwargs)
def parse_content(self, content):
"""
Parse grub configuration file to create a dict with this structure::
{
"configs": [ (name, value), (name, value) ...],
"title": [
[(title_name, name), (cmd, opt), (cmd, opt) ...],
[(title_name, another_name), ...]
],
"menuentry": [
[(menuentry_name, its name), (cmd, opt), (cmd, opt) ...],
[(menuentry_name, another_name), ...]
],
}
"""
line_iter = iter(get_active_lines(content))
conf = {"configs": [], "title": [], "menuentry": []}
line = None
while (True):
try:
if line is None:
line = line_iter.next()
if line.startswith('title '):
last_line = _parse_title(line_iter, line, conf)
line = last_line
elif line.startswith('menuentry '):
_parse_menu_entry(line_iter, line, conf)
line = None
else:
conf["configs"].append(_parse_config(line))
line = None
except StopIteration:
self.data = conf
break
if not self.data.get('title'):
self.data.pop('title')
if not self.data.get('menuentry'):
self.data.pop('menuentry')
if not self.data.get('configs'):
self.data.pop('configs')
for line_full in self.data.get('title', []) + self.data.get('menuentry', []):
for name, line in line_full:
if name == 'menuentry_name' or name == 'title_name':
entry = {}
entry['name'] = line
elif entry and name.startswith(('kernel', 'linux')):
entry['cmdline'] = line
self._boot_entries.append(AttributeDict(entry))
break
@property
def boot_entries(self):
"""
Get all boot entries in GRUB configuration.
Returns:
(list): A list of AttributeDict objects for each boot entry in below format:
- 'name': "Red Hat Enterprise Linux Server"
- 'cmdline': "kernel /vmlinuz-2.6.32-431.11.2.el6.x86_64 crashkernel=128M rhgb quiet"
"""
return self._boot_entries
@property
@defaults()
def is_kdump_iommu_enabled(self):
"""
Does any kernel have 'intel_iommu=on' set?
Returns:
(bool): ``True`` when 'intel_iommu=on' is set, otherwise returns ``False``
"""
for line in self._boot_entries:
if line.cmdline and IOMMU in line.cmdline:
return True
return False
@property
@defaults()
def kernel_initrds(self):
"""
Get the `kernel` and `initrd` files referenced in GRUB configuration files
Returns:
(dict): Returns a dict of the `kernel` and `initrd` files referenced
in GRUB configuration files
"""
kernels = []
initrds = []
name_values = [(k, v) for k, v in self.data.get('configs', [])]
for value in self.data.get('title', []) + self.data.get('menuentry', []):
name_values.extend(value)
for name, value in name_values:
if name.startswith('module'):
if 'vmlinuz' in value:
kernels.append(_parse_kernel_initrds_value(value))
elif 'initrd' in value or 'initramfs' in value:
initrds.append(_parse_kernel_initrds_value(value))
elif (name.startswith(('kernel', 'linux'))):
if 'ipxe.lkrn' in value:
# Machine PXE boots the kernel, assume all is ok
return {}
elif 'xen.gz' not in value:
kernels.append(_parse_kernel_initrds_value(value))
elif name.startswith('initrd') or name.startswith('initrd16'):
initrds.append(_parse_kernel_initrds_value(value))
return {GRUB_KERNELS: kernels, GRUB_INITRDS: initrds}
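    # Hypothetical shape of the returned dict (the file names are
    # illustrative):
    #   {'grub_kernels': ['vmlinuz-2.6.32-431.17.1.el6.x86_64'],
    #    'grub_initrds': ['initramfs-2.6.32-431.17.1.el6.x86_64.img']}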
@parser("grub.conf")
class Grub1Config(GrubConfig):
"""
Parser for configuration for GRUB version 1.
Examples:
>>> grub1_content = '''
... default=0
... timeout=0
... splashimage=(hd0,0)/grub/splash.xpm.gz
... hiddenmenu
... title Red Hat Enterprise Linux Server (2.6.32-431.17.1.el6.x86_64)
... kernel /vmlinuz-2.6.32-431.17.1.el6.x86_64 crashkernel=128M rhgb quiet
... title Red Hat Enterprise Linux Server (2.6.32-431.11.2.el6.x86_64)
... kernel /vmlinuz-2.6.32-431.11.2.el6.x86_64 crashkernel=128M rhgb quiet
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {Grub1Config: Grub1Config(context_wrap(grub1_content))}
>>> config = shared[Grub1Config]
>>> config['configs']
[('default', '0'), ('timeout', '0'), ('splashimage', '(hd0,0)/grub/splash.xpm.gz'), ('hiddenmenu', None)]
>>> config['title'][0]
[('title_name', 'Red Hat Enterprise Linux Server (2.6.32-431.17.1.el6.x86_64)'), ('kernel', '/vmlinuz-2.6.32-431.17.1.el6.x86_64 crashkernel=128M rhgb quiet')]
>>> config['title'][1][0][1]
'Red Hat Enterprise Linux Server (2.6.32-431.11.2.el6.x86_64)'
>>> config.boot_entries[1].name
'Red Hat Enterprise Linux Server (2.6.32-431.11.2.el6.x86_64)'
>>> config.boot_entries[1].cmdline
"kernel /vmlinuz-2.6.32-431.11.2.el6.x86_64 crashkernel=128M rhgb quiet"
>>> config.is_kdump_iommu_enabled
False
>>> config.kernel_initrds['grub_kernels'][0]
'vmlinuz-2.6.32-431.17.1.el6.x86_64'
"""
def __init__(self, *args, **kwargs):
super(Grub1Config, self).__init__(*args, **kwargs)
self._version = 1
self._efi = False
def get_current_title(self):
"""
Get the current default title from the ``default`` option in the
main configuration. (GRUB v1 only)
Returns:
            list: A list containing all settings of the default boot entry:
- [(title_name, name), (cmd, opt), (cmd, opt) ...]
"""
# if no 'default' in grub.conf, set default to 0
idx = '0'
conf = self.data.get('configs', [])
for v in conf:
if v[0] == 'default':
idx = v[1]
if idx.isdigit():
idx = int(idx)
title = self.data['title']
if len(title) > idx:
return title[idx]
return None
@parser('grub2.cfg')
class Grub2Config(GrubConfig):
"""
Parser for configuration for GRUB version 2.
Examples:
>>> grub2_content = '''
... ### BEGIN /etc/grub.d/00_header ###
... set pager=1
... /
... if [ -s $prefix/grubenv ]; then
... load_env
... fi
... #[...]
... if [ x"${feature_menuentry_id}" = xy ]; then
... menuentry_id_option="--id"
... else
... menuentry_id_option=""
... fi
... #[...]
... ### BEGIN /etc/grub.d/10_linux ###
... menuentry 'Red Hat Enterprise Linux Workstation (3.10.0-327.36.3.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-123.13.2.el7.x86_64-advanced-fbff9f50-62c3-484e-bca5-d53f672cda7c' {
... load_video
... set gfxpayload=keep
... insmod gzio
... insmod part_msdos
... insmod ext2
... set root='hd0,msdos1'
... if [ x$feature_platform_search_hint = xy ]; then
... search --no-floppy --fs-uuid --set=root --hint-bios=hd0,msdos1 --hint-efi=hd0,msdos1 --hint-baremetal=ahci0,msdos1 --hint='hd0,msdos1' 1184ab74-77b5-4cfa-81d3-fb87b0457577
... else
... search --no-floppy --fs-uuid --set=root 1184ab74-77b5-4cfa-81d3-fb87b0457577
... fi
... linux16 /vmlinuz-3.10.0-327.36.3.el7.x86_64 root=/dev/RHEL7CSB/Root ro rd.lvm.lv=RHEL7CSB/Root rd.luks.uuid=luks-96c66446-77fd-4431-9508-f6912bd84194 crashkernel=128M@16M rd.lvm.lv=RHEL7CSB/Swap vconsole.font=latarcyrheb-sun16 rhgb quiet LANG=en_GB.utf8
... initrd16 /initramfs-3.10.0-327.36.3.el7.x86_64.img
... }
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {Grub2Config: Grub2Config(context_wrap(grub2_content))}
>>> config = shared[Grub2Config]
>>> config.boot_entries[0].name
"'Red Hat Enterprise Linux Workstation (3.10.0-327.36.3.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-123.13.2.el7.x86_64-advanced-fbff9f50-62c3-484e-bca5-d53f672cda7c'"
>>> config.boot_entries[0].cmdline
"linux16 /vmlinuz-3.10.0-327.36.3.el7.x86_64 root=/dev/RHEL7CSB/Root ro rd.lvm.lv=RHEL7CSB/Root rd.luks.uuid=luks-96c66446-77fd-4431-9508-f6912bd84194 crashkernel=128M@16M rd.lvm.lv=RHEL7CSB/Swap vconsole.font=latarcyrheb-sun16 rhgb quiet LANG=en_GB.utf8"
>>> config['configs']
[('set pager', '1'), ('/', None)]
>>> config['menuentry']
[[('menuentry_name', "'Red Hat Enterprise Linux Workstation (3.10.0-327.36.3.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-123.13.2.el7.x86_64-advanced-fbff9f50-62c3-484e-bca5-d53f672cda7c'"), ('load_video', None), ('set', 'gfxpayload=keep'), ('insmod', 'gzio'), ('insmod', 'part_msdos'), ('insmod', 'ext2'), ('set', "root='hd0,msdos1'"), ('linux16', '/vmlinuz-3.10.0-327.36.3.el7.x86_64 root=/dev/RHEL7CSB/Root ro rd.lvm.lv=RHEL7CSB/Root rd.luks.uuid=luks-96c66446-77fd-4431-9508-f6912bd84194 crashkernel=128M@16M rd.lvm.lv=RHEL7CSB/Swap vconsole.font=latarcyrheb-sun16 rhgb quiet LANG=en_GB.utf8'), ('initrd16', '/initramfs-3.10.0-327.36.3.el7.x86_64.img')]]
>>> config.kernel_initrds['grub_kernels'][0]
'vmlinuz-3.10.0-327.36.3.el7.x86_64'
>>> config.is_kdump_iommu_enabled
False
"""
def __init__(self, *args, **kwargs):
super(Grub2Config, self).__init__(*args, **kwargs)
self._version = 2
self._efi = False
@parser('grub2-efi.cfg')
class Grub2EFIConfig(GrubConfig):
"""Parses grub2 configuration for EFI-based systems"""
def __init__(self, *args, **kwargs):
super(Grub2EFIConfig, self).__init__(*args, **kwargs)
self._version = 2
self._efi = True
def _parse_line(sep, line):
"""
Parse a grub commands/config with format: cmd{sep}opts
Returns: (name, value): value can be None
"""
strs = line.split(sep, 1)
return (strs[0].strip(), None) if len(strs) == 1 else (strs[0].strip(), strs[1].strip())
def _parse_cmd(line):
"""
    Parse commands within the grub v1/v2 config using a space delimiter
"""
return _parse_line(" ", line)
def _parse_config(line):
"""
Parse configuration lines in grub v1/v2 config
"""
if "=" not in line:
return _parse_cmd(line)
else:
return _parse_line("=", line)
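# Behaviour of the helpers above, for reference (derived from the parsing rules,
# not from any particular fixture): splitting happens only on the first occurrence
# of the separator, and a missing value becomes ``None``.
#
#   _parse_line("=", "timeout=5")   -> ('timeout', '5')
#   _parse_cmd("insmod gzio")       -> ('insmod', 'gzio')
#   _parse_config("hiddenmenu")     -> ('hiddenmenu', None)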
def _parse_script(menu, line, line_iter):
"""
Eliminate any bash script contained in the grub v2 configuration
"""
ifIdx = 0
while (True):
line = line_iter.next()
if line.startswith("fi"):
if ifIdx == 0:
return
ifIdx -= 1
elif line.startswith("if"):
ifIdx += 1
def _parse_menu_entry(line_iter, cur_line, conf):
"""
Parse each `menuentry` that the grub v2 configuration contains
* Uses "_parse_script" to eliminate bash scripts
"""
menu = []
conf['menuentry'].append(menu)
n, entry = _parse_line("menuentry", cur_line)
entry_name, v = _parse_line("{", entry)
if not entry_name:
raise Exception("Cannot parse menuentry line: {}".format(cur_line))
menu.append(('menuentry_name', entry_name))
if v:
menu.append(_parse_cmd(v))
while (True):
line = line_iter.next()
if "{" in line:
n, v = _parse_line("{", line)
if v:
menu.append(_parse_cmd(v))
elif "}" in line:
n, v = _parse_line("}", line)
if n:
menu.append(_parse_cmd(n))
return
elif line.startswith("if"):
_parse_script(menu, line, line_iter)
else:
menu.append(_parse_cmd(line))
def _parse_title(line_iter, cur_line, conf):
"""
Parse "title" in grub v1 config
"""
title = []
conf['title'].append(title)
title.append(('title_name', cur_line.split('title', 1)[1].strip()))
while (True):
line = line_iter.next()
if line.startswith("title "):
return line
cmd, opt = _parse_cmd(line)
title.append((cmd, opt))
def _parse_kernel_initrds_value(line):
"""
Called by "kernel_initrds" method to parse the kernel and
initrds lines in the grub v1/v2 config
"""
return line.split()[0].split('/')[-1]
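# A minimal usage sketch (using the ``context_wrap`` test helper already shown in
# the docstrings above; the two-entry content below is a made-up fixture):
#
#   from insights.tests import context_wrap
#   content = "default=0\ntitle test\nkernel /vmlinuz-1 intel_iommu=on"
#   config = Grub1Config(context_wrap(content))
#   config.is_kdump_iommu_enabled              # -> True
#   config.kernel_initrds['grub_kernels']      # -> ['vmlinuz-1']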
|
|
#!/usr/bin/env python
import numpy as np
from scipy import signal
from scipy.ndimage import filters
# ================================================================
# General purpose funcs
# ================================================================
# ------------------------ create classic patterns
def cylinder(length, amp=1, ampStd=.02, noiseStd=.02):
"""noisy elevated section, from CBF dataset"""
amp = amp + np.random.randn(length) * ampStd
return np.random.randn(length) * noiseStd + amp
def bell(length, amp=1, ampStd=.02, noiseStd=.02):
"""noisy ascending ramp, from CBF dataset"""
amp = amp + np.random.randn(1) * ampStd
return np.linspace(0, amp, length) + np.random.randn(length) * noiseStd
def funnel(length, amp=1, ampStd=.02, noiseStd=.02):
"""noisy descending ramp, from CBF dataset"""
return bell(length, amp, ampStd, noiseStd)[::-1]
def sines(length, amp=1, ampStd=.02, noiseStd=.02, periods=1, periodOffset=0):
amp = amp + np.random.randn() * ampStd
tstart = 2 * np.pi * periodOffset
tend = 2 * np.pi * (periods + periodOffset)
t = np.linspace(tstart, tend, length, endpoint=False)
return np.sin(t) * amp + np.random.randn(length) * noiseStd
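# Quick reference for the pattern generators above (each returns a 1D numpy array
# of the requested length; the lengths below are arbitrary examples):
#
#   c = cylinder(64)             # noisy plateau at roughly amp
#   r = bell(64)                 # noisy ramp from 0 up to roughly amp
#   f = funnel(64)               # noisy ramp from roughly amp down to 0
#   s = sines(64, periods=2)     # two periods of a noisy sine wave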
# def warpedSines(length, firstPieceFrac=.33, splitFrac=.5, periods=1, **kwargs):
# firstPieceLen = int(firstPieceFrac * length)
# secondPieceLen = length - firstPieceLen
# firstPiecePeriods = splitFrac * periods
# secondPiecePeriods = (1. - splitFrac) * periods
# sine1 = sines(firstPieceLen, periods=firstPiecePeriods, **kwargs)
# phaseShiftFrac = splitFrac + .5 / secondPieceLen
# # phaseShiftFrac = splitFrac
# sine2 = sines(secondPieceLen, periods=secondPiecePeriods,
# periodOffset=phaseShiftFrac, **kwargs)
# return np.r_[sine1, sine2]
def warpedSines(length, origFracs=.5, newFracs=.33, periods=1, **kwargs):
if not origFracs:
raise ValueError("Argument origFracs is required; received {}".format(
origFracs))
if not newFracs:
raise ValueError("Argument newFracs is required; received {}".format(
newFracs))
# ensure fractions are collections
if not hasattr(origFracs, '__len__'):
origFracs = [origFracs]
if not hasattr(newFracs, '__len__'):
newFracs = [newFracs]
# ensure fractions given are between 0 and 1
if np.min(origFracs) < 0.:
raise ValueError("origFracs contained values < 0!")
if np.max(origFracs) > 1.:
raise ValueError("origFracs contained values > 1!")
if np.min(newFracs) < 0.:
raise ValueError("newFracs contained values < 0!")
if np.max(newFracs) > 1.:
raise ValueError("newFracs contained values > 1!")
# have each start with 0, end with 1, and be monotonic and nonrepeated
origFracs = [0] + sorted(origFracs) + [1.]
newFracs = [0] + sorted(newFracs) + [1.]
origFracs = np.unique(origFracs)
newFracs = np.unique(newFracs)
if len(origFracs) != len(newFracs):
raise IndexError("origFracs length {} != newFracs length {}".format(
len(origFracs), len(newFracs)))
print "origFracs", origFracs
print "newFracs", newFracs
pieces = []
numPieces = len(origFracs) - 1
# for i, origFrac in enumerate(origFracs[:-1]): # :-1 since we appended a 1
for i in range(numPieces): # :-1 since we appended a 1
origFrac = origFracs[i]
newFrac = newFracs[i]
# determine end of this piece
# if isLastPiece: # this is last piece
# nextOrigFrac = 1.
# nextNewFrac = 1.
# else:
# nextOrigFrac = origFracs[i+1]
# nextNewFrac = newFracs[i+1]
origFrac = origFracs[i]
newFrac = newFracs[i]
nextOrigFrac = origFracs[i+1]
nextNewFrac = newFracs[i+1]
deltaOrigFrac = nextOrigFrac - origFrac
deltaNewFrac = nextNewFrac - newFrac
isLastPiece = i == (numPieces - 1)
if isLastPiece: # ensure output is correct length (despite rounding)
pieceLen = length - sum([len(piece) for piece in pieces])
else:
pieceLen = int(deltaNewFrac * length)
print "creating piece of len", pieceLen
piecePeriods = deltaOrigFrac * periods
piecePeriodOffset = origFrac
sinewave = sines(pieceLen, periods=piecePeriods,
periodOffset=piecePeriodOffset, **kwargs)
pieces.append(sinewave)
return np.hstack(pieces)
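# Example of the time-warping above (a sketch of the intended semantics, not a
# tested fixture): origFracs/newFracs describe where fractions of the original
# sine cycle end up in the output. With origFracs=.5 and newFracs=.33, the first
# half of the cycle is squeezed into the first third of the output and the
# second half is stretched over the remaining two thirds:
#
#   w = warpedSines(300, origFracs=.5, newFracs=.33, periods=1)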
def randconst(shape, mean=0., std=1.):
try:
return np.random.randn(*shape) * std + mean
    except TypeError:  # shape is a scalar length, not a tuple
return np.random.randn(shape) * std + mean
def randwalk(shape, std=1):
try:
if len(shape) > 1 and not any([dim == 1 for dim in shape]): # specify axis=1 if 2D
return np.cumsum(randconst(shape, std=std), axis=1)
return np.cumsum(randconst(shape, std=std))
    except TypeError:  # shape is a scalar length, not a tuple
return np.cumsum(randconst(shape, std=std))
def notSoRandomWalk(shape, std=1, trendFilterLength=32, lpfLength=16):
"""bandpass filter a random walk so that the low-frequency trend /
drift is eliminated and the high-frequency noise is attenuated"""
walk = randwalk(shape, std=std)
filt = np.hamming(trendFilterLength)
filt /= np.sum(filt)
whichAxis = len(walk.shape) > 1 # 0 iff 1d, else 1
# subtract baseline drift, roughly
trend = filters.convolve1d(walk, weights=filt, axis=whichAxis, mode='reflect')
walk -= trend
    # attenuate the high-frequency noise (smooth out spikes)
walk = filters.convolve1d(walk, weights=np.hamming(lpfLength), axis=whichAxis, mode='reflect')
return walk
def randWithFreqMagMatching(X, shape=None):
"""Given a data matrix X, returns a matrix of the same dimensions
whose rows have the same frequency magnitude spectra as the rows of
X, but randomly shuffled phases.
"""
if shape is None:
shape = X.shape
whichAxis = len(shape) > 1 # 1D -> 0; 2D -> 1
avgFFT = np.fft.fft(X, axis=whichAxis)
mags = np.absolute(avgFFT)
phases = np.angle(avgFFT)
if whichAxis:
for i in range(len(X)):
np.random.shuffle(phases[i]) # shuffle phase of each row in place
else:
np.random.shuffle(phases)
noiseFFT = mags * np.exp(1.j * phases)
noise = np.fft.ifft(noiseFFT)
return noise
# ------------------------ utility funcs
def embedSubseq(fullseq, subseq, startIdx=None, sameMean=True):
if startIdx is None:
maxStartIdx = len(fullseq) - len(subseq)
startIdx = np.random.choice(np.arange(maxStartIdx + 1))
endIdx = startIdx+len(subseq)
mean = np.mean(fullseq[startIdx:endIdx])
# print "embedSubseq(), ", len(subseq), startIdx, endIdx, mean*sameMean
fullseq[startIdx:endIdx] = subseq + mean*sameMean
def createMotif(background, instance1, instance2, sameMean=False,
returnStartIdxs=False, **sink):
maxIdx1 = len(background)/2 - len(instance1) # can't encroach on 2nd half
start1 = np.random.choice(np.arange(maxIdx1))
maxEndIdx2 = (len(background) - len(instance2)) - len(background)/2 # can't encroach on 1st half
start2 = np.random.choice(np.arange(maxEndIdx2)) + len(background)/2
seq = background.copy()
embedSubseq(seq, instance1, start1, sameMean)
embedSubseq(seq, instance2, start2, sameMean)
if returnStartIdxs:
return seq, start1, start2
return seq
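# A minimal usage sketch for the embedding helpers above: plant two noisy funnel
# shapes in a flat noise background and recover their start indices.
#
#   bg = randconst(400, std=.02)
#   seq, s1, s2 = createMotif(bg, funnel(50), funnel(50), returnStartIdxs=True)
#   # s1 falls in the first half of ``seq`` and s2 in the second half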
def seedRng(seed):
if seed:
np.random.seed(seed)
def randWarpingPath(seqLength, stepConstraints=True, reallyWarped=False):
# step constraints = at most one in each direction
maxIdx = seqLength - 1
i = 0
j = 0
wasHorz = False
wasVert = False
path = [(0,0)]
# random choices--equiprobable by default
horzThresh = .33
vertThresh = .67
# actively give it a weird warping path by just increasing i for a while
if reallyWarped:
for k in range(1, int(maxIdx / 2)):
i = k
j = int(k / 4)
path.append((i, j))
horzThresh = .5
vertThresh = .75
canIncrement_i = i < maxIdx
canIncrement_j = j < maxIdx
while canIncrement_i or canIncrement_j:
randNum = np.random.rand()
if (not canIncrement_i) or (canIncrement_j and randNum < horzThresh and not wasHorz):
# horizontal step
j += 1
wasHorz = True and stepConstraints
wasVert = False
elif (not canIncrement_j) or (canIncrement_i and randNum < vertThresh and not wasVert):
# vertical step
i += 1
wasHorz = False
wasVert = True and stepConstraints
elif canIncrement_i and canIncrement_j:
# diagonal step
i += 1
j += 1
wasHorz = False
wasVert = False
path.append((i,j))
canIncrement_i = i < maxIdx
canIncrement_j = j < maxIdx
return path
def warpedSeq(seq, sameLength=True, useI=True, **kwargs):
path = randWarpingPath(len(seq), **kwargs) # list of (i,j) pairs
idxs_i, idxs_j = zip(*path) # tuple of i vals, tuple of j vals
idxs = idxs_i if useI else idxs_j # use i idxs or j idxs
warped = seq[np.asarray(idxs)]
if sameLength:
warped = signal.resample(warped, len(seq))
return warped
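# Sketch: produce a time-warped copy of a sequence with the same length, using
# the random warping path above (stepConstraints forbids two consecutive
# horizontal steps, and likewise two consecutive vertical steps).
#
#   x = sines(128, periods=2)
#   y = warpedSeq(x, sameLength=True, reallyWarped=True)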
def appendZeros(A, length, axis=1):
if length < 1:
return A
A = np.asarray(A)
if len(A.shape) == 1:
return np.r_[A, np.zeros(length)]
if axis == 0:
return np.vstack((A, np.zeros((length, A.shape[1]))))
if axis == 1:
return np.hstack((A, np.zeros((A.shape[0], length))))
def ensure2D(X): # TODO if we add deps, use impl of this in arrays.py
X = np.asarray(X)
if len(X.shape) == 1:
X = X.reshape((-1, 1)) # ensure 2d array
return X
def ensureIterable(seqs): # TODO deprecated; use ensureIsCollection
if not isinstance(seqs, (set, frozenset, list, tuple)):
seqs = [seqs]
return seqs
def ensureIsCollection(seqs):
if not isinstance(seqs, (set, frozenset, list, tuple)):
seqs = [seqs]
return seqs
def addNoiseDims(X, numToAdd=-1, noiseType='randwalk'):
X = ensure2D(X)
if numToAdd == 0:
return X
if numToAdd < 0:
numToAdd = X.shape[1]
elif numToAdd < 1.:
numToAdd = int(X.shape[1] * numToAdd)
noiseShape = (X.shape[0], numToAdd)
if noiseType is None or noiseType == 'randwalk':
noise = randwalk(noiseShape)
elif noiseType == 'white' or noiseType == 'gaussian':
noise = randconst(noiseShape)
else:
raise ValueError("Unrecognized noise type: {}".format(noiseType))
return np.hstack((X, ensure2D(noise)))
def addAdversarialDims(X, numToAdd, startIdxs, endIdxs):
X = ensure2D(X)
if numToAdd == 0:
return X
if numToAdd < 0:
numToAdd = X.shape[1]
elif numToAdd < 1.:
numToAdd = int(X.shape[1] * numToAdd)
signalLength = len(X)
newDims = []
for i in range(numToAdd):
sig = createAdversarialSignal(startIdxs, endIdxs, signalLength)
newDims.append(sig.reshape((-1,1))) # col vect
newDims = np.hstack(newDims)
return np.hstack((X, newDims))
def createAdversarialSignal(startIdxs, endIdxs, signalLength):
assert(len(startIdxs) == len(endIdxs))
numInstances = len(startIdxs)
# avgLength =
pairs = np.arange(numInstances)
if len(pairs) % 2 != 0: # handle odd numbers of instances
pairs = np.append(pairs, np.random.choice(pairs)) # duplicate some idx
np.random.shuffle(pairs)
pairs = pairs.reshape((-1, 2)) # random
out = np.zeros(signalLength)
for pair in pairs:
start1, end1 = startIdxs[pair[0]], endIdxs[pair[0]]
start2, end2 = startIdxs[pair[1]], endIdxs[pair[1]]
length1 = end1 - start1 # end idxs not inclusive
length2 = end2 - start2
subseq = randwalk(length1)
negSeq = -subseq
if length1 != length2:
negSeq = signal.resample(negSeq, length2)
out[start1:end1] = subseq
out[start2:end2] = negSeq
return out
# ================================================================
# Create particular synthetic datasets (for prototyping / smoke testing)
# ================================================================
DEFAULT_MOTIF_LEN = 400
DEFAULT_INSTANCE_LEN = 50
# ------------------------------------------------
# Single time series
# ------------------------------------------------
def trianglesMotif(noise=.02, backgroundNoise=.02, seed=None, **kwargs):
seedRng(seed)
background = randconst(DEFAULT_MOTIF_LEN, std=backgroundNoise)
m = DEFAULT_INSTANCE_LEN
inst1 = funnel(m, ampStd=0, noiseStd=noise)
inst2 = funnel(m, ampStd=0, noiseStd=noise)
return createMotif(background, inst1, inst2, **kwargs), m
def rectsMotif(noise=.02, backgroundNoise=.02, seed=None, **kwargs):
seedRng(seed)
background = randconst(DEFAULT_MOTIF_LEN, std=backgroundNoise)
m = DEFAULT_INSTANCE_LEN
inst1 = cylinder(m, ampStd=0, noiseStd=noise)
inst2 = cylinder(m, ampStd=0, noiseStd=noise)
return createMotif(background, inst1, inst2, sameMean=False, **kwargs), m
def sinesMotif(noise=0, backgroundNoise=.02, periods=1, seed=None, **kwargs):
seedRng(seed)
background = randconst(DEFAULT_MOTIF_LEN, std=backgroundNoise)
m = DEFAULT_INSTANCE_LEN
inst1 = sines(m, ampStd=0, noiseStd=noise, periods=periods)
inst2 = sines(m, ampStd=0, noiseStd=noise, periods=periods)
return createMotif(background, inst1, inst2, sameMean=False, **kwargs), m
def multiShapesMotif(noise=0, backgroundNoise=.02, periods=1, seed=None, **kwargs):
seedRng(seed)
background = randconst((DEFAULT_MOTIF_LEN,3)) * backgroundNoise
m = DEFAULT_INSTANCE_LEN
inst1 = np.c_[funnel(m, ampStd=0, noiseStd=noise),
sines(m, ampStd=0, noiseStd=noise, periods=periods),
bell(m, ampStd=0, noiseStd=noise)]
inst2 = np.c_[funnel(m, ampStd=0, noiseStd=noise),
sines(m, ampStd=0, noiseStd=noise, periods=periods),
bell(m, ampStd=0, noiseStd=noise)]
return createMotif(background, inst1, inst2, sameMean=False, **kwargs), m
def makeThreeTriangles(length=400, m=40, noise=0, returnStartIdxs=False):
patterns = [funnel(m, noiseStd=noise) for i in range(3)]
startIdxs = [60, 160, 330]
seq = randconst(length, std=.05) # noise std dev = .05
for idx, p in zip(startIdxs, patterns):
embedSubseq(seq, p, idx)
    return (seq, startIdxs) if returnStartIdxs else seq
def makeTwoTriangles(length=400, m=40, noise=0, returnStartIdxs=False):
if hasattr(m, '__len__'):
patterns = [funnel(mi, noiseStd=noise) for mi in m]
else:
patterns = [funnel(m, noiseStd=noise) for i in range(2)]
startIdxs = [110, 280]
seq = randconst(length, std=.05) # noise std dev = .05
for idx, p in zip(startIdxs, patterns):
embedSubseq(seq, p, idx)
    return (seq, startIdxs) if returnStartIdxs else seq
# ------------------------------------------------
# Multiple time series
# ------------------------------------------------
def collectionOfTsUsingCreationFunc(func, count, **kwargs):
return [func(**kwargs) for i in range(count)]
def alignSequentialEndpoints(seqs):
"""add values to each seq in seqs such that seqs[i, -1] = seqs[i+1, 0]"""
aligned = [seqs[0]]
prevSeq = seqs[0]
# prevSeq = ensure2D(seqs[0])
for seq in seqs[1:]:
# seq = ensure2D(seq)
gap = prevSeq[-1] - seq[0]
adjustedSeq = seq + gap
prevSeq = adjustedSeq
aligned.append(adjustedSeq)
return aligned
def concatSeqs(seqs, axis=0):
asArrays = [np.asarray(seq) for seq in seqs]
numDims = [len(seq.shape) for seq in asArrays]
assert(len(np.unique(numDims)) == 1) # must all have same dimensionality
if numDims[0] == 1 or axis == 1:
return np.hstack(asArrays)
elif numDims[0] == 2 and axis == 0:
return np.vstack(asArrays)
else:
raise ValueError("Does not support ndarrays with n > 2")
def concatWithAlignedEndpoints(seqs):
alignedSeqs = alignSequentialEndpoints(seqs)
return concatSeqs(alignedSeqs)
def createPadding(seqs, minPaddingFractionOfLength=1., maxPaddingFractionOfLength=1.,
padFunc=randwalk, paddingStdDevRatio=1., **kwargs):
"""creates n+1 padding seqs to go around the n seqs based on their lengths"""
# ensure seqs is a collection so we can iterate thru it
# wasCollection = True
seqs = ensureIterable(seqs)
# determine shape of padding for each seq
seqLengths = np.array([len(seq) for seq in seqs])
minLengths = minPaddingFractionOfLength * seqLengths
maxLengths = maxPaddingFractionOfLength * seqLengths
maxLengths = np.maximum(minLengths, maxLengths)
lengthDiffs = maxLengths - minLengths
padLengths = np.random.rand(len(seqs)) * lengthDiffs + minLengths
padLengths = padLengths.astype(np.int)
if len(seqs[0].shape) > 1:
nDims = seqs[0].shape[1]
padShapes = [[padLen, nDims] for padLen in padLengths]
padShapes.append(padShapes[-1])
padShapes = np.array(padShapes)
else:
padShapes = np.append(padLengths, padLengths[-1]) # have padding after end
# create padding; should have variance that's the specified ratio of the
# original data's variance so that the shape of the padding is meaningful
padding = [padFunc(shape=padShape, **kwargs) for padShape in padShapes]
if paddingStdDevRatio > 0.:
seqStd = np.mean([np.std(seq) for seq in seqs])
paddingStd = np.mean([np.std(pad) for pad in padding])
currentRatio = paddingStd / seqStd
multiplyBy = paddingStdDevRatio / currentRatio
padding = [pad * multiplyBy for pad in padding]
return padding
def concatWithPadding(seqs, **paddingKwargs):
seqs = ensureIterable(seqs)
seqs = [ensure2D(seq) for seq in seqs]
padding = createPadding(seqs, **paddingKwargs)
padding = [ensure2D(pad) for pad in padding]
totalSeqLength = np.sum([len(seq) for seq in seqs])
totalPadLength = np.sum([len(pad) for pad in padding])
totalLen = totalSeqLength + totalPadLength
nDims = seqs[0].shape[1]
padded = np.empty((totalLen, nDims))
seqStartIdxs = np.empty(len(seqs), dtype=np.int)
seqEndIdxs = np.empty(len(seqs), dtype=np.int)
currentIdx = 0
prevValue = 0.
for i, seq in enumerate(seqs):
seqLen = len(seq)
pad = padding[i]
padLen = len(pad)
pad += (prevValue - pad[0]) # force equal endpoints
prevValue = pad[-1]
padded[currentIdx:(currentIdx+padLen)] = pad
currentIdx += padLen
seq += (prevValue - seq[0])
prevValue = seq[-1]
padded[currentIdx:(currentIdx+seqLen)] = seq
seqStartIdxs[i] = currentIdx
currentIdx += seqLen
seqEndIdxs[i] = currentIdx
finalPad = padding[-1]
padded[currentIdx:] = finalPad + (prevValue - finalPad[0])
return padded, seqStartIdxs, seqEndIdxs
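# Sketch of the padding/concatenation pipeline above: pad three short sequences
# with random-walk filler and keep track of where each original sequence landed.
#
#   seqs = [sines(50), sines(60), sines(70)]
#   padded, startIdxs, endIdxs = concatWithPadding(seqs, paddingStdDevRatio=.5)
#   # padded[startIdxs[i]:endIdxs[i]] is seqs[i], shifted so endpoints line up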
def makeWhiteNoiseSeqs(count=10, shape=50, **kwargs):
return collectionOfTsUsingCreationFunc(randconst, count, shape=shape, **kwargs)
def makeRandWalkSeqs(count=10, shape=50, **kwargs):
return collectionOfTsUsingCreationFunc(randwalk, count, shape=shape, **kwargs)
def makeTriangleSeqs(count=2, shape=50, **kwargs):
return collectionOfTsUsingCreationFunc(funnel, count, length=shape, **kwargs)
def makeSinesSeqs(count=2, shape=50, **kwargs):
return collectionOfTsUsingCreationFunc(sines, count, length=shape, **kwargs)
def makeSinesDataset(numSines=2, numNoise=10, startIdx=60, warped=False, **kwargs):
# sines = makeSinesSeqs(numSines, shape=80, noiseStd=0.0, **kwargs)
sines = makeSinesSeqs(numSines, shape=80, noiseStd=0.1, **kwargs)
if warped:
sines = map(lambda s: warpedSeq(s), sines)
background = makeRandWalkSeqs(numNoise, shape=200, std=.5, **kwargs)
for i, s in enumerate(sines):
embedSubseq(background[i], s, startIdx)
return background
# ================================================================
# Testing
# ================================================================
if __name__ == '__main__':
import matplotlib.pyplot as plt
# for i in range(5):
# plt.plot(cylinder(20), 'g')
# plt.plot(bell(20), 'b')
# plt.plot(funnel(20), 'k')
# plt.plot(sines(20), 'r')
# plt.plot(createMotif(randwalk(200, std=.1), bell(30), bell(32)))
# plt.plot(createMotif(randwalk(200, std=.1), sines(30), sines(32)))
# plt.plot(trianglesMotif()[0], lw=2)
# plt.plot(rectsMotif()[0], lw=2)
plt.plot(sinesMotif()[0], lw=2)
plt.show()
|
|
import argparse
import getpass
import os
from os.path import expanduser
import requests
import sys
import time
import yaml
from yaml import SafeDumper
import logging
class ConfigurationCache():
def __init__(self, filename=None):
self.cache = None
self.filename = filename if filename is not None else os.path.join(expanduser('~'), '.highwinds')
def read(self):
with os.fdopen(os.open(self.filename, os.O_RDONLY | os.O_CREAT, 0600), 'r') as f:
            self.cache = yaml.safe_load(f)
if self.cache is None:
self.cache = {}
return self.cache
def set(self, key, value):
if self.cache is None:
self.read()
self.cache[key] = value
with os.fdopen(os.open(self.filename, os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
return yaml.dump(self.cache, f, Dumper=SafeDumper, default_flow_style=False)
def get(self, key, default=None):
if self.cache is None:
self.read()
return self.cache.get(key, default)
class APIError(Exception):
def __init__(self, message, context):
super(APIError, self).__init__(message)
self.context = context
class APIClient:
def __init__(self, base_url='https://striketracker.highwinds.com', token=None):
self.base_url = base_url
self.token = token
def version(self):
response = requests.get(self.base_url + '/version')
return response.headers['X-Cdnws-Version']
def me(self):
user_response = requests.get(
self.base_url + '/api/v1/users/me', headers={'Authorization': 'Bearer %s' % self.token})
if user_response.status_code == 200:
return user_response.json()
else:
raise APIError('Could not fetch user details', user_response)
def get_host(self, account, host):
response = requests.get(
self.base_url + '/api/v1/accounts/{account}/hosts/{host}'.format(account=account, host=host),
headers={'Authorization': 'Bearer %s' % self.token})
if response.status_code == 200:
return response.json()
else:
raise APIError('Could not fetch host', response)
def create_host(self, account, host):
response = requests.post(
            self.base_url + '/api/v1/accounts/{account}/hosts'.format(account=account),
headers={
'Authorization': 'Bearer %s' % self.token,
'Content-Type': 'application/json'
},
json=host)
if response.status_code == 201:
return response.json()
else:
raise APIError('Could not create host', response)
def create_scope(self, account, host, scope):
response = requests.post(
self.base_url + '/api/v1/accounts/{account}/hosts/{host}/configuration/scopes'
.format(account=account, host=host),
headers={
'Authorization': 'Bearer %s' % self.token,
'Content-Type': 'application/json'
},
json=scope)
if response.status_code == 200:
return response.json()
else:
raise APIError('Could not create scope', response)
def update_configuration(self, account, host, scope, configuration):
response = requests.put(
self.base_url + '/api/v1/accounts/{account}/hosts/{host}/configuration/{scope}'
.format(account=account, host=host, scope=scope),
headers={
'Authorization': 'Bearer %s' % self.token,
'Content-Type': 'application/json'
},
json=configuration)
if response.status_code == 200:
return response.json()
else:
raise APIError('Could not update configuration', response)
def get_configuration(self, account, host, scope):
response = requests.get(
self.base_url + '/api/v1/accounts/{account}/hosts/{host}/configuration/{scope}'
.format(account=account, host=host, scope=scope),
headers={
'Authorization': 'Bearer %s' % self.token,
'Content-Type': 'application/json'
})
if response.status_code == 200:
return response.json()
else:
raise APIError('Could not fetch configuration', response)
def create_token(self, username, password, application=None):
if application is None:
application = 'StrikeTracker Python client'
# Grab an access token to use to fetch user
response = requests.post(self.base_url + '/auth/token', data={
"username": username, "password": password, "grant_type": "password"
}, headers={
'User-Agent': application
})
auth = response.json()
if 'access_token' not in auth:
raise APIError('Could not fetch access token', response)
access_token = auth['access_token']
# Grab user's id and root account hash
user_response = requests.get(self.base_url + '/api/v1/users/me', headers={'Authorization': 'Bearer %s' % access_token})
user = user_response.json()
if 'accountHash' not in user or 'id' not in user:
raise APIError('Could not fetch user\'s root account hash', user_response)
account_hash = user['accountHash']
user_id = user['id']
# Generate a new API token
token_response = requests.post(self.base_url + ('/api/v1/accounts/{account_hash}/users/{user_id}/tokens'.format(
account_hash=account_hash, user_id=user_id
)), json={
"password": password, "application": application
}, headers={
'Authorization': 'Bearer %s' % access_token,
'Content-Type': 'application/json'
})
if 'token' not in token_response.json():
raise APIError('Could not generate API token', token_response)
self.token = token_response.json()['token']
return self.token
def purge(self, account_hash, urls):
purge_response = requests.post(self.base_url + ('/api/v1/accounts/%s/purge' % account_hash), json={
"list": urls
}, headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % self.token
})
if 'id' not in purge_response.json():
raise APIError('Could not send purge batch', purge_response)
return purge_response.json()['id']
def purge_status(self, account_hash, job_id):
status_response = requests.get(self.base_url + ('/api/v1/accounts/%s/purge/%s' % (account_hash, job_id,)), headers={
'Authorization': 'Bearer %s' % self.token,
})
if 'progress' not in status_response.json():
raise APIError('Could not fetch purge status', status_response)
return float(status_response.json()['progress'])
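# A minimal usage sketch of the client above (the account hash, credentials and
# URL below are placeholders, not real values):
#
#   client = APIClient()
#   client.create_token('someuser', 'somepassword')   # also caches client.token
#   job_id = client.purge('a1b2c3', [{'url': 'http://cdn.example.com/style.css'}])
#   while client.purge_status('a1b2c3', job_id) < 1.0:
#       time.sleep(0.5)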
def command(arguments=()):
def apply_args(fn):
def wrapper(self, *args, **kwargs):
# Apply arguments
for arg in arguments:
name = arg['name']
arg_copy = arg.copy()
del arg_copy['name']
self.parser.add_argument(name, **arg_copy)
self.args = self.parser.parse_args()
# Optionally turn on verbose logging
if self.args.verbose:
try:
import http.client as http_client
except ImportError:
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Load token store
if not self.args.token:
self.client.token = self.cache.get('token')
else:
self.client.token = self.args.token
# Call original function
fn(self, *args, **kwargs)
return wrapper
return apply_args
def authenticated(fn):
def wrapper(self, *args, **kwargs):
if self.client.token is None:
sys.stderr.write(
"This command requires authentication. Either run `striketracker init` to cache credentials locally, or "
"supply the --token parameter on the command line.\n")
exit(1)
fn(self, *args, **kwargs)
return wrapper
class Command:
def __init__(self, cache=None):
# Instantiate library
base_url = os.environ.get('STRIKETRACKER_BASE_URL', 'https://striketracker.highwinds.com')
self.client = APIClient(base_url)
self.cache = ConfigurationCache(cache)
# Read in command line arguments
self.parser = argparse.ArgumentParser(description='Command line interface to the Highwinds CDN')
methodList = [method for method in dir(self) if callable(getattr(self, method)) and method[0] != '_']
methodList.sort()
self.parser.add_argument('action', help=",".join(methodList))
self.parser.add_argument('--token', help='Token to use for this action')
self.parser.add_argument('-v', '--verbose', help='Turn on verbose logging', action='store_true')
# Call command
command = sys.argv[1] if len(sys.argv) > 1 else None
if len(sys.argv) == 1 or "-" in sys.argv[1]:
self.parser.print_help(file=sys.stdout)
elif hasattr(self, sys.argv[1]):
getattr(self, sys.argv[1])()
else:
sys.stderr.write("Unknown command: %s\n" % command)
def _print(self, obj):
yaml.dump(obj, sys.stdout, Dumper=SafeDumper, default_flow_style=False)
def _error(self, e):
sys.stderr.write(e.message + "\n")
try:
sys.stderr.write(e.context.json()['error'] + "\n")
except:
pass
exit(1)
@command([
{'name': '--application', 'help': 'Name of application with which to register this token'}
])
def init(self):
sys.stdout.write("Initializing configuration...\n")
if self.args.token:
token = self.args.token
else:
token = self.client.create_token(
username=raw_input('Username: '),
password=getpass.getpass(),
application=self.args.application if hasattr(self.args, 'application') else None
)
self.cache.set('token', token)
sys.stdout.write('Successfully saved token\n')
@command()
def version(self):
sys.stdout.write(self.client.version())
sys.stdout.write("\n")
@command()
@authenticated
def me(self):
user = self.client.me()
self._print(user)
@command([
{'name': 'account', 'help': 'Account from which to purge assets'},
{'name': '--poll', 'help': 'Poll for purge status to be complete instead of returning id',
'action': 'store_true'},
{'name': '--invalidate-only', 'help': 'Force revalidation on assets instead of removing them',
'action': 'store_true'},
        {'name': '--purge-all-dynamic', 'help': 'Purge all dynamic versions of the asset',
'action': 'store_true'},
{'name': '--recursive', 'help': 'Purge all assets at this path recursively',
'action': 'store_true'},
])
@authenticated
def purge(self):
sys.stderr.write('Reading urls from stdin\n')
urls = []
for url in sys.stdin:
urls.append({
"url": url.strip(),
"purgeAllDynamic": self.args.purge_all_dynamic,
"recursive": self.args.recursive,
"invalidateOnly": self.args.invalidate_only
})
# Send batch to CDN
try:
job_id = self.client.purge(self.args.account, urls)
except APIError as e:
self._error(e)
# Optionally poll for progress
if self.args.poll:
progress = 0.0
sys.stderr.write('Sending purge...')
while progress < 1.0:
progress = self.client.purge_status(self.args.account, job_id)
sys.stderr.write('.')
time.sleep(0.1)
sys.stderr.write('Done!\n')
else:
sys.stdout.write(job_id)
sys.stdout.write("\n")
@command([
{'name': 'account', 'help': 'Account from which to purge assets'},
{'name': 'job_id', 'help': 'Job id for which to fetch status'},
])
@authenticated
def purge_status(self):
        sys.stdout.write(str(self.client.purge_status(self.args.account, self.args.job_id)))
sys.stdout.write("\n")
@command([
        {'name': 'account', 'help': 'Account to which the host belongs'},
        {'name': 'host', 'help': 'Hash of host to fetch'},
])
@authenticated
def get_host(self):
try:
host = self.client.get_host(self.args.account, self.args.host)
except APIError as e:
self._error(e)
self._print(host)
@command([
        {'name': 'account', 'help': 'Account to which the host belongs'},
{'name': 'host', 'help': 'Hash of host to clone'},
])
@authenticated
def clone_host(self):
try:
# Grab host to clone
host = self.client.get_host(self.args.account, self.args.host)
# Create new host
new_host = self.client.create_host(self.args.account, {
"name": "%s (copy)" % host['name'],
"services": host['services']
})
except APIError as e:
self._error(e)
sys.stdout.write("\nHost:\n")
yaml.dump(new_host, sys.stdout, Dumper=SafeDumper, default_flow_style=False)
# Iterate over the source's scopes
sys.stdout.write("\nConfiguration:")
for scope in host['scopes']:
# Create each required scope
try:
new_scope = self.client.create_scope(self.args.account, new_host['hashCode'], {
"platform": scope['platform'],
"path": scope['path']
})
# Get configuration from source
old_configuration = self.client.get_configuration(
self.args.account, self.args.host, scope['id'])
# Delete scope and hostnames
del old_configuration['scope']
if 'hostname' in old_configuration.keys():
del old_configuration['hostname']
# Delete IDs
def strip_ids(typeInstance):
if 'id' in typeInstance:
del typeInstance['id']
for typeName, confType in old_configuration.iteritems():
if type(confType) is list:
for index in range(len(confType)):
strip_ids(confType[index])
else:
strip_ids(confType)
# Post configuration to target
new_configuration = self.client.update_configuration(
self.args.account, new_host['hashCode'], new_scope['id'], old_configuration)
sys.stdout.write("\n{platform}\t{path}\n".format(**new_scope))
yaml.dump(new_configuration, sys.stdout, Dumper=SafeDumper, default_flow_style=False)
except APIError as e:
self._error(e)
|
|
import httplib
import re
import os
import requests
import json
from datetime import datetime
from distutils.version import LooseVersion
from cumulusci.tasks.release_notes.github_api import GithubApiMixin
from cumulusci.tasks.release_notes.parser import ChangeNotesLinesParser
from cumulusci.tasks.release_notes.parser import IssuesParser
from cumulusci.tasks.release_notes.parser import GithubIssuesParser
from cumulusci.tasks.release_notes.parser import CommentingGithubIssuesParser
from cumulusci.tasks.release_notes.provider import StaticChangeNotesProvider
from cumulusci.tasks.release_notes.provider import DirectoryChangeNotesProvider
from cumulusci.tasks.release_notes.provider import GithubChangeNotesProvider
from cumulusci.tasks.release_notes.exceptions import GithubApiNotFoundError
class BaseReleaseNotesGenerator(object):
def __init__(self):
self.change_notes = []
self.init_parsers()
self.init_change_notes()
def __call__(self):
self._parse_change_notes()
return self.render()
def init_change_notes(self):
self.change_notes = self._init_change_notes()
def _init_change_notes(self):
""" Subclasses should override this method to return an initialized
subclass of BaseChangeNotesProvider """
return []
def init_parsers(self):
""" Initializes the parser instances as the list self.parsers """
self.parsers = []
self._init_parsers()
def _init_parsers(self):
""" Subclasses should override this method to initialize their
parsers """
pass
def _parse_change_notes(self):
""" Parses all change_notes in self.change_notes() through all parsers
in self.parsers """
for change_note in self.change_notes():
self._parse_change_note(change_note)
def _parse_change_note(self, change_note):
""" Parses an individual change note through all parsers in
self.parsers """
for parser in self.parsers:
parser.parse(change_note)
def render(self):
""" Returns the rendered release notes from all parsers as a string """
release_notes = []
for parser in self.parsers:
parser_content = parser.render()
if parser_content is not None:
release_notes.append(parser_content)
return u'\r\n\r\n'.join(release_notes)
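# The subclasses below supply concrete parsers and change-note providers. A
# minimal usage sketch (the heading format each parser expects is defined in
# cumulusci.tasks.release_notes.parser and is not repeated here):
#
#   generator = StaticReleaseNotesGenerator(['...change note text...'])
#   release_notes = generator()   # parses every note, then renders all sections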
class StaticReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, change_notes):
self._change_notes = change_notes
super(StaticReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return StaticChangeNotesProvider(self, self._change_notes)
class DirectoryReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, directory):
self.directory = directory
super(DirectoryReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return DirectoryChangeNotesProvider(self, self.directory)
class GithubReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, github_info, current_tag, last_tag=None):
self.github_info = github_info
self.current_tag = current_tag
self.last_tag = last_tag
super(GithubReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(
ChangeNotesLinesParser(
self,
'Critical Changes',
)
)
self.parsers.append(
ChangeNotesLinesParser(self, 'Changes')
)
self.parsers.append(
GithubIssuesParser(self, 'Issues Closed')
)
def _init_change_notes(self):
return GithubChangeNotesProvider(
self,
self.current_tag,
self.last_tag
)
class PublishingGithubReleaseNotesGenerator(GithubReleaseNotesGenerator, GithubApiMixin):
def __call__(self):
content = super(PublishingGithubReleaseNotesGenerator, self).__call__()
return self.publish(content)
def _init_parsers(self):
self.parsers.append(
ChangeNotesLinesParser(
self,
'Critical Changes',
)
)
self.parsers.append(
ChangeNotesLinesParser(self, 'Changes')
)
self.parsers.append(
CommentingGithubIssuesParser(self, 'Issues Closed')
)
def publish(self, content):
release = self._get_release()
return self._update_release(release, content)
def _get_release(self):
# Query for the release
return self.call_api('/releases/tags/{}'.format(self.current_tag))
def _update_release(self, release, content):
if release['body']:
new_body = []
current_parser = None
is_start_line = False
for parser in self.parsers:
parser.replaced = False
# update existing sections
for line in release['body'].splitlines():
if current_parser:
if current_parser._is_end_line(current_parser._process_line(line)):
parser_content = current_parser.render()
if parser_content:
# replace existing section with new content
new_body.append(parser_content + '\r\n')
current_parser = None
for parser in self.parsers:
if parser._render_header().strip() == parser._process_line(line).strip():
parser.replaced = True
current_parser = parser
is_start_line = True
break
else:
is_start_line = False
if is_start_line:
continue
if current_parser:
continue
else:
# preserve existing sections
new_body.append(line.strip())
# catch section without end line
if current_parser:
new_body.append(current_parser.render())
# add new sections at bottom
for parser in self.parsers:
parser_content = parser.render()
if parser_content and not parser.replaced:
new_body.append(parser_content + '\r\n')
release['body'] = u'\r\n'.join(new_body)
else:
release['body'] = content
if release.get('id'):
resp = self.call_api(
'/releases/{}'.format(release['id']), data=release)
else:
resp = self.call_api('/releases', data=release)
return release['body']
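# Sketch of how the GitHub-backed generators above are typically driven (the
# shape of ``github_info`` is defined by GithubApiMixin and is not documented
# here; the tag name is illustrative):
#
#   generator = GithubReleaseNotesGenerator(github_info, current_tag='release/1.2')
#   notes = generator()   # pulls change notes via GithubChangeNotesProvider and renders them
#
# PublishingGithubReleaseNotesGenerator additionally writes the rendered notes
# back to the GitHub release body via publish().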
|
|
import gui
#Global Variables
b = '[ ]'
GRID_HEIGHT = 5
GRID_WIDTH = 5
KNIGHT_MOVE = [[2,1], [2,-1], [-2,1], [-2,-1], [1,2], [1,-2], [-1,2], [-1,-2]]
#Grid Class
class Grid:
def __init__(self):
# empty grid
self.board = [[b,b,b,b,b], \
[b,b,b,b,b], \
[b,b,b,b,b], \
[b,b,b,b,b], \
[b,b,b,b,b]]
    def save_grid(self, name, char):
        # Saves the grid (and the current player's character) to a file
        write = open(name, 'w')
        # Save the character first so load_char()/load_grid() can read it back
        write.write(str(char))
        write.write('\n')
        for x in range(0, GRID_HEIGHT):
            for y in range(0, GRID_WIDTH):
                write.write(self.board[x][y] + ',')
            write.write('\n')
        write.close()
# Used for loading a grid from a file
def load_grid(self, name):
self.tempgrid = [[b]*GRID_WIDTH for i in range(GRID_HEIGHT)]
tempdic = {}
read = open(name, 'r')
charline = read.readline()
for x in range(0, GRID_HEIGHT):
line = read.readline()
line = line.split(',')
for y in range(0, GRID_WIDTH):
gridvar = line[y]
self.tempgrid[x][y] = gridvar
if self.tempgrid[x][y] != b:
tempdic[self.tempgrid[x][y]] = [x, y]
read.close()
return tempdic
def load_char(self, name):
read = open(name, 'r')
read.seek(0)
char = read.readline()
read.close()
return char
#initialize board
def setup_board(self, piece_loc):
for i in piece_loc:
location = piece_loc[i]
if location != 'dead':
                #row and column come from the piece's location (a two-element [row, col] list)
row = int(location[0])
col = int(location[1])
self.board[row][col] = i
#Changes the selected piece's location and updates the grid as well as returning the new co-ordinates of all the pieces(piece_loc)
def move_piece(self, row, col, piece, piece_loc):
piece_loc[piece] = [row, col]
self.board = recreate_grid(piece_loc)
return piece_loc
def validate_location(self, row, col, new_row, new_col, piece, oth_board):
        #If the piece selected is a knight
        if piece[1] == 'K':
            # Check that the knight moves in an L-shape and that the destination
            # square on its own board is empty, so it can never land on a friendly piece
for i in range(8):
if (new_col == col + KNIGHT_MOVE[i][0]) and (new_row == row + KNIGHT_MOVE[i][1]) and (self.board[new_row][new_col] == b):
# if enemy is killed, taunt them!
return True
#If the piece is a pawn
elif piece[1] == 'P':
#For white, forward is negative
if piece[0] == 'W':
forward = -1
#Vice-versa
elif piece[0] == 'B':
forward = 1
# If there's nothing in front of the piece, move it up
if (new_col == col) and (new_row == row + forward) and (self.board[new_row][new_col] == b) and (oth_board[new_row][new_col] == b):
return True
# If there is an enemy to the top right, kill it
elif (new_col == col + 1) and (new_row == row+ forward) and (self.board[new_row][new_col]== b) and (oth_board[new_row][new_col] != b):
return True
# If there is an enemy to the top left, kill it
elif (new_col == col - 1) and (new_row == row + forward) and (self.board[new_row][new_col] == b) and (oth_board[new_row][new_col] != b):
return True
return False
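# Example of the validation rules above (board coordinates are [row, col]; a
# white knight at [4, 1] may jump to [2, 2] only if that square is empty on its
# own board):
#
#   g = Grid()
#   g.validate_location(4, 1, 2, 2, 'WK1', Grid().board)   # -> True on an empty board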
### Lets pieces kill each other n so on###
def finalize_move(dic_human, dic_ai, last_human, last_ai):
#print('new')
test_h = dict(dic_human)
test_a = dict(dic_ai)
#checks the simultaneous move and see who has the upper hand
for piece_h in test_h:
for piece_a in test_a:
if test_h[piece_h] == test_a[piece_a]:
#print('same', piece_h, piece_a)
if piece_h == last_human:
#print('h')
if piece_a == last_ai:
#print('a')
                        #if both pieces are the same type, both are dead
if piece_h[1] == piece_a[1]:
#print('type1')
dic_human[last_human] = 'dead'
dic_ai[last_ai] = 'dead'
#if human has a pawn and ai has a knight, human dead
elif piece_h[1] > piece_a[1]:
#print('typea')
dic_human[last_human] = 'dead'
# if human has a knight and ai has a pawn, human wins
else:
#print('typeh')
dic_ai[last_ai] = 'dead'
else:
dic_ai[piece_a] = 'dead'
else:
dic_human[piece_h] = 'dead'
"""for i in dic_ai:
if dic_human[last_human] == dic_ai[i]:
print(dic_human[last_human],dic_ai[i], i, last_human)
if i == last_human:
dic_human[last_human] = 'dead'
dic_ai[i] = 'dead'
else:
dic_ai[i] = 'dead'
if last_ai != 'none':
for i in dic_human:
if dic_ai[last_ai] == dic_human[i]:
if i == last_ai:
dic_human[i] = 'dead'
dic_ai[last_ai] = 'dead'
else:
dic_human[i] = 'dead'"""
return dic_human, dic_ai
### creates a grid from a dictionary ###
def recreate_grid(dic):
temp_grid = [[b for i in range(GRID_WIDTH)] for j in range(GRID_HEIGHT)]
for i in dic: # loop thru dictionary
location = dic[i]
if location != 'dead': # if not dead
# place the piece on the board
row = int(location[0])
col = int(location[1])
temp_grid[row][col] = i
return temp_grid
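# Example round trip for the dictionary/board helpers above: a piece dictionary
# maps piece names to [row, col] (or 'dead'), and recreate_grid rebuilds the
# 5x5 board from it.
#
#   pieces = {'WP1': [3, 0], 'WK1': [4, 1], 'BP1': 'dead'}
#   board = recreate_grid(pieces)   # board[3][0] == 'WP1'; dead pieces are skipped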
### checks if either side has won (if any pawns left)###
def get_winner(dic_human, dic_ai):
x = 0
y = 0
for piece in dic_human:
if piece[1] == 'P' and dic_human[piece] != 'dead':
            x += 1
for piece in dic_ai:
if piece[1] == 'P' and dic_ai[piece] != 'dead':
            y += 1
if x == 0 and y == 0:
return 'draw'
elif x == 0 and y > 0:
return 'ai'
elif x > 0 and y == 0:
return 'human'
else:
return 'none'
### turns pawns to knights if possible ###
def check_knight(dic):
counter = 0
mp = 'none'
    #loops through the dictionary to see how many knights are alive
for piece in dic:
if piece[1] == 'K' and dic[piece] != 'dead':
counter += 1
        #check which player the piece belongs to in order to use the right promotion row
if piece[0] == 'B':
end_row = 4
elif piece[0] == 'W':
end_row = 0
    for piece in dic.keys():  # iterate over a copy of the keys; the dict is modified below
        loc = dic[piece]
        #if a pawn has reached its correct end row
        if loc[0] == end_row and piece[1] == 'P':
            #if there are fewer than 2 knights alive, promote the pawn into a knight
if counter < 2:
del dic[piece]
num = int(piece[2]) + 2
new_piece = str(piece)
new_piece = new_piece[0] + 'K' + str(num)
dic[new_piece] = loc
else:
mp = piece
return dic, mp
#save game
def save_game(ai, human):
ai.save_grid('ai.apoc')
human.save_grid('human.apoc')
#new game
def new_game():
new_game = gui.ApocalypseGUI(960, 720)
|
|
import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
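    # For illustration only (not the implementation of any particular shipped
    # backend): an engine that supports the standard SQL EXTRACT() function
    # could implement this roughly as
    #
    #     def date_extract_sql(self, lookup_type, field_name):
    #         return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)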
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT infinity".
        Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes a SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
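    # Illustrative note (not part of the original source): with sqlparse
    # installed, a two-statement script is split roughly as follows; `ops`
    # stands for some operations instance and exact whitespace handling
    # depends on the sqlparse version.
    #
    #   ops.prepare_sql_script("SELECT 1; SELECT 2;")
    #   # -> ['SELECT 1;', 'SELECT 2;']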
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def value_to_db_ipaddress(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
return [first, second]
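    # Illustrative note (not part of the original source): with USE_TZ=False,
    # a year lookup for 2015 produces naive bounds like:
    #
    #   ops.year_lookup_bounds_for_datetime_field(2015)
    #   # -> [datetime.datetime(2015, 1, 1, 0, 0),
    #   #     datetime.datetime(2015, 12, 31, 23, 59, 59, 999999)]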
def get_db_converters(self, expression):
"""Get a list of functions needed to convert field data.
        Some field types on some backends do not provide data in the correct
        format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango21Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
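# Illustrative sketch (not part of the original source): a concrete backend
# would subclass these operations and override the abstract hooks above.
# The class name and quoting style below are hypothetical.
#
#   class MyBackendOperations(BaseDatabaseOperations):
#       def quote_name(self, name):
#           if name.startswith('"') and name.endswith('"'):
#               return name  # already quoted
#           return '"%s"' % name.replace('"', '""')
#
#       def no_limit_value(self):
#           return None  # this hypothetical engine allows omitting LIMIT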
|
|
#!/usr/bin/env python
"""A simple script to download all Gmail emails with a given label.
This script will write a CSV file with the name passed to '--output-file'.
The CSV file will have the format:
<sender domain name>,<email subject>,<email text>
The label you wish to download all emails from is provided with '--label'
and is the label name that you see in Gmail's web interface.
All functions labeled as 'example code' are copyright
by Google under the Apache 2.0 license. The docstrings
for those functions cite the specific sources for
those code samples.
All code that is not explicitly labeled as Google example code
is licensed under the MIT license, with James Mishra
(j@jamesmishra.com) as the copyright holder.
"""
from __future__ import print_function
import httplib2
import base64
import email
import unicodecsv
import unicodedata
import argparse
import sys
import re
from BeautifulSoup import BeautifulSoup
from lxml.html.clean import Cleaner
# The below imports are for the Gmail API.
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
from apiclient import errors
# Allows the handling of very large CSV fields.
# May cause errors in some architectures.
unicodecsv.field_size_limit(sys.maxsize)
def stderr(*args):
"""
Convert arguments to string and print them to stderr
"""
print(" ".join(str(a) for a in args), file=sys.stderr)
def auth():
"""
Return an authenticated Gmail service.
Based on example code from
https://developers.google.com/gmail/api/quickstart/quickstart-python
"""
# Path to the client_secret.json file downloaded from the Developer Console
CLIENT_SECRET_FILE = 'client_secret.json'
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'
# Location of the credentials storage file
STORAGE = Storage('gmail.storage')
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()
# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
credentials = run(flow, STORAGE, http=http)
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
gmail_service = build('gmail', 'v1', http=http)
return gmail_service
def get_threads(gmail_service):
"""
    Takes an authenticated Gmail service and prints the Thread ID of each
    email thread on the first page of results.
Based on example code from Google's website.
"""
# Retrieve a page of threads
threads = gmail_service.users().threads().list(userId='me').execute()
# Print ID for each thread
if threads['threads']:
for thread in threads['threads']:
print('Thread ID: %s' % (thread['id']))
def list_labels(service, user_id):
"""
    Get a list of all labels in the user's mailbox.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
Returns:
        A list of all Labels in the user's mailbox.
Based on example code from
https://developers.google.com/gmail/api/v1/reference/users/labels/list
"""
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
def list_messages_with_label(service, user_id, label_ids=[]):
"""
List all Messages of the user's mailbox with label_ids applied.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
label_ids: Only return Messages with these labelIds applied.
Returns:
List of Messages that have all required Labels applied. Note that the
returned list contains Message IDs, you must use get with the
appropriate id to get the details of a Message.
Based on example code from
https://developers.google.com/gmail/api/v1/reference/users/messages/list
"""
try:
response = service.users().messages().list(userId=user_id,
labelIds=label_ids).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id,
labelIds=label_ids, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
def get_message_from_id(service, user_id, msg_id):
"""Get a Message with given ID.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
msg_id: The ID of the Message required.
Returns:
A Message.
Based on example code from
https://developers.google.com/gmail/api/v1/reference/users/messages/get
"""
try:
message = service.users().messages().get(
userId=user_id, id=msg_id).execute()
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
def get_raw_message_from_id(service, user_id, msg_id):
"""Get a Message and use it to create a MIME Message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
msg_id: The ID of the Message required.
Returns:
        The raw MIME message string decoded from the Message's 'raw' field.
Based on example code from
https://developers.google.com/gmail/api/v1/reference/users/messages/get
"""
try:
message = service.users().messages().get(userId=user_id, id=msg_id,
format='raw').execute()
msg_str = base64.urlsafe_b64decode(message['raw'].encode('ascii'))
return msg_str
except errors.HttpError, error:
print("An error occured: %s" % error)
def get_label_id_from_name(service, user, label_name):
"""
Given an authenticated Gmail service, user, and a label name
(visible in Gmail), find the internal label ID.
This function returns False if the given label name was
not found.
"""
labels = list_labels(service, user)
for label in labels:
if label['name'] == label_name:
return label['id']
else:
return False
def concat_email_text(mime_msg):
"""
Given a Python MIME message object, walk through the MIME
multipart tree and concatenate all of the text we can find.
Return this text.
"""
text = ""
for part in mime_msg.walk():
payload = part.get_payload(decode=True)
if payload is not None:
text += " "
text += payload
return text
def fix_spaces_cr_lf(input_str):
"""
Given an input string,
remove HTML entity non-breaking spaces, carriage returns,
and line feeds, replacing them all with spaces. Also,
remove all consecutive spaces with one space. Finally,
strip all whitespace on the ends of the string.
"""
    input_str = input_str.replace("&nbsp;", " ").replace("\r", " ")\
.replace("\n", " ").strip()
return " ".join(input_str.split()).strip()
URL_REGEX = re.compile(r'http.+? ', re.DOTALL)
def remove_urls(text):
"""
Given a text string, return it without any URLs in it.
"""
return re.sub(URL_REGEX, '', text)
#UNICODE_PUNCTUATION = dict.fromkeys(i for i in xrange(sys.maxunicode)
# if unicodedata.category(unichr(i)).startswith('P'))
#
#HTML_ENTITY_REGEX = re.compile(r'&[^\s]*;')
def remove_punctuation(text):
return text
# text = re.sub(HTML_ENTITY_REGEX, '', text)
# return text.translate(UNICODE_PUNCTUATION)
def html_to_text(html_page_string):
"""
Takes a full HTML document as a string and returns
the text within the <body>
"""
#return html_page_string
    # Strips CSS from the HTML
html_page_string = Cleaner(style=True).clean_html(html_page_string)
# Now we strip everything else...
# BeautifulSoup is unable to strip CSS <style> tags by
# itself, so that's why Cleaner helps out.
soup = BeautifulSoup(html_page_string)
# Concatenate all of the text in tags, and then remove
# all of the embedded URLs.
return remove_urls(" ".join(soup.findAll(text=True)))
def save_str_to_csv(raw_msg, output_file, append=True):
"""
Takes a single Python string (`raw_msg`) and saves it
to a one-row-wide CSV file with the filename `output_file`.
"""
if append:
mode = "a"
else:
mode = "w"
with open(output_file, mode) as handle:
writer = unicodecsv.writer(handle, quoting=unicodecsv.QUOTE_ALL)
writer.writerow([raw_msg])
def process_raw_msg(raw_msg, formatted_output_file, append=True):
"""
    Given a single raw MIME message string and an output CSV file
    to write to, write the details of the message out to the CSV
    file as one row in the format:
<sender-domain>,<subject>,<message-text>
"""
if append:
mode = "ab"
else:
mode = "wb"
mime_msg = email.message_from_string(raw_msg)
text = remove_punctuation(html_to_text(concat_email_text(mime_msg)))
subject = mime_msg.get("Subject")
# Decode escaped character sets in the subject line
subject = u" ".join([a[0].decode('utf-8', 'replace')
for a in email.header.decode_header(subject)])
subject = remove_punctuation(subject.replace("\r", " ").replace("\n", " "))
sender_domain = mime_msg.get("From").split("@")[1].split(">")[0]#\
#.decode("utf-8")
# Strip whitespace
csv_line = [fix_spaces_cr_lf(s) for s in [sender_domain, subject, text]]
# If any of our strings are empty, replace with a placeholder
# to make sure each CSV line has three items.
csv_line = map(lambda s: (u'' == s) and u"PLACEHOLDERNONE" or s ,
csv_line)
if formatted_output_file == "STDOUT":
writer = unicodecsv.writer(sys.stdout,
quoting=unicodecsv.QUOTE_ALL)
writer.writerow(csv_line)
else:
with open(formatted_output_file, mode) as handle:
writer = unicodecsv.writer(handle,
quoting=unicodecsv.QUOTE_ALL)
writer.writerow(csv_line)
def make_argparser():
"""
Configures and returns an ArgumentParser object
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output-file', type=str, required=True, help=
"""The file to write CSV output in. Type 'STDOUT' for
output to standard output. Setting 'STDOUT' will
trigger '--quiet'""")
parser.add_argument('--label', type=str, required=True, help=
"""The name of the Gmail label that contains all of the
emails you wish to download""")
parser.add_argument('--quiet', action='store_true', help=
"""Keep error messages quiet""")
parser.add_argument('--download-only', action='store_true', help=
"""Downloads emails and writes *raw* MIME email messages
to the output file. This option is useful in conjunction
with '--import-raw' for repeated debugging of this
program without downloading the same messages over
and over again from the Internet.
This parameter is also useful in the event you want
the raw MIME emails without any customization.
The output filename does not change with this
parameter, but the CSV format just becomes
one column containing the entire MIME string.
One thing to worry about is that some programs
might return errors with the extremely long CSV fields
created by this format.""")
parser.add_argument('--import-raw', type=str, default='', help=
"""Imports a CSV file created by the '--download-only'
parameter.""")
parser.add_argument('--user', type=str, default='me', help=
"""A different username to send to the Gmail API.
The default value is 'me', which the Gmail API
interprets as the authenticated user.""")
return parser
def quiet_print_maker(quiet):
"""
Creates a custom print function that is silent if quiet=True.
This is a cheap way to enable/disable all debug output at once.
"""
if quiet:
return lambda *x: None
else:
return print
def main():
# The next two lines are a common hack for enabling UTF-8
# support in Python 2. They're generally *not* recommended,
# but this avoids UnicodeEncodeErrors and UnicodeDecodeErrors
# when passing strings in and around third party libraries.
#
# This isn't a shortcut for true Unicode support, so don't
# use this hack in production.
reload(sys)
sys.setdefaultencoding("utf-8")
args = make_argparser().parse_args()
# If we are writing CSV output to standard output,
# then we don't want it clouded up with progress output.
if args.output_file == "STDOUT":
args.quiet = True
qp = quiet_print_maker(args.quiet)
if args.import_raw:
qp("Importing stored messages")
count = 1
with open(args.import_raw, 'r') as handle:
reader = unicodecsv.reader(handle, errors='replace')
for raw_msg in reader:
raw_msg = raw_msg[0]
process_raw_msg(raw_msg, args.output_file)
print("Processed", count)
count += 1
else:
# The first time the program runs, the user will have to authenticate
# using a web browser. After that, the credentials will be stored.
gmail_service = auth()
label = get_label_id_from_name(gmail_service, args.user, args.label)
msg_id_list = list_messages_with_label(gmail_service, args.user,
[label])
num_msgs = len(msg_id_list)
qp("Total messages:", num_msgs)
count = 1
for msg in msg_id_list:
raw = get_raw_message_from_id(gmail_service, args.user, msg['id'])
if args.download_only:
save_str_to_csv(raw, args.output_file)
else:
process_raw_msg(raw, args.output_file)
qp("Processed", count, "of", num_msgs)
count += 1
if __name__ == "__main__":
main()
|
|
"""
eld.py: economic load dispatching in electricity generation
Approach: use SOS2 constraints for modeling non-linear functions.
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
import math
import random
from piecewise import convex_comb_sos
def cost(a,b,c,e,f,p_min,p):
"""cost: fuel cost based on "standard" parameters
(with valve-point loading effect)
"""
return a + b*p + c*p*p + abs(e*math.sin(f*(p_min-p)))
def lower_brkpts(a,b,c,e,f,p_min,p_max,n):
"""lower_brkpts: lower approximation of the cost function
Parameters:
- a,...,p_max: cost parameters
- n: number of breakpoints' intervals to insert between valve points
Returns: list of breakpoints in the form [(x0,y0),...,(xK,yK)]
"""
EPS = 1.e-12 # for avoiding round-off errors
if f == 0: f = math.pi/(p_max-p_min)
brk = []
nvalve = int(math.ceil(f*(p_max-p_min)/math.pi))
for i in range(nvalve+1):
p0 = p_min + i*math.pi/f
if p0 >= p_max-EPS:
brk.append((p_max,cost(a,b,c,e,f,p_min,p_max)))
break
for j in range(n):
p = p0 + j*math.pi/f/n
if p >= p_max:
break
brk.append((p,cost(a,b,c,e,f,p_min,p)))
return brk
def eld_complete(U,p_min,p_max,d,brk):
"""eld -- economic load dispatching in electricity generation
Parameters:
- U: set of generators (units)
- p_min[u]: minimum operating power for unit u
- p_max[u]: maximum operating power for unit u
- d: demand
- brk[k]: (x,y) coordinates of breakpoint k, k=0,...,K
Returns a model, ready to be solved.
"""
model = Model("Economic load dispatching")
p,F = {},{}
for u in U:
p[u] = model.addVar(lb=p_min[u], ub=p_max[u], name="p(%s)"%u) # capacity
F[u] = model.addVar(lb=0,name="fuel(%s)"%u)
# set fuel costs based on piecewise linear approximation
for u in U:
abrk = [X for (X,Y) in brk[u]]
bbrk = [Y for (X,Y) in brk[u]]
# convex combination part:
K = len(brk[u])-1
z = {}
for k in range(K+1):
z[k] = model.addVar(ub=1) # do not name variables for avoiding clash
model.addCons(p[u] == quicksum(abrk[k]*z[k] for k in range(K+1)))
model.addCons(F[u] == quicksum(bbrk[k]*z[k] for k in range(K+1)))
model.addCons(quicksum(z[k] for k in range(K+1)) == 1)
model.addConsSOS2([z[k] for k in range(K+1)])
# demand satisfaction
model.addCons(quicksum(p[u] for u in U) == d, "demand")
# objective
model.setObjective(quicksum(F[u] for u in U), "minimize")
model.data = p
return model
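# Illustrative note (not part of the original source): for each unit u the
# convex-combination/SOS2 block above encodes
#     p[u] = sum_k x_k * z_k,   F[u] = sum_k y_k * z_k,
#     sum_k z_k = 1,  z_k >= 0,  SOS2 over (z_0, ..., z_K),
# so at most two adjacent z_k are nonzero and (p[u], F[u]) lies on the
# piecewise-linear lower approximation of the fuel-cost curve.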
def eld_another(U,p_min,p_max,d,brk):
"""eld -- economic load dispatching in electricity generation
Parameters:
- U: set of generators (units)
- p_min[u]: minimum operating power for unit u
- p_max[u]: maximum operating power for unit u
- d: demand
- brk[u][k]: (x,y) coordinates of breakpoint k, k=0,...,K for unit u
Returns a model, ready to be solved.
"""
model = Model("Economic load dispatching")
# set objective based on piecewise linear approximation
p,F,z = {},{},{}
for u in U:
abrk = [X for (X,Y) in brk[u]]
bbrk = [Y for (X,Y) in brk[u]]
p[u],F[u],z[u] = convex_comb_sos(model,abrk,bbrk)
p[u].lb = p_min[u]
p[u].ub = p_max[u]
# demand satisfaction
model.addCons(quicksum(p[u] for u in U) == d, "demand")
# objective
model.setObjective(quicksum(F[u] for u in U), "minimize")
model.data = p
return model
def eld13():
U, a, b, c, e, f, p_min, p_max = multidict({
1 : [ 550, 8.1, 0.00028, 300, 0.035, 0, 680 ],
2 : [ 309, 8.1, 0.00056, 200, 0.042, 0, 360 ],
3 : [ 307, 8.1, 0.00056, 200, 0.042, 0, 360 ],
4 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
5 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
6 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
7 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
8 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
9 : [ 240, 7.74, 0.00324, 150, 0.063, 60, 180 ],
10 : [ 126, 8.6, 0.00284, 100, 0.084, 40, 120 ],
11 : [ 126, 8.6, 0.00284, 100, 0.084, 40, 120 ],
12 : [ 126, 8.6, 0.00284, 100, 0.084, 55, 120 ],
13 : [ 126, 8.6, 0.00284, 100, 0.084, 55, 120 ],
})
return U, a, b, c, e, f, p_min, p_max
def eld40():
U, a, b, c, e, f, p_min, p_max = multidict({
1 : [ 94.705, 6.73, 0.00690, 100, 0.084, 36, 114],
2 : [ 94.705, 6.73, 0.00690, 100, 0.084, 36, 114],
3 : [ 309.54, 7.07, 0.02028, 100, 0.084, 60, 120],
4 : [ 369.03, 8.18, 0.00942, 150, 0.063, 80, 190],
5 : [ 148.89, 5.35, 0.01140, 120, 0.077, 47, 97],
6 : [ 222.33, 8.05, 0.01142, 100, 0.084, 68, 140],
7 : [ 287.71, 8.03, 0.00357, 200, 0.042, 110, 300],
8 : [ 391.98, 6.99, 0.00492, 200, 0.042, 135, 300],
9 : [ 455.76, 6.60, 0.00573, 200, 0.042, 135, 300],
10 : [ 722.82, 12.9, 0.00605, 200, 0.042, 130, 300],
11 : [ 635.20, 12.9, 0.00515, 200, 0.042, 94, 375],
12 : [ 654.69, 12.8, 0.00569, 200, 0.042, 94, 375],
13 : [ 913.40, 12.5, 0.00421, 300, 0.035, 125, 500],
14 : [ 1760.4, 8.84, 0.00752, 300, 0.035, 125, 500],
15 : [ 1728.3, 9.15, 0.00708, 300, 0.035, 125, 500],
16 : [ 1728.3, 9.15, 0.00708, 300, 0.035, 125, 500],
17 : [ 647.85, 7.97, 0.00313, 300, 0.035, 220, 500],
18 : [ 649.69, 7.95, 0.00313, 300, 0.035, 220, 500],
19 : [ 647.83, 7.97, 0.00313, 300, 0.035, 242, 550],
20 : [ 647.81, 7.97, 0.00313, 300, 0.035, 242, 550],
21 : [ 785.96, 6.63, 0.00298, 300, 0.035, 254, 550],
22 : [ 785.96, 6.63, 0.00298, 300, 0.035, 254, 550],
23 : [ 794.53, 6.66, 0.00284, 300, 0.035, 254, 550],
24 : [ 794.53, 6.66, 0.00284, 300, 0.035, 254, 550],
25 : [ 801.32, 7.10, 0.00277, 300, 0.035, 254, 550],
26 : [ 801.32, 7.10, 0.00277, 300, 0.035, 254, 550],
27 : [ 1055.1, 3.33, 0.52124, 120, 0.077, 10, 150],
28 : [ 1055.1, 3.33, 0.52124, 120, 0.077, 10, 150],
29 : [ 1055.1, 3.33, 0.52124, 120, 0.077, 10, 150],
30 : [ 148.89, 5.35, 0.01140, 120, 0.077, 47, 97],
31 : [ 222.92, 6.43, 0.00160, 150, 0.063, 60, 190],
32 : [ 222.92, 6.43, 0.00160, 150, 0.063, 60, 190],
33 : [ 222.92, 6.43, 0.00160, 150, 0.063, 60, 190],
34 : [ 107.87, 8.95, 0.00010, 200, 0.042, 90, 200],
35 : [ 116.58, 8.62, 0.00010, 200, 0.042, 90, 200],
36 : [ 116.58, 8.62, 0.00010, 200, 0.042, 90, 200],
37 : [ 307.45, 5.88, 0.01610, 80, 0.098, 25, 110],
38 : [ 307.45, 5.88, 0.01610, 80, 0.098, 25, 110],
39 : [ 307.45, 5.88, 0.01610, 80, 0.098, 25, 110],
40 : [ 647.83, 7.97, 0.00313, 300, 0.035, 242, 550],
})
U = list(a.keys())
    return U,a,b,c,e,f,p_min,p_max
if __name__ == "__main__":
# U,a,b,c,e,f,p_min,p_max = eld13(); d=1800
U,a,b,c,e,f,p_min,p_max = eld13(); d=2520
# U,a,b,c,e,f,p_min,p_max = eld40(); d=10500
n = 100 # number of breakpoints between valve points
brk = {}
for u in U:
brk[u] = lower_brkpts(a[u],b[u],c[u],e[u],f[u],p_min[u],p_max[u],n)
lower = eld_complete(U,p_min,p_max,d,brk)
# lower = eld_another(U,p_min,p_max,d,brk)
lower.setRealParam("limits/gap", 1e-12)
lower.setRealParam("limits/absgap", 1e-12)
lower.setRealParam("numerics/feastol", 1e-9)
lower.optimize()
p = lower.data
print("Lower bound:",lower.ObjBound)
UB = sum(cost(a[u],b[u],c[u],e[u],f[u],p_min[u],lower.getVal(p[u])) for u in U)
print("Upper bound:",UB)
print("Solution:")
for u in p:
print(u,lower.getVal(p[u]))
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file may adapt small portions of https://github.com/mtth/hdfs (MIT
# license), see the LICENSES directory.
from os import path as osp
from posixpath import join as pjoin
import os
import posixpath
import shutil
import six
from ibis.config import options
import ibis.common as com
import ibis.util as util
from hdfs.util import temppath
class HDFSError(com.IbisError):
pass
def implements(f):
def decorator(g):
g.__doc__ = f.__doc__
return g
return decorator
class HDFS(object):
"""
Interface class to HDFS for ibis that abstracts away (and protects
user/developer against) various 3rd party library API differences.
"""
def log(self, message):
print(message)
def exists(self, path):
raise NotImplementedError
def status(self, path):
raise NotImplementedError
def chmod(self, hdfs_path, permissions):
"""
        Change permissions of a file or directory
Parameters
----------
hdfs_path : string
Directory or path
permissions : string
Octal permissions string
"""
raise NotImplementedError
def chown(self, hdfs_path, owner=None, group=None):
"""
Change owner (and/or group) of a file or directory
Parameters
----------
hdfs_path : string
Directory or path
owner : string, optional
Name of owner
group : string, optional
Name of group
"""
raise NotImplementedError
def head(self, hdfs_path, nbytes=1024, offset=0):
"""
Retrieve the requested number of bytes from a file
Parameters
----------
hdfs_path : string
Absolute HDFS path
nbytes : int, default 1024 (1K)
Number of bytes to retrieve
offset : int, default 0
Number of bytes at beginning of file to skip before retrieving data
Returns
-------
head_data : bytes
"""
raise NotImplementedError
def get(self, hdfs_path, local_path='.', overwrite=False):
"""
Download remote file or directory to the local filesystem
Parameters
----------
hdfs_path : string
local_path : string, default '.'
"""
raise NotImplementedError
def put(self, hdfs_path, resource, overwrite=False, verbose=None,
**kwargs):
"""
Write file or directory to HDFS
Parameters
----------
hdfs_path : string
Directory or path
resource : string or buffer-like
Relative or absolute path to local resource, or a file-like object
overwrite : boolean, default False
verbose : boolean, default ibis options.verbose
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
"""
raise NotImplementedError
def put_tarfile(self, hdfs_path, local_path, compression='gzip',
verbose=None, overwrite=False):
"""
Write contents of tar archive to HDFS directly without having to
decompress it locally first
Parameters
----------
hdfs_path : string
local_path : string
compression : {'gzip', 'bz2', None}
overwrite : boolean, default False
verbose : boolean, default None (global default)
"""
import tarfile
modes = {
None: 'r',
'gzip': 'r:gz',
'bz2': 'r:bz2'
}
if compression not in modes:
raise ValueError('Invalid compression type {0}'
.format(compression))
mode = modes[compression]
tf = tarfile.open(local_path, mode=mode)
for info in tf:
if not info.isfile():
continue
buf = tf.extractfile(info)
abspath = pjoin(hdfs_path, info.path)
self.put(abspath, buf, verbose=verbose, overwrite=overwrite)
def put_zipfile(self, hdfs_path, local_path):
raise NotImplementedError
def write(self, hdfs_path, buf, overwrite=False, blocksize=None,
replication=None, buffersize=None):
raise NotImplementedError
def mkdir(self, path, create_parent=False):
pass
def ls(self, hdfs_path, status=False):
"""
Return contents of directory
Parameters
----------
hdfs_path : string
"""
raise NotImplementedError
def size(self, hdfs_path):
"""
Return total size of file or directory
        Parameters
        ----------
        hdfs_path : string
        Returns
        -------
        size : int
"""
raise NotImplementedError
def tail(self, hdfs_path, nbytes=1024):
raise NotImplementedError
def mv(self, hdfs_path_src, hdfs_path_dest, overwrite=True):
"""
Move hdfs_path_src to hdfs_path_dest
Parameters
----------
overwrite : boolean, default True
Overwrite hdfs_path_dest if it exists.
"""
raise NotImplementedError
def cp(self, hdfs_path_src, hdfs_path_dest):
raise NotImplementedError
def rm(self, path):
"""
Delete a single file
"""
return self.delete(path)
def rmdir(self, path):
"""
Delete a directory and all its contents
"""
self.client.delete(path, recursive=True)
def find_any_file(self, hdfs_dir):
contents = self.ls(hdfs_dir, status=True)
def valid_filename(name):
head, tail = posixpath.split(name)
tail = tail.lower()
return (not tail.endswith('.tmp') and
not tail.endswith('.copying') and
not tail.startswith('_') and
not tail.startswith('.'))
for filename, meta in contents:
if meta['type'].lower() == 'file' and valid_filename(filename):
return filename
raise com.IbisError('No files found in the passed directory')
class WebHDFS(HDFS):
"""
A WebHDFS-based interface to HDFS using the HDFSCli library
"""
def __init__(self, client):
self.client = client
@property
def protocol(self):
return 'webhdfs'
def status(self, path):
"""
Retrieve HDFS metadata for path
"""
return self.client.status(path)
@implements(HDFS.chmod)
def chmod(self, path, permissions):
self.client.set_permissions(path, permissions)
@implements(HDFS.chown)
def chown(self, path, owner=None, group=None):
self.client.set_owner(path, owner, group)
@implements(HDFS.exists)
def exists(self, path):
try:
self.client.status(path)
return True
except Exception:
return False
@implements(HDFS.ls)
def ls(self, hdfs_path, status=False):
contents = self.client.list(hdfs_path)
if not status:
return [path for path, detail in contents]
else:
return contents
@implements(HDFS.mkdir)
def mkdir(self, dir_path, create_parent=False):
# ugh, see #252
# create a temporary file, then delete it
dummy = pjoin(dir_path, util.guid())
self.client.write(dummy, '')
self.client.delete(dummy)
@implements(HDFS.size)
def size(self, hdfs_path):
stat = self.status(hdfs_path)
if stat['type'] == 'FILE':
return stat['length']
elif stat['type'] == 'DIRECTORY':
total = 0
for path in self.ls(hdfs_path):
total += self.size(path)
return total
else:
raise NotImplementedError
@implements(HDFS.mv)
def mv(self, hdfs_path_src, hdfs_path_dest, overwrite=True):
if overwrite and self.exists(hdfs_path_dest):
if self.status(hdfs_path_dest)['type'] == 'FILE':
self.rm(hdfs_path_dest)
return self.client.rename(hdfs_path_src, hdfs_path_dest)
def delete(self, hdfs_path, recursive=False):
"""
"""
return self.client.delete(hdfs_path, recursive=recursive)
@implements(HDFS.head)
def head(self, hdfs_path, nbytes=1024, offset=0):
gen = self.client.read(hdfs_path, offset=offset, length=nbytes)
return ''.join(gen)
@implements(HDFS.put)
def put(self, hdfs_path, resource, overwrite=False, verbose=None,
**kwargs):
verbose = verbose or options.verbose
is_path = isinstance(resource, six.string_types)
if is_path and osp.isdir(resource):
for dirpath, dirnames, filenames in os.walk(resource):
rel_dir = osp.relpath(dirpath, resource)
if rel_dir == '.':
rel_dir = ''
for fpath in filenames:
abs_path = osp.join(dirpath, fpath)
rel_hdfs_path = pjoin(hdfs_path, rel_dir, fpath)
self.put(rel_hdfs_path, abs_path, overwrite=overwrite,
verbose=verbose, **kwargs)
else:
if is_path:
basename = os.path.basename(resource)
if self.exists(hdfs_path):
if self.status(hdfs_path)['type'] == 'DIRECTORY':
hdfs_path = pjoin(hdfs_path, basename)
if verbose:
self.log('Writing local {0} to HDFS {1}'.format(resource,
hdfs_path))
self.client.upload(hdfs_path, resource,
overwrite=overwrite, **kwargs)
else:
if verbose:
self.log('Writing buffer to HDFS {0}'.format(hdfs_path))
resource.seek(0)
self.client.write(hdfs_path, resource, overwrite=overwrite,
**kwargs)
@implements(HDFS.get)
def get(self, hdfs_path, local_path, overwrite=False, verbose=None):
verbose = verbose or options.verbose
hdfs_path = hdfs_path.rstrip(posixpath.sep)
if osp.isdir(local_path) and not overwrite:
dest = osp.join(local_path, posixpath.basename(hdfs_path))
else:
local_dir = osp.dirname(local_path) or '.'
if osp.isdir(local_dir):
dest = local_path
else:
# fail early
                raise HDFSError('Parent directory %s does not exist'
                                % local_dir)
# TODO: threadpool
def _get_file(remote, local):
if verbose:
self.log('Writing HDFS {0} to local {1}'.format(remote, local))
self.client.download(remote, local, overwrite=overwrite)
def _scrape_dir(path, dst):
objs = self.client.list(path)
for hpath, detail in objs:
relpath = posixpath.relpath(hpath, hdfs_path)
full_opath = pjoin(dst, relpath)
if detail['type'] == 'FILE':
_get_file(hpath, full_opath)
else:
os.makedirs(full_opath)
_scrape_dir(hpath, dst)
status = self.status(hdfs_path)
if status['type'] == 'FILE':
if not overwrite and osp.exists(local_path):
raise IOError('{0} exists'.format(local_path))
_get_file(hdfs_path, local_path)
else:
# TODO: partitioned files
with temppath() as tpath:
_temp_dir_path = osp.join(tpath, posixpath.basename(hdfs_path))
os.makedirs(_temp_dir_path)
_scrape_dir(hdfs_path, _temp_dir_path)
if verbose:
self.log('Moving {0} to {1}'.format(_temp_dir_path,
local_path))
if overwrite and osp.exists(local_path):
# swap and delete
local_swap_path = util.guid()
shutil.move(local_path, local_swap_path)
try:
shutil.move(_temp_dir_path, local_path)
if verbose:
msg = 'Deleting original {0}'.format(local_path)
self.log(msg)
shutil.rmtree(local_swap_path)
except:
# undo our diddle
shutil.move(local_swap_path, local_path)
else:
shutil.move(_temp_dir_path, local_path)
return dest
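# Illustrative usage sketch (not part of the original module). It assumes a
# reachable WebHDFS endpoint and uses InsecureClient from the `hdfs` package
# that this interface wraps; host, port and paths are placeholders.
#
#   from hdfs.client import InsecureClient
#   fs = WebHDFS(InsecureClient('http://namenode:50070', user='ibis'))
#   fs.mkdir('/tmp/ibis-demo')
#   fs.put('/tmp/ibis-demo/data.csv', 'local_data.csv')
#   fs.get('/tmp/ibis-demo/data.csv', 'downloaded.csv')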
|
|
#!/usr/bin/env python
lookup_table = {
'RAD_FACILITY': {
'CITY_NAME':
('The name of the city, town, or village where a facility is '
'located.'),
'CONGRESSIONAL_DIST_NUM':
('The number that represents a Congressional District for a '
'state within the United States.'),
'COUNTRY_NAME':
('The name that represents a primary geopolitical unit of the '
'world. The default value for RADInfo is "United States".'),
'COUNTY_NAME':
('The name of U.S. county or county equivalent, as listed in '
'FIPS Pub 6-4.'),
'EF_PGM_SOURCE':
('The abbreviated name of an information management system '
'contained in Envirofacts that may serve as a source for '
'RADInfo data.'),
'EPA_REGION_CODE':
'The code that represents an EPA Region.',
'FACILITY_REGISTRY_ID':
('The identification number assigned by the EPA Facility '
'Registry System to uniquely identify a facility site.'),
'FED_FACILITY_CODE':
('A code identifying whether or not a site is a Federal '
'(U.S. Government) facility. Valid Values: "D" = Status '
'Undetermined, "Y" = Federal Facility, "N" = Not a Federal '
'Facility.'),
'FRS_UPDATE_DATE':
('The date when RADInfo facility data is updated using the '
'Facility Registry System.'),
'HUC_CODE':
('The hydrologic unit code (HUC) that represents a geographic '
'area representing part or all of a surface-draining basin, a '
'combination of drainage basins, or a distinct hydrologic '
'feature.'),
'LOCATION_ADDRESS':
('The address that describes the physical (geographic) location '
'of the front door or main entrance of a facility site, '
'including urban-style street address or rural address.'),
'POSTAL_CODE':
('The combination of the 5-digit Zone Improvement Plan (ZIP) '
'code and the four-digit extension code (if available) that '
'represents the geographic segment that is a subunit of the '
'ZIP Code, assigned by the U.S. Postal Service to a geographic '
'location to facilitate delivery, or the postal zone specific '
'to the country, other than the U.S., where the mail is '
'delivered.'),
'PRIMARY_NAME':
'The name of a facility site.',
'RAD_CHANGE_DATE':
('The date when RADInfo facility data was altered by a '
'designated RADInfo user.'),
'RAD_SYS_ID':
('The non-intelligent, unique identifier assigned to a RADInfo '
'facility or site.'),
'SOURCE_DATA':
('The initial source of RADInfo information for a facility. '
'Reconciliation with the Facility Registry System may '
'subsequently alter some source data.'),
'STATE_CODE':
('The U.S. Postal Service abbreviation that represents the '
'state of state equivalent for the U.S. and Canada.'),
'STATE_NAME':
('The name of the principal administrative subdivision of the '
'United States.'),
'SUPPLEMENTAL_LOCATION':
('The text that provides additional information about a place, '
'including a building name with its secondary unit and number, '
'an industrial park name, an installation name, or descriptive '
'text where no formal address is available.'),
'TRIBAL_LAND_CODE':
('Code indicating whether or not the facility is located on '
'tribal land. Valid values: "Y" = yes; "N" = no.'),
'TRIBAL_LAND_NAME':
('The name of an American Indian or Alaskan native area where '
'the facility is located, as identified through query '
'mechanisms available to the Envirofacts network.'),
'URL':
            ('The URL associated with the Web site which provides risk data '
'about the associated radioisotope.'),
'URL_LINK_DESCRIPTION':
('A web site description identifying the type of information '
'provided at the URL.'),
},
'RAD_FACILITY_TYPE': {
'CIT_REF_CODE':
('The code that represents the environmental regulation with '
'oversight of a facility. For example, the CIT_REF_CODE for a '
'RAD NPL facility would be equal to "40CFR300".'),
'FACILITY_TYPE':
('The type of facility regulated by the governing regulation. '
'Valid Values include: '
'NESHAPS/Underground Uranium Mine, '
'NESHAPS/DOE RAD Facility (Non-Radon), '
'NESHAPS/Other Federal Facility, '
'NESHAPS/Elemental Phosphorus Plant, '
'NESHAPS/DOE Radon, '
'NESHAPS/Phosphogypsum Stack, '
'NESHAPS/Disposal of Uranium Mill Tailings, '
'NESHAPS/Operating Uranium Mill Tailings, '
'WIPP Facility/Repository, '
'WIPP Facility/Generator Site, '
'RAD NPL Facility'),
'SEC_CIT_REF_FLAG':
            ('Indicates that an additional Citation Reference Code must '
             'be used to uniquely identify this type of facility. For '
'example, the WIPP repository is uniquely identified as being '
'regulated under both 40CFR191 and 40CFR194. This flag is set to '
'"Y" when a facility has this exact combination of '
'CIT_REF_CODE(s) associated with it. In the near term, no other '
'combination of governing regulations will cause this flag to be '
'set.'),
'SUBPART_ID':
('Identification number assigned to the subpart of the '
'environmental regulation with oversight of the facility.'),
},
'RAD_GEO_LOCATION': {
'COORDINATE_DATA_SOURCE_CODE':
('The code that represents the party responsible for providing '
'the latitude and longitude coordinates.'),
'DATA_COLLECTION_DATE':
'The calendar date when data were collected.',
'EF_PGM_SOURCE':
('The abbreviated name of an information management system '
'contained in Envirofacts that may serve as a source for '
'RADInfo data.'),
'GEOMETRIC_TYPE_CODE':
('The code that represents the geometric entity represented by '
'one point or a sequence of latitude and longitude points.'),
'HORIZONTAL_ACCURACY_MEASURE':
('The measure of the accuracy (in meters) of the latitude and '
'longitude coordinates.'),
'HORIZONTAL_COLLECT_METHOD_CODE':
('The code that represents the method used to determine the '
'latitude and longitude coordinates for a point on the earth.'),
'HORIZONTAL_REFER_DATUM_CODE':
('The code that represents the reference datum used in '
'determining latitude and longitude coordinates.'),
'LATITUDE_MEASURE':
('The measure of the angular distance on a meridian north or '
'south of the equator.'),
'LOCATION_COMMENTS_TEXT':
('The text that provides additional information about the '
'geographic coordinates.'),
'LONGITUDE_MEASURE':
('The measure of the angular distance on a meridian east or '
'west of the prime meridian.'),
'RAD_CHANGE_DATE':
('The date when RADInfo facility data was altered by a '
'designated RADInfo user.'),
'RAD_OVERRIDE':
('A flag indicating that the latitude and longitude coordinates '
'for the facility in RADInfo are preferred to the designated '
'best value coordinates in the Envirofacts Locational Reference '
'Tables.'),
'RAD_SYS_ID':
('The non-intelligent, unique identifier assigned to a RADInfo '
'facility or site.'),
'REFERENCE_POINT_CODE':
('The code that represents the place for which geographic '
'coordinates were established.'),
'SOURCE_MAP_SCALE_NUMBER':
('The number that represents the proportional distance on the '
'ground for one unit of measure on the map or photo.'),
'SUB_ID':
'Identification number for the operable unit.',
'SUB_TYPE_CODE':
            'The code for an operable unit.',
'VERTICAL_ACCURACY_MEASURE':
('The measure of the accuracy (in meters) of the vertical '
'measure (i.e., the altitude) of a reference point.'),
'VERTICAL_COLLECT_METHOD_CASE':
('The code that represents the method used to collect the '
'vertical measure (i.e., the altitude) of a reference point.'),
'VERTICAL_MEASURE':
('The measure of elevation (i.e., the altitude), in meters, '
'above or below a reference datum.'),
'VERTICAL_REFERENCE_DATA_CODE':
('The code that represents the reference datum used to '
'determine the vertical measure (i.e., the altitude).'),
},
'RAD_REGULATION': {
'CFR_PART':
('The Part (name/title) of the regulation related '
'to the facility.'),
'CRF_SECTION':
('The Section (name/title) of the regulation related to '
'the facility.'),
'CFR_SUBPART':
('Subpart related to the specific part of the CFR '
'(e.g. Subpart D).'),
'CIT_REF_CODE':
('The code that represents the environmental regulation with '
'oversight of a facility. For example, the CIT_REF_CODE for a '
'RAD NPL facility would be equal to "40CFR300".'),
'PART_ID':
('The part number of the specific Code of Federal regulation '
'(e.g. Part 60).'),
'RAD_CHANGE_DATE':
('The date when RADInfo facility data was altered by a '
'designated RADInfo user.'),
'REG_TITLE':
'The title (name) of the regulation related to the facility.',
'SECTION_ID':
('The section number of the specific Code of Federal regulation '
'(e.g. Part 60.489).'),
'STATUTE':
('The name of the Federal statute governing the regulations '
'related to the facility.'),
'STAT_ACRONYM':
('The acronym of the Federal statute governing the regulations '
'related to the facility.'),
'SUBPART_ID':
('Identification number assigned to the subpart of the '
'environmental regulation with oversight of the facility.'),
'TITLE_ID':
('The Code of Federal Regulation number related to the '
'regulation (e.g. 40 CFR).'),
'URL':
            ('The URL associated with the Web site which provides risk data '
'about the associated radioisotope.'),
},
'RAD_REGULATORY_PROG': {
'CIT_REF_CODE':
('The code that represents the environmental regulation with '
'oversight of a facility. For example, the CIT_REF_CODE for '
'a RAD NPL facility would be equal to "40CFR300".'),
'ENFORCEMENT_AGENCY':
('The Agency (or Agreement State) responsible for the '
'implementation and enforcement of the radiation standard(s) '
'established. In the case of 40 CFR 190, the NRC or one of the '
'29 Agreement States is identified as the Enforcement Agency '
'under this definition.'),
'OPERATING_ORGANIZATION':
('The facility owner/operator who conducts the daily operations '
'and submits compliance reports to the Enforcement Agency.'),
'OVERSIGHT_AGENCY':
('The Agency responsible for establishing generally applicable '
'radiation standards. In the case of 40 CFR 190, EPA is '
'identified as the Oversight Agency under this definition.'),
'PROG_FAC_STATUS':
('The status of a facility in relation to the program monitoring '
'it. An active facility status means that the facility is '
'currently operational or activities such as remediation are '
'ongoing at the site/facility. An inactive facility status means '
'the facility is no longer operational. A standby status '
'indicates that the facility (i.e., uranium mine) is not '
'currently operating, but has not committed to closing down its '
'operation. An archived facility status means remediation has '
'been completed; such facilities are no longer of regulatory '
'concern and the information associated with them can be placed '
'in an archive database.'),
'PROG_FAC_TYPE':
('Code indicating the type of facility or complex facility that '
'is regulated or monitored by a program. Only those facilities '
'typecast as RAD NESHAPS Facilities and reporting under 40CFR61 '
'are contained in the RAD_NESHAPS_FACILITY table. Similarly, '
'only those facilities typecast as WIPP Facilites and governed '
'under 40CFR194, or the combination of 40CFR191 and 40CFR194, '
'are contained in the RAD_WIPP_FACILITY table, and only those '
'facilities that must comply with 40CFR300 and typecast as RAD '
             'NPL Facilities are contained in the RAD_NPL_FACILITY table.'),
'RAD_CHANGE_DATE':
('The date when RADInfo facility data was altered by a '
'designated RADInfo user.'),
'RAD_SYS_ID':
('The non-intelligent, unique identifier assigned to a RADInfo '
'facility or site.'),
'SEC_CIT_REF_FLAG':
            ('Indicates that an additional Citation Reference Code must '
             'be used to uniquely identify this type of facility. For '
'example, the WIPP repository is uniquely identified as being '
'regulated under both 40CFR191 and 40CFR194. This flag is set to '
'"Y" when a facility has this exact combination of '
'CIT_REF_CODE(s) associated with it. In the near term, no other '
'combination of governing regulations will cause this flag to be '
'set.'),
'SUBPART_ID':
('Identification number assigned to the subpart of the '
'environmental regulation with oversight of the facility.'),
},
}
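# Illustrative example (not part of the original data): descriptions are
# looked up by table name and then column name, e.g.
#
#   print(lookup_table['RAD_FACILITY']['EPA_REGION_CODE'])
#   # -> 'The code that represents an EPA Region.'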
|
|
# -*- coding: utf-8; fill-column: 78 -*-
import re
#import string
import sys
try:
import threading
except ImportError: # pragma:nocover
import dummy_threading as threading
# derived from ASPN Cookbook (#36302)
class lazy_property(object):
"""An @property that is only calculated once.
The results of the decorated function are stored in the instance
dictionary after the first access. Subsequent accesses are serviced out
of the __dict__ by Python at native attribute access speed.
"""
def __init__(self, deferred):
self._deferred = deferred
def __get__(self, obj, cls):
if obj is None:
return self
value = self._deferred(obj)
setattr(obj, self._deferred.func_name, value)
return value
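# Illustrative example (not part of the original module); `Circle` is a
# made-up class. The first access runs the function, later accesses are
# served straight from the instance __dict__:
#
#   class Circle(object):
#       def __init__(self, radius):
#           self.radius = radius
#       @lazy_property
#       def area(self):
#           return 3.14159 * self.radius ** 2
#
#   c = Circle(2)
#   c.area                 # computed once, then stored on the instance
#   'area' in c.__dict__   # -> True after the first access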
class assignable_property(object):
"""A @property, computed by default but assignable on a per-instance basis.
Similar to ``property``, except that the attribute may be assigned to and
assignments may be deleted.
May be used as a decorator.
"""
def __init__(self, fget, name=None, doc=None):
self.name = name or fget.__name__
self.fget = fget
self.__doc__ = doc or fget.__doc__
def __get__(self, instance, cls):
if instance is None:
# Class.prop == None
return None
if self.name in instance.__dict__:
return instance.__dict__[self.name]
else:
return self.fget(instance)
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __delete__(self, instance):
try:
del instance.__dict__[self.name]
except KeyError:
raise AttributeError("%r object has no overriden attribute %r" % (
type(instance).__name__, self.name))
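# Illustrative example (not part of the original module); `Widget` is a
# made-up class:
#
#   class Widget(object):
#       @assignable_property
#       def label(self):
#           return 'default'
#
#   w = Widget()
#   w.label             # -> 'default'  (computed)
#   w.label = 'custom'
#   w.label             # -> 'custom'   (per-instance override)
#   del w.label
#   w.label             # -> 'default'  (computed again)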
class assignable_class_property(object):
"""A read/write property for access on a class or an instance.
Similar to :class:`assignable_property`, except that access as a class
attribute will also return a computed value.
The decorated function will be passed two arguments: ``instance`` and
``class`` (the same signature as the descriptor __get__ protocol).
Instance will be ``None`` if the attribute access was against the class.
Note that assignments at the class level are not intercepted by this
property. They will replace the property on the class.
May be used as a decorator.
"""
def __init__(self, fget, name=None, doc=None):
self.name = name or fget.__name__
self.fget = fget
self.__doc__ = doc or fget.__doc__
def __get__(self, instance, cls):
if instance is None:
return self.fget(None, cls)
if self.name in instance.__dict__:
return instance.__dict__[self.name]
else:
return self.fget(instance, cls)
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __delete__(self, instance):
try:
del instance.__dict__[self.name]
except KeyError:
raise AttributeError("%r object has no overridden attribute %r" % (
type(instance).__name__, self.name))
class class_cloner(object):
"""A class-copying ``classmethod``.
Calls the decorated method as a classmethod, passing a copy of the class.
The copy will be a direct subclass of the class the method is invoked on.
The class_cloner is only visible at the class level. Instance access is
proxied to the instance dictionary.
"""
def __init__(self, fn):
self.name = fn.__name__
self.cloner = classmethod(fn)
self.__doc__ = fn.__doc__
def __get__(self, instance, cls):
if instance is not None:
try:
return instance.__dict__[self.name]
except KeyError:
raise AttributeError(self.name)
members = {'__doc__': getattr(cls, '__doc__', '')}
try:
members['__module__'] = \
sys._getframe(1).f_globals['__name__']
except (AttributeError, KeyError, TypeError): # pragma: nocover
members['__module__'] = cls.__module__
clone = type(cls.__name__, (cls,), members)
return self.cloner.__get__(None, clone)
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __delete__(self, instance):
try:
del instance.__dict__[self.name]
except KeyError:
raise AttributeError("%r object has no attribute %r" % (
type(instance).__name__, self.name))
class as_mapping(object):
"""Provide a mapping view of an instance.
Similar to vars(), but effective on extension types and will invoke
descriptors on access.
"""
__slots__ = 'target',
def __init__(self, target):
self.target = target
def __getitem__(self, item):
try:
if isinstance(item, unicode):
return getattr(self.target, item.encode('ascii'))
return getattr(self.target, item)
except (AttributeError, UnicodeError):
raise KeyError(item)
def __contains__(self, item):
if isinstance(item, unicode):
try:
return hasattr(self.target, item.encode('ascii'))
except UnicodeError:
return False
return hasattr(self.target, item)
def __iter__(self):
return iter(dir(self.target))
class adict(dict):
"""Allow dict keys to be accessed with getattr()."""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def re_ucompile(pattern, flags=0):
"""Compile a regex with re.UNICODE on by default."""
return re.compile(pattern, flags | re.UNICODE)
# ASCII digits and letters: the characters re_uescape leaves unescaped.
_alphanum = set(('0123456789'
                 'abcdefghijklmnopqrstuvwxyz'
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ').decode('ascii'))
def re_uescape(pattern):
"""A unicode-friendly version of re.escape."""
mutable = list(pattern)
for idx, char in enumerate(pattern):
if char not in _alphanum:
if char == u"\000":
mutable[idx] = u"\\000"
else:
mutable[idx] = u"\\" + char
return u''.join(mutable)
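
# Illustrative sketch, not part of the original module: re_uescape escapes
# every non-alphanumeric character (including non-ASCII ones) so it matches
# literally, and re_ucompile compiles the pattern with re.UNICODE enabled.
def _example_re_uescape():
    literal = u'price (\u20ac)'         # parentheses plus a euro sign
    pattern = re_ucompile(re_uescape(literal))
    assert pattern.search(u'the price (\u20ac) is low') is not None
    assert pattern.search(u'price in dollars') is None
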
def luhn10(number):
"""Return True if the number passes the Luhn checksum algorithm."""
sum = 0
while number:
r = number % 100
number //= 100
z = r % 10
r = r // 10 * 2
sum += r // 10 + r % 10 + z
return 0 == sum % 10
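
# Illustrative sketch, not part of the original module: 79927398713 is the
# customary Luhn test number and validates; changing its check digit fails.
def _example_luhn10():
    assert luhn10(79927398713)
    assert not luhn10(79927398714)
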
def to_pairs(dictlike):
"""Yield (key, value) pairs from any dict-like object.
    Accepts anything matching the dict.update() definition of "dictlike":
    a mapping with iteritems() or keys(), a namedtuple, or any iterable of
    (key, value) 2-tuples, dispatching to the most efficient protocol
    available.
"""
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'keys'):
return ((key, dictlike[key]) for key in dictlike.keys())
elif hasattr(dictlike, '_asdict'): # namedtuple interface
return dictlike._asdict().iteritems()
else:
return ((key, value) for key, value in dictlike)
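
# Illustrative sketch, not part of the original module: to_pairs accepts
# mappings, namedtuples and iterables of 2-tuples alike.
def _example_to_pairs():
    from collections import namedtuple
    assert sorted(to_pairs({'a': 1, 'b': 2})) == [('a', 1), ('b', 2)]
    assert sorted(to_pairs([('a', 1), ('b', 2)])) == [('a', 1), ('b', 2)]
    Point = namedtuple('Point', 'x y')
    assert sorted(to_pairs(Point(1, 2))) == [('x', 1), ('y', 2)]
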
def keyslice_pairs(pairs, include=None, omit=None, rename=None, key=None):
"""Filter (key, value) pairs by key and return a subset.
:param pairs: an iterable of ``(key, value)`` pairs (2-tuples).
:param include: optional, a sequence of key values. If supplied, only
pairs whose key is a member of this sequence will be returned.
:param omit: optional, a sequence of key values. If supplied, all pairs
will be returned, save those whose key is a member of this sequence.
:param rename: optional, a mapping or sequence of 2-tuples specifying a
key-to-key translation. A pair whose key has been renamed by this
translation will always be emitted, regardless of *include* or *omit*
rules. The mapping will be converted to a dict internally, and keys
must be hashable.
:param key: optional, a function of one argument that is used to extract a
comparison key from the first item of each pair. Similar to the
``key`` parameter to Python's ``sort`` and ``sorted``. Useful for
        transforming unicode keys to bytestrings with ``key=str``, adding or
removing prefixes en masse, etc.
:returns: yields ``(key, value)`` pairs.
"""
if include and omit:
raise TypeError('received include and omit, specify only one')
include = set(include) if include else False
omit = set(omit) if omit else False
rename = dict(to_pairs(rename)) if rename else False
keyfunc = key
del key
for key, value in pairs:
if keyfunc:
key = keyfunc(key)
if rename and key in rename:
yield (rename[key], value)
continue
if include:
if key not in include:
continue
elif omit:
if key in omit:
continue
yield key, value
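
# Illustrative sketch, not part of the original module: filtering and renaming
# (key, value) pairs.  Renamed keys are always emitted, regardless of the
# include/omit rules.
def _example_keyslice_pairs():
    pairs = [('id', 1), ('name', 'x'), ('secret', 'hush')]
    assert list(keyslice_pairs(pairs, omit=['secret'])) == \
        [('id', 1), ('name', 'x')]
    assert list(keyslice_pairs(pairs, include=['name'],
                               rename={'id': 'pk'})) == \
        [('pk', 1), ('name', 'x')]
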
class Maybe(object):
"""A ternary logic value, bitwise-comparable to bools"""
def __and__(self, other):
if other is True or other is self:
return self
elif other is False:
return False
return NotImplemented
__rand__ = __and__
def __or__(self, other):
if other is False or other is self:
return self
elif other is True:
return True
return NotImplemented
__ror__ = __or__
def not_(self, other):
"""Negate a ternary value.
(Python doesn't allow useful overriding of ``not``.)
"""
if other is self:
return other
elif other is True:
return False
elif other is False:
return True
else:
raise TypeError(type(other).__name__)
def truth(self, other):
if other is self:
return True
elif other is True:
return True
elif other is False:
return False
else:
raise TypeError(type(other).__name__)
def __nonzero__(self):
raise NotImplementedError()
def __str__(self):
return 'Maybe'
__repr__ = __str__
Maybe = Maybe()
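
# Illustrative sketch, not part of the original module: Maybe combines with
# booleans like the "unknown" value of ternary logic.  True | Maybe is True,
# False & Maybe is False, and every other combination stays Maybe.
def _example_maybe():
    assert (Maybe & True) is Maybe
    assert (Maybe & False) is False
    assert (Maybe | True) is True
    assert (Maybe | False) is Maybe
    assert Maybe.not_(Maybe) is Maybe
    assert Maybe.truth(Maybe) is True
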
def named_int_factory(name, value, doc=''):
"""Return a unique integer *value* with a str() and repr() of *name*."""
report_name = lambda self: name
cls = type(name, (int,), dict(
__doc__=doc, __str__=report_name, __repr__=report_name))
return cls(value)
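
# Illustrative sketch, not part of the original module: the returned object is
# a genuine int but prints as its symbolic name.
def _example_named_int_factory():
    SKIP = named_int_factory('SKIP', 2, doc='Skip this node.')
    assert SKIP == 2 and SKIP + 1 == 3
    assert str(SKIP) == repr(SKIP) == 'SKIP'
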
def autodocument_from_superclasses(cls):
"""Fill in missing documentation on overridden methods.
Can be used as a class decorator.
"""
for name, attribute in cls.__dict__.items():
# is it a method on the class that is locally undocumented?
if hasattr(attribute, '__call__') and not attribute.__doc__:
# don't muck with builtins
if not hasattr(attribute, '__module__'):
continue
# find docs on a superclass
for supercls in cls.__bases__:
try:
superdoc = getattr(supercls, name).__doc__
if superdoc:
setattr(attribute, '__doc__', superdoc)
break
except (AttributeError, TypeError):
pass
return cls
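
# Illustrative sketch, not part of the original module: an overriding method
# without a docstring inherits the superclass documentation in place.
def _example_autodocument():
    class Base(object):
        def run(self):
            """Run the task."""
    @autodocument_from_superclasses
    class Child(Base):
        def run(self):
            pass
    assert Child.run.__doc__ == 'Run the task.'
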
# derived from SQLAlchemy (http://www.sqlalchemy.org/); MIT License
class _symbol(object):
def __init__(self, name):
"""Construct a new named symbol."""
assert isinstance(name, str)
self.__name__ = self.name = name
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return self.name
_symbol.__name__ = 'symbol'
# derived from SQLAlchemy (http://www.sqlalchemy.org/); MIT License
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
foo
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
"""
symbols = {}
_lock = threading.Lock()
    def __new__(cls, name):
        with cls._lock:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name)
            return sym
Unspecified = symbol('Unspecified')
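
# Illustrative sketch, not part of the original module: because symbol()
# returns interned singletons, Unspecified works well as a default-argument
# sentinel when None is a legitimate caller-supplied value.
def _example_unspecified(value=Unspecified):
    if value is Unspecified:
        return 'no value supplied'
    return value
# _example_unspecified() returns 'no value supplied';
# _example_unspecified(None) returns None.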