| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# coding=utf-8
import pymongo
import re
from math import exp
from datetime import date, datetime
connection = pymongo.MongoClient("")
db = connection.dataservices
ads = db.ads
# 1 - whether an address is given
# 2 - whether contact details are given
# 3 - whether there are photos; more than one photo also earns a bonus
# 4 - how long ago the ad was posted
# 5 - whether a price is given
# The weights of the criteria are chosen intuitively:
# 1 - 0.1
# 2 - 0.3
# 3 - 0.05 per photo, capped at 0.1
# 4 - 0.4, decaying with age as 1 + 1/-(1 + exp(-0.2*(x - 30))), i.e. 1 - 1/(1 + exp(-0.2*(x - 30))) (the lambda below scales by 0.5)
# 5 - 0.1
# address_weight = (lambda isHasAddress: 0.1 if isHasAddress else 0)
contacts_weight = (lambda isHasContacts: 0.3 if isHasContacts else 0)
def photos_weight(count):
if count > 1:
return 0.1
elif count == 1:
return 0.05
else:
return 0
ads_ago_weight = (lambda days_ago: 0.5 * (1 + 1 / -(1 + exp(-0.2 * (days_ago - 30)))))
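# Illustrative note (not part of the original script): ads_ago_weight is a reversed logistic
# curve in the number of days since the ad was posted. With the lambda as defined above:
#   ads_ago_weight(0)  ~= 0.499  (fresh ad, close to the 0.5 maximum)
#   ads_ago_weight(30) == 0.25   (midpoint of the decay)
#   ads_ago_weight(60) ~= 0.001  (the weight has almost vanished after two months)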
cost_weight = (lambda isHasCost: 0.1 if isHasCost else 0)
try:
query = {}
cursor = ads.find(query)
for ad in cursor:
# images rank
imgcnt = 0
if 'images' in ad:
imgcnt = len(ad['images'])
C1_photos = photos_weight(imgcnt)
# address rank
# addressHas = False
# if 'address' in ad:
# if bool(ad['address'].strip()):
# addressHas = True
# C2_address = address_weight(addressHas)
# contacts rank
contactsHas = False
if 'contacts' in ad:
if bool(ad['contacts'].strip()):
contactsHas = True
C3_contacts = contacts_weight(contactsHas)
# cost rank
prog = re.compile("^[\D]+$")
costHas = False
if 'cost' in ad:
cost = ad['cost']
if prog.match(cost) is None:
costHas = True
C4_cost = cost_weight(costHas)
# date rank
C5_date = 0
if 'date' in ad:
d0 = datetime.today()
d1 = ad['date']
delta = d0 - d1
C5_date = ads_ago_weight(delta.days)
C = C1_photos + C3_contacts + C4_cost + C5_date
print("rank: ",C)
# update costValue
#if 'costValue' not in ad:
try:
ads.update({"_id": ad["_id"]}, {"$set": {"rank": C}})
except Exception as e:
print("No update", type(e), e)
except Exception as e:
print ("Unexpected error:", type(e), e)
| nesterione/core-of-my-services | scripts/run_ads_ranking.py | Python | apache-2.0 | 2,535 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTagBinding
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-resourcemanager
# [START cloudresourcemanager_v3_generated_TagBindings_CreateTagBinding_sync]
from google.cloud import resourcemanager_v3
def sample_create_tag_binding():
# Create a client
client = resourcemanager_v3.TagBindingsClient()
# Initialize request argument(s)
request = resourcemanager_v3.CreateTagBindingRequest(
)
# Make the request
operation = client.create_tag_binding(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END cloudresourcemanager_v3_generated_TagBindings_CreateTagBinding_sync]
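# Note (not part of the generated snippet): as generated, the request above carries no fields.
# In practice a CreateTagBindingRequest wraps a TagBinding naming the resource and the tag
# value to attach. A hedged sketch, using field names from the public resourcemanager_v3 API
# (verify against the client library version you install):
#
#     request = resourcemanager_v3.CreateTagBindingRequest(
#         tag_binding=resourcemanager_v3.TagBinding(
#             parent="//cloudresourcemanager.googleapis.com/projects/<PROJECT_NUMBER>",
#             tag_value="tagValues/<TAG_VALUE_ID>",
#         )
#     )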
| googleapis/python-resource-manager | samples/generated_samples/cloudresourcemanager_v3_generated_tag_bindings_create_tag_binding_sync.py | Python | apache-2.0 | 1,572 |
"""
"""
from .datasets import Dataset;
from.mnist import get_mnist_data;
__all__ = ['get_mnist_data', 'Dataset'];
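# Example usage (hypothetical; assumes the package root mnist_lib is importable and that
# get_mnist_data's return value matches whatever mnist.py defines, which is not shown here):
#   from mnist_lib.datasets import Dataset, get_mnist_data
#   mnist = get_mnist_data()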
| nryant/tensorflow_mnist_examples | mnist_lib/datasets/__init__.py | Python | apache-2.0 | 115 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import mxnet as mx
import math
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.base import py_str, MXNetError
from common import setup_module, with_seed
import unittest
def np_softmax(x, axis=-1):
# fix for old numpy on Travis not supporting keepdims
# x = x - np.max(x, axis=-1, keepdims=True)
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x)
# x /= np.sum(x, axis=-1, keepdims=True)
x /= np.sum(x, axis=axis, keepdims=True)
return x
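# Quick sanity check of the reference implementation (illustrative, not part of the
# original test file): every slice along the softmax axis should sum to 1, e.g.
#   np_softmax(np.array([[1., 2., 3.]])).sum(axis=-1)  -> array([1.])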
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
out1 = exec1.outputs[0].asnumpy()
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy())
@with_seed(0)
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
    # If skip_second is True, the second argument will not receive a gradient.
    # This is to test #1130.
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
y = mx.sym.pow(2, exp)
x = np.ones(shape)*3
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for the float16 data type due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
shape = (3, 4)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for the float16 data type due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1:
grad_gam = np.sum(copy_x, axis=0)
return (grad_x, grad_gam)
shape = (3,4)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = 0;
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data')
data2 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2);
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-4, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='bilinear', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8044")
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-4)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
stypes = ['row_sparse', 'default']
for stype in stypes:
check_batchnorm_training(stype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
num_group = 2
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-4)
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(mx.cpu(), x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
d = gen_broadcast_data(idx);
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
d = gen_binary_data(dummy);
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
x = baseline(d[0], d[1])
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
print('found precision problem')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
print('a: {}'.format(d[0][idx]))
print('b: {}'.format(d[1][idx]))
import struct
print('a hex: {}'.format(struct.pack('d', d[0][idx]).encode('hex')))
print('b hex: {}'.format(struct.pack('d', np.broadcast_to(d[1], x.shape)[idx]).encode('hex')))
print('in baseline(a, b): {}'.format(x[idx]))
print('in symbol(a, b): {}'.format(y[idx]))
print('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
c = a % b
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Issue exposed with seed 1768433044
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a, b):
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out * (a // b)), gen_broadcast_data, atol=1)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
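# The impulse-response tests below exploit the fact that convolving a unit impulse with an
# all-ones kernel reproduces the kernel's support: the forward output (and the input gradient
# produced by an impulse on the output side) should therefore sum to prod(kernel_shape).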
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the spike center
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
# Test new api (Using shape)
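    # Each test case is (src_shape, shape_args, reverse, expected_dst_shape). The special values in
    # shape_args follow the Reshape operator's conventions: 0 copies a dimension from the input,
    # -1 infers a single dimension from the remaining size, -2 copies all remaining dimensions,
    # -3 merges two consecutive dimensions, and -4 splits one dimension into the two that follow it.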
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
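    # The lambdas passed below are the analytic gradients of each reduction: sum broadcasts the
    # output gradient, mean additionally divides by the reduction size, prod/nanprod use
    # outgrad * out / x (zeroed at NaN inputs for the nan-variants), nansum zeroes the gradient at
    # NaN inputs, max/min route the gradient to the extremal positions, and norm uses x / ||x||_2.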
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                          outgrad.reshape(keepdim_shape) * (np.equal(data, outdata.reshape(keepdim_shape)).astype(np.float64)),
mx.symbol.max, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                          outgrad.reshape(keepdim_shape) * (np.equal(data, outdata.reshape(keepdim_shape)).astype(np.float64)),
mx.symbol.min, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
np.set_printoptions(threshold=np.nan)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for test convenience, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_dot():
ctx=default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
# Test normal dot.
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
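                    # Reference gradients for C = A.dot(B): dA = dC.dot(B.T), dB = A.T.dot(dC)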
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
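# Note: get_correlation ignores its data1/data2 arguments and builds the symbol from fresh
# 'img1'/'img2' variables; the actual arrays are fed at bind time.
# Numpy reference for the Correlation operator used by the checks below: each output channel
# corresponds to one displacement (s2o, s2p) on a stride2 grid of radius max_displacement, and the
# value at (i, j) is the kernel_size x kernel_size patch correlation (product, or absolute
# difference when is_multiply is False) between data1 around (x1, y1) and data2 around the
# displaced location, averaged over kernel area and input channels.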
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
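# The backward reference distributes the output gradient to both padded inputs: in product mode
# each input receives out_grad times the other input's value, while in absolute-difference mode it
# receives out_grad times the sign of (data1 - data2), with the same kernel/channel averaging.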
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32', 'float64']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode):
# bind with label
X = mx.symbol.Variable('X')
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu()).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
check_pad_with_shape(shape1, default_context(), pad1, 'constant')
check_pad_with_shape(shape1, default_context(), pad1, 'edge')
check_pad_with_shape(shape2, default_context(), pad2, 'constant')
check_pad_with_shape(shape2, default_context(), pad2, 'edge')
check_pad_with_shape(shape1, default_context(), pad1, 'reflect')
check_pad_with_shape(shape2, default_context(), pad2, 'reflect')
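# Numpy reference for InstanceNorm: statistics are computed per (sample, channel) over the spatial
# dimensions only, i.e. out = weight * (x - mean) / sqrt(var + eps) + bias with mean/var taken over
# axes 2..ndim-1 and weight/bias broadcast per channel.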
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
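# L2Normalization reference: 'channel' normalizes each spatial position across channels (axis 1),
# 'spatial' normalizes each (sample, channel) slice over its spatial positions, and 'instance'
# normalizes each sample over everything but the batch axis; eps is added to the norm for stability.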
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0] // s[1], axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=1e-3)
# TODO(szha): Seeding this masks failures. We need to do a deep dive for failures without this seed.
@with_seed(1234)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
for nbatch in [1, 4]:
for nchannel in [3, 5]:
for height in [4, 6]:
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
for width in [5, 7]:
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
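# LayerNorm reference: normalization is over a single axis with learned per-element gamma/beta on
# that axis, out = gamma * (x - mean) / sqrt(var + eps) + beta, as implemented in npy_layer_norm below.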
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32, forward_check_eps=1E-3):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_layer_norm():
for dtype, forward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4]):
for in_shape in [(10, 6, 5), (10, 10)]:
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
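# All three sequence references above first move the sequence axis to position 1 so the data is in
# [batch, seqlen, ...] layout, apply the per-example length, then move the axis back where needed.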
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu()).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out])
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
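# Generic harnesses for unary/binary math operators: the input(s) are filled with a constant
# (data_init), the symbol's forward output is compared against the numpy reference, and the
# backward pass is compared against grad_init times the analytic derivative(s) supplied as lambdas.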
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
@with_seed()
def test_order():
ctx = default_context()
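    # gt_topk builds the ground truth with argsort/sort plus np.take(..., mode='wrap'): for
    # descending order the indices -1..-k select the k largest entries, for ascending order 0..k-1
    # select the k smallest; ret_typ 'mask' instead marks the selected positions with ones.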
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
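    # BlockGrad should act as identity on the forward pass and stop gradient flow
    # on the backward pass (backward() must not raise).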
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
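    # Forward: take(a, indices) must equal a[indices]. Backward: with an all-ones
    # output gradient, the gradient w.r.t. 'a' accumulates one unit per occurrence
    # of each row index in 'indices'.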
def check_output_n_grad(data_shape, idx_shape):
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[0], size=idx_shape)
grad_out = np.ones(idx_shape + data_shape[1:], dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), data_real[idx_real])
for i in np.nditer(idx_real):
grad_in[i] += 1.0
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx)
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
check_output_n_grad(data_shape, idx_shape)
@with_seed()
def test_grid_generator():
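    # GridGenerator is checked for both transform types: 'affine' (an identity
    # affine matrix must reproduce the pixel grid) and 'warp' (a unit flow shifts
    # the grid by one pixel). Gradients are compared against closed-form
    # expressions, including the grad_req='add' case.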
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_bilinear_sampler():
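    # Compares BilinearSampler against a straightforward NumPy implementation of
    # bilinear interpolation (forward) and its analytic gradients w.r.t. both the
    # data and the sampling grid, for grad_req='write' and grad_req='add'.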
from math import floor
def between(x, lowerbound, upperbound):
        return lowerbound <= x <= upperbound
def bilinear_forward_numpy(data, grid):
batchsize = data.shape[0]
input_height = data.shape[2]
input_width = data.shape[3]
num_channel = data.shape[1]
output_height = grid.shape[2]
output_width = grid.shape[3]
out = np.zeros(data.shape[:2] + grid.shape[2:], dtype=np.float32)
for i in range(batchsize):
for yout in range(output_height):
for xout in range(output_width):
xcoord = np.float32((grid[i, 0, yout, xout] + 1) * (input_width-1) / 2.0)
ycoord = np.float32((grid[i, 1, yout, xout] + 1) * (input_height-1) / 2.0)
xInTopLeft = int(floor(xcoord))
xWeightTopLeft = np.float32(1-(xcoord - xInTopLeft))
yInTopLeft = int(floor(ycoord))
yWeightTopLeft = np.float32(1-(ycoord - yInTopLeft))
# interpolation
for channel in range(num_channel):
inTopLeft = data[i,channel,yInTopLeft, xInTopLeft] \
if between(xInTopLeft,0,input_width-1) and between(yInTopLeft,0,input_height-1) else 0.0
inTopRight = data[i,channel,yInTopLeft, xInTopLeft+1] \
if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft,0,input_height-1) else 0.0
inBottomLeft = data[i,channel,yInTopLeft+1, xInTopLeft] \
if between(xInTopLeft,0,input_width-1) and between(yInTopLeft+1,0,input_height-1) else 0.0
inBottomRight = data[i,channel,yInTopLeft+1, xInTopLeft+1] \
if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft+1,0,input_height-1) else 0.0
out[i,channel,yout,xout] = xWeightTopLeft * yWeightTopLeft * inTopLeft\
+ (1-xWeightTopLeft)*yWeightTopLeft * inTopRight\
+ xWeightTopLeft * (1-yWeightTopLeft) * inBottomLeft\
+(1-xWeightTopLeft) * (1-yWeightTopLeft) * inBottomRight
return out
def bilinear_backward_numpy(out_grad, data, grid):
data_grad = np.zeros(data.shape, dtype=np.float32)
grid_grad = np.zeros(grid.shape, dtype=np.float32)
batchsize = data.shape[0]
input_height = data.shape[2]
input_width = data.shape[3]
num_channel = data.shape[1]
output_height = grid.shape[2]
output_width = grid.shape[3]
for i in range(batchsize):
for yout in range(output_height):
for xout in range(output_width):
                    top_left_y_gw = np.float32(0.0)
                    top_left_x_gw = np.float32(0.0)
xcoord = np.float32((grid[i, 0, yout, xout] + 1) * (input_width-1) / 2.0)
ycoord = np.float32((grid[i, 1, yout, xout] + 1) * (input_height-1) / 2.0)
xInTopLeft = int(floor(xcoord))
xWeightTopLeft = np.float32(1-(xcoord - xInTopLeft))
yInTopLeft = int(floor(ycoord))
yWeightTopLeft = np.float32(1-(ycoord - yInTopLeft))
topLeftDotProduct = np.float32(0)
topRightDotProduct = np.float32(0)
bottomLeftDotProduct = np.float32(0)
bottomRightDotProduct = np.float32(0)
for channel in range(num_channel):
# left top
if between(xInTopLeft,0,input_width-1) and between(yInTopLeft,0,input_height-1):
topLeftDotProduct += data[i,channel,yInTopLeft, xInTopLeft] * \
out_grad[i,channel,yout,xout]
data_grad[i, channel, yInTopLeft, xInTopLeft] += xWeightTopLeft * \
yWeightTopLeft * out_grad[i,channel,yout,xout]
# right top
if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft,0,input_height-1):
topRightDotProduct += data[i, channel, yInTopLeft,xInTopLeft+1] * \
out_grad[i, channel, yout,xout]
data_grad[i, channel,yInTopLeft, xInTopLeft+1] += (1-xWeightTopLeft) * \
yWeightTopLeft * out_grad[i,channel,yout,xout]
# left bottom
if between(xInTopLeft,0,input_width-1) and between(yInTopLeft+1,0,input_height-1):
bottomLeftDotProduct += data[i, channel,yInTopLeft+1, xInTopLeft] * \
out_grad[i,channel,yout,xout]
data_grad[i,channel,yInTopLeft+1,xInTopLeft]+=xWeightTopLeft * \
(1-yWeightTopLeft)* out_grad[i,channel,yout,xout]
# right bottom
if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft+1,0,input_height-1):
bottomRightDotProduct += data[i,channel,yInTopLeft+1, xInTopLeft+1] * \
out_grad[i,channel,yout,xout]
data_grad[i,channel,yInTopLeft+1,xInTopLeft+1]+= (1-xWeightTopLeft) * \
(1-yWeightTopLeft)*out_grad[i,channel,yout,xout]
yf = np.float32(-xWeightTopLeft * topLeftDotProduct + xWeightTopLeft*bottomLeftDotProduct - \
(1-xWeightTopLeft)* topRightDotProduct + (1-xWeightTopLeft)*bottomRightDotProduct)
xf = np.float32(-yWeightTopLeft * topLeftDotProduct + yWeightTopLeft*topRightDotProduct - \
(1-yWeightTopLeft)*bottomLeftDotProduct + (1-yWeightTopLeft)*bottomRightDotProduct)
grid_grad[i,0,yout,xout] = xf * (input_width-1) / 2.0
grid_grad[i,1,yout,xout] = yf * (input_height-1) / 2.0
return data_grad, grid_grad
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
net = mx.sym.BilinearSampler(data=data,grid=grid)
test_case = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for ctx in [default_context()]:
for item in test_case:
data_shape, grid_shape = item
exe = net.simple_bind(data=data_shape,grid=grid_shape,ctx=ctx,grad_req='write')
# check forward
exe.arg_dict['data'][:] = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
exe.arg_dict['grid'][:] = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
exe.forward(is_train=True)
out = bilinear_forward_numpy(exe.arg_dict['data'].asnumpy(), exe.arg_dict['grid'].asnumpy())
assert_almost_equal(exe.outputs[0].asnumpy(), out, rtol=1e-3,atol=1e-5)
# check backward
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
exe.backward(mx.nd.array(out_grad))
data_grad, grid_grad = bilinear_backward_numpy(out_grad,exe.arg_dict['data'].asnumpy(),
exe.arg_dict['grid'].asnumpy())
assert_almost_equal(exe.grad_dict['data'].asnumpy(), data_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), grid_grad, rtol=1e-3, atol=1e-5)
# check kAddTo
exe_addto = net.simple_bind(data=data_shape, grid=grid_shape, ctx=ctx, grad_req='add')
data_initial_grid = np.random.normal(size=exe_addto.grad_dict['data'].shape).astype(np.float32)
grid_initial_grid = np.random.normal(size=exe_addto.grad_dict['grid'].shape).astype(np.float32)
exe_addto.arg_dict['data'][:] = exe.arg_dict['data'][:]
exe_addto.arg_dict['grid'][:] = exe.arg_dict['grid'][:]
exe_addto.grad_dict['data'][:] = data_initial_grid
exe_addto.grad_dict['grid'][:] = grid_initial_grid
exe_addto.forward(is_train=True)
exe_addto.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_addto.grad_dict['data'].asnumpy(), data_grad + data_initial_grid, rtol=1e-3,atol=1e-5)
assert_almost_equal(exe_addto.grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grid, rtol=1e-3,atol=1e-5)
@with_seed()
def test_index2d():
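    # batch_take(data, x) should pick data[i, x[i]] for each row i.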
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
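    # Cast must convert forward values to the destination dtype and backward
    # gradients to the source dtype, for all pairs of int32/float32/float16.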
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
@with_seed()
def test_repeat():
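    # repeat is checked against np.repeat (forward), against a hand-computed
    # gradient that sums over the repeated entries (backward), and numerically.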
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
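    # tile is checked against np.tile for normal, empty-tensor and empty-reps
    # inputs, against a hand-computed gradient, numerically, and for invalid reps.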
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
        exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
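    # one_hot maps integer indices to one-hot vectors of length 'depth';
    # e.g. indices [1, 0] with depth=3 become [[0, 1, 0], [1, 0, 0]].
    # Out-of-range indices produce all-zero rows; empty indices and depth=0
    # are exercised as edge cases.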
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
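    # where(condition, x, y) selects elementwise from x where condition is non-zero
    # and from y otherwise; a 1-D condition selects whole rows. Forward output and
    # the gradients routed to x/y (the condition itself gets zero gradient) are
    # checked for grad_req='write' and grad_req='add'.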
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
            if c != 0:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
check_symbolic_forward(sym, [data], [np_softmax(data, axis=axis)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_pick():
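    # pick(data, index, axis) gathers one element per position along 'axis';
    # the expected result is built with NumPy fancy indexing.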
def test_pick_helper(index_type=np.int32):
for _ in range(100):
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth):
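    # Helper: runs contrib.ctc_loss in both training and inference mode, checks
    # that the two agree, optionally compares against a ground-truth loss, and
    # verifies the gradient numerically.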
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward in training mode (with grad calc)
    exe.forward(is_train=True)
    out_train = exe.outputs[0]
    # test forward in inference mode (without grad calc)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # make sure losses calculated in both modes are the same
    assert_almost_equal(out_test.asnumpy(), out_train.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_test.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
@with_seed()
def test_ctc_loss_grad():
    def check_ctc_loss_grad(blank_label): # reference values taken from TensorFlow
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
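    # Quantizes a small float32 matrix to uint8 over the range [0, 1] and
    # dequantizes it back; both results are compared against precomputed values.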
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='uint8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[35, 151], [154, 219]])
a_real = mx.nd.array([[0.13725491, 0.59215689], [0.60392159, 0.8588236]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
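    # Exercises the CustomOp API: a squaring op with dense and CSR storage
    # (including custom infer_storage_type/_backward), a multiply op relying on the
    # default storage inference, a variant without top gradients, and an op that
    # takes no inputs at all.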
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_psroipooling():
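    # Numerical gradient check for contrib.PSROIPooling over a grid of ROI counts,
    # class/group counts and image sizes; the check only runs when the default
    # context is a GPU.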
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # For now, only a GPU implementation is available
if mx.Context.default_ctx.device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
@with_seed()
def test_deformable_convolution():
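    # Numerical gradient check for contrib.DeformableConvolution w.r.t. the data,
    # offsets and weights over several configurations; the check only runs when the
    # default context is a GPU.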
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
                        # For now, only a GPU implementation is available
if mx.Context.default_ctx.device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
# Seed set because the test is not robust enough to operate on random data. Repro issue with:
# MXNET_TEST_SEED=1234 nosetests --verbose tests/python/gpu/test_operator_gpu.py:test_deformable_psroipooling
@with_seed(0)
def test_deformable_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group) * 0.1
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 1e-2, 1e-3
                    # For now, only a GPU implementation is available
if mx.Context.default_ctx.device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_lower_triangle_symm(a, ndims, m, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# Seed set because the test is not robust enough to operate on random data
@with_seed(42)
def test_laop():
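    # Covers the linalg operators gemm, gemm2, potrf, potri, trsm, trmm and
    # sumlogdiag: forward results against NumPy/hand-derived references (including
    # batch mode and a 4x4 Cholesky example) plus numerical gradient checks.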
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Test gemm separately from other la-operators.
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1)
data_in2 = np.random.uniform(1, 10, shape2)
data_in3 = np.random.uniform(1, 10, shape3)
data_in4 = np.random.uniform(1, 10, shape4)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
# Now test all the other operators.
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
    # Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7.)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4))
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_lower_triangle_symm(
data1, ndims=4, m=4, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7.)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=True)
r = rep_3x(-2. * trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=True)
b = rep_3x(np.transpose(trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True)
b = rep_3x(trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2.)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64: the backward
# gradient for the unused output then has dtype np.float32, which is very annoying.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_2():
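    # Covers linalg.syrk (both transpose settings, including batch mode) and
    # linalg.gelqf (Q orthonormality and L*Q reconstruction of the input), with
    # numerical gradient checks; the gelqf part is CPU only.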
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as they need cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
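# Added note (not in the original test file): _syevd_backward implements the
# adjoint of the eigendecomposition convention used here, where the rows of u
# are eigenvectors and a = u^T diag(l) u. It forms T = grad_u . u^T, builds a
# symmetric matrix M whose diagonal is grad_l and whose off-diagonal entries
# are (T[i, j] - T[j, i]) / (2 * (l[i] - l[j])), and returns u^T . M . u.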
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
# test fails with seed 990952066: 0 output seen with dropout ratio=0. See issue #9816
@with_seed(1234)
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative deviation of the sums should stay below ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always')
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
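# Added illustration (not in the original test file): a minimal self-check of
# the smooth-L1 reference above. With sigma=1 the switch point is |x| = 1;
# below it the function is quadratic, above it linear minus 0.5.
def _np_smooth_l1_selfcheck():
    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    # Expected values: [1.5, 0.125, 0.0, 0.125, 1.5]
    expected = np.where(np.abs(x) < 1.0, 0.5 * x ** 2, np.abs(x) - 0.5)
    assert np.allclose(np_smooth_l1(x, 1.0), expected)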
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,):  # as an exception, an all-ones shape such as (1, 1, 1, 1) is squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
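# Added worked example (not in the original test file): for isizeW = 10 and
# osizeW = 3 the index formulas above give the pooling windows
#   ow = 0: [floor(0*10/3), ceil(1*10/3)) = [0, 4)
#   ow = 1: [floor(1*10/3), ceil(2*10/3)) = [3, 7)
#   ow = 2: [floor(2*10/3), ceil(3*10/3)) = [6, 10)
# i.e. possibly overlapping windows whose union covers the whole input row,
# each averaged independently.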
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
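# Added note (not in the original test file): the reference above maps output
# pixel (h2, w2) back to the real-valued source location
#   (h2 * (inH - 1) / (outH - 1), w2 * (inW - 1) / (outW - 1)),
# which pins the corner pixels of input and output to each other, and blends
# the four surrounding source pixels with the bilinear weights
# (1 - h1lambda) * (1 - w1lambda), (1 - h1lambda) * w1lambda,
# h1lambda * (1 - w1lambda) and h1lambda * w1lambda. For example, resizing a
# row of width 2 to width 3 samples the source at columns 0.0, 0.5 and 1.0.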
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = 14
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_score = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_score = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
# check forward
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data = rand_ndarray(shape=shape, stype='default')
data_np = data.asnumpy()
expected = f(data_np, a, b, c)
output = mx.nd.contrib.quadratic(data, a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(), expected, rtol=0.001, atol=0.0001)
# check backward using finite difference
data = mx.sym.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback)
op_exe.forward()
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
if __name__ == '__main__':
import nose
nose.runmodule()
|
luoyetx/mxnet
|
tests/python/unittest/test_operator.py
|
Python
|
apache-2.0
| 247,358
|
import json
import os
import re
import unittest2 as unittest
from stacktester import openstack
class ImagesTest(unittest.TestCase):
def setUp(self):
self.os = openstack.Manager()
host = self.os.config.nova.host
port = self.os.config.nova.port
def tearDown(self):
pass
def _assert_image_links(self, image):
image_id = str(image['id'])
mgmt_url = self.os.nova.management_url
bmk_url = re.sub(r'v1.1\/', r'', mgmt_url)
self_link = os.path.join(mgmt_url, 'images', image_id)
bookmark_link = os.path.join(bmk_url, 'images', image_id)
expected_links = [
{
'rel': 'self',
'href': self_link,
},
{
'rel': 'bookmark',
'href': bookmark_link,
},
]
self.assertEqual(image['links'], expected_links)
def _assert_image_entity_basic(self, image):
actual_keys = set(image.keys())
expected_keys = set((
'id',
'name',
'links',
))
self.assertEqual(actual_keys, expected_keys)
self._assert_image_links(image)
def _assert_image_entity_detailed(self, image):
keys = image.keys()
if 'server' in keys:
keys.remove('server')
actual_keys = set(keys)
expected_keys = set((
'id',
'name',
'progress',
'created',
'updated',
'status',
'metadata',
'links',
))
self.assertEqual(actual_keys, expected_keys)
self._assert_image_links(image)
def test_index(self):
"""List all images"""
response, body = self.os.nova.request('GET', '/images')
self.assertEqual(response['status'], '200')
resp_body = json.loads(body)
self.assertEqual(resp_body.keys(), ['images'])
for image in resp_body['images']:
self._assert_image_entity_basic(image)
def test_detail(self):
"""List all images in detail"""
response, body = self.os.nova.request('GET', '/images/detail')
self.assertEqual(response['status'], '200')
resp_body = json.loads(body)
self.assertEqual(resp_body.keys(), ['images'])
for image in resp_body['images']:
self._assert_image_entity_detailed(image)
|
rackspace-titan/stacktester
|
stacktester/tests/test_images.py
|
Python
|
apache-2.0
| 2,430
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of contruction functions for tensorflow computation structures."""
import functools
import types
from typing import Any, Callable, Optional
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.compiler import local_computation_factory_base
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.types import type_transformations
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
# TODO(b/181028772): Move this and similar code to `backends/tensorflow`.
# TODO(b/181131807): Remove independent invocations of the helper methods, and
# replace them with calls to the factory, then inline the bodies of the methods
# within the factory.
ComputationProtoAndType = local_computation_factory_base.ComputationProtoAndType
class TensorFlowComputationFactory(
local_computation_factory_base.LocalComputationFactory):
"""An implementation of local computation factory for TF computations."""
def __init__(self):
pass
def create_constant_from_scalar(
self, value,
type_spec: computation_types.Type) -> ComputationProtoAndType:
return create_constant(value, type_spec)
def create_plus_operator(
self, type_spec: computation_types.Type) -> ComputationProtoAndType:
def plus(a, b):
return structure.map_structure(tf.add, a, b)
return create_binary_operator(plus, type_spec)
def create_multiply_operator(
self, type_spec: computation_types.Type) -> ComputationProtoAndType:
def multiply(a, b):
return structure.map_structure(tf.multiply, a, b)
return create_binary_operator(multiply, type_spec)
def create_scalar_multiply_operator(
self, operand_type: computation_types.Type,
scalar_type: computation_types.TensorType) -> ComputationProtoAndType:
return create_binary_operator_with_upcast(
computation_types.StructType([(None, operand_type),
(None, scalar_type)]), tf.multiply)
def create_indexing_operator(
self,
operand_type: computation_types.TensorType,
index_type: computation_types.TensorType,
) -> ComputationProtoAndType:
return create_indexing_operator(operand_type, index_type)
def _tensorflow_comp(
tensorflow_proto: pb.TensorFlow,
type_signature: computation_types.Type,
) -> ComputationProtoAndType:
serialized_type = type_serialization.serialize_type(type_signature)
comp = pb.Computation(type=serialized_type, tensorflow=tensorflow_proto)
return (comp, type_signature)
def create_constant(
value, type_spec: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning a constant `value`.
The returned computation has the type signature `( -> T)`, where `T` is
`type_spec`.
`value` must be a value convertible to a tensor or a structure of values, such
that the dtype and shapes match `type_spec`. `type_spec` must contain only
named tuples and tensor types, but these can be arbitrarily nested.
Args:
value: A value to embed as a constant in the tensorflow graph.
type_spec: A `computation_types.Type` describing the type of the constant
to be constructed; must contain only named tuples and tensor types.
Raises:
TypeError: If the constraints of `type_spec` are violated.
"""
if not type_analysis.is_generic_op_compatible_type(type_spec):
raise TypeError(
'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
' only nested tuples and tensors are permitted.'.format(type_spec))
inferred_value_type = type_conversions.infer_type(value)
if (inferred_value_type.is_struct() and
not type_spec.is_assignable_from(inferred_value_type)):
raise TypeError(
'Must pass only a tensor or structure of tensor values to '
'`create_tensorflow_constant`; encountered a value {v} with inferred '
'type {t!r}, but needed {s!r}'.format(
v=value, t=inferred_value_type, s=type_spec))
if inferred_value_type.is_struct():
value = structure.from_container(value, recursive=True)
tensor_dtypes_in_type_spec = []
def _pack_dtypes(type_signature):
"""Appends dtype of `type_signature` to nonlocal variable."""
if type_signature.is_tensor():
tensor_dtypes_in_type_spec.append(type_signature.dtype)
return type_signature, False
type_transformations.transform_type_postorder(type_spec, _pack_dtypes)
if (any(x.is_integer for x in tensor_dtypes_in_type_spec) and
(inferred_value_type.is_tensor() and
not inferred_value_type.dtype.is_integer)):
raise TypeError(
'Only integers can be used as scalar values if our desired constant '
'type spec contains any integer tensors; passed scalar {} of dtype {} '
'for type spec {}.'.format(value, inferred_value_type.dtype, type_spec))
result_type = type_spec
def _create_result_tensor(type_spec, value):
"""Packs `value` into `type_spec` recursively."""
if type_spec.is_tensor():
type_spec.shape.assert_is_fully_defined()
result = tf.constant(value, dtype=type_spec.dtype, shape=type_spec.shape)
else:
elements = []
if inferred_value_type.is_struct():
# Copy the leaf values according to the type_spec structure.
for (name, elem_type), value in zip(
structure.iter_elements(type_spec), value):
elements.append((name, _create_result_tensor(elem_type, value)))
else:
# "Broadcast" the value to each level of the type_spec structure.
for _, elem_type in structure.iter_elements(type_spec):
elements.append((None, _create_result_tensor(elem_type, value)))
result = structure.Struct(elements)
return result
with tf.Graph().as_default() as graph:
result = _create_result_tensor(result_type, value)
_, result_binding = tensorflow_utils.capture_result_from_graph(
result, graph)
type_signature = computation_types.FunctionType(None, result_type)
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=None,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
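# Illustrative usage sketch (added; not part of the original module). Assuming
# the types API imported above, building a no-argument computation that
# returns the scalar 10.0 would look roughly like:
#
#   proto, fn_type = create_constant(
#       10.0, computation_types.TensorType(tf.float32))
#   # fn_type is the function type `( -> float32)`.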
def create_unary_operator(
operator, operand_type: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing a unary operation.
The returned computation has the type signature `(T -> U)`, where `T` is
`operand_type` and `U` is the result of applying the `operator` to a value of
type `T`.
Args:
operator: A callable taking one argument representing the operation to
encode. For example: `tf.math.abs`.
operand_type: A `computation_types.Type` to use as the argument to the
constructed unary operator; must contain only named tuples and tensor
types.
Raises:
TypeError: If the constraints of `operand_type` are violated or `operator`
is not callable.
"""
if (operand_type is None or
not type_analysis.is_generic_op_compatible_type(operand_type)):
raise TypeError(
'`operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
f'this is disallowed in the generic operators. Got: {operand_type} ')
py_typecheck.check_callable(operator)
with tf.Graph().as_default() as graph:
operand_value, operand_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', operand_type, graph)
result_value = operator(operand_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(operand_type, result_type)
parameter_binding = operand_binding
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_binary_operator(
operator,
operand_type: computation_types.Type,
second_operand_type: Optional[computation_types.Type] = None
) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing a binary operation.
The returned computation has the type signature `(<T,T> -> U)`, where `T` is
`operand_type` and `U` is the result of applying the `operator` to a tuple of
type `<T,T>`.
Note: If `operand_type` is a `computation_types.StructType`, then
`operator` will be applied pointwise. This places the burden on callers of
this function to construct the correct values to pass into the returned
function. For example, to divide `[2, 2]` by `2`, first `2` must be packed
into the data structure `[x, x]`, before the division operator of the
appropriate type is called.
Args:
operator: A callable taking two arguments representing the operation to
encode. For example: `tf.math.add`, `tf.math.multiply`, and
`tf.math.divide`.
operand_type: A `computation_types.Type` to use as the argument to the
constructed binary operator; must contain only named tuples and tensor
types.
second_operand_type: An optional `computation_types.Type` to use as the
second argument to the constructed binary operator. If `None`, the operator
uses `operand_type` for both arguments. Must contain only named tuples and
tensor types.
Raises:
TypeError: If the constraints of `operand_type` are violated or `operator`
is not callable.
"""
if not type_analysis.is_generic_op_compatible_type(operand_type):
raise TypeError(
'`operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
f'this is disallowed in the generic operators. Got: {operand_type} ')
if second_operand_type is not None:
if not type_analysis.is_generic_op_compatible_type(second_operand_type):
raise TypeError(
'`second_operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
'this is disallowed in the generic operators. '
f'Got: {second_operand_type} ')
elif second_operand_type is None:
second_operand_type = operand_type
py_typecheck.check_callable(operator)
with tf.Graph().as_default() as graph:
operand_1_value, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', operand_type, graph)
operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
'y', second_operand_type, graph)
result_value = operator(operand_1_value, operand_2_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(
computation_types.StructType((operand_type, second_operand_type)),
result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_1_binding, operand_2_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
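# Illustrative sketch (added; not part of the original module): following the
# pattern of `create_plus_operator` above, a pointwise divide over the struct
# type `<float32,float32>` could be built roughly as
#   divide = lambda a, b: structure.map_structure(tf.divide, a, b)
#   t = computation_types.StructType([tf.float32, tf.float32])
#   proto, fn_type = create_binary_operator(divide, t)
# giving a computation of type
# `(<<float32,float32>,<float32,float32>> -> <float32,float32>)`; as the
# docstring notes, dividing <2., 2.> by the scalar 2. then requires packing
# the scalar into <2., 2.> first.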
def create_binary_operator_with_upcast(
type_signature: computation_types.StructType,
operator: Callable[[Any, Any], Any]) -> ComputationProtoAndType:
"""Creates TF computation upcasting its argument and applying `operator`.
Args:
type_signature: A `computation_types.StructType` with two elements, both
only containing structs or tensors in their type tree. The first and
second element must match in structure, or the second element may be a
single tensor type that is broadcasted (upcast) to the leaves of the
structure of the first type. This single tensor may be assignable to the
tensor types at the leaves, or in the case that the leaves have fully
defined shapes, this tensor may be broadcast via `tf.broadcast_to` to each of those
shapes. In the case of non-assignability and non-fully defined shapes
at the leaves of the structure, this function will raise.
operator: Callable defining the operator.
Returns:
Same as `create_binary_operator()`.
"""
py_typecheck.check_type(type_signature, computation_types.StructType)
py_typecheck.check_callable(operator)
type_analysis.check_tensorflow_compatible_type(type_signature)
if not type_signature.is_struct() or len(type_signature) != 2:
raise TypeError('To apply a binary operator, we must by definition have an '
'argument which is a `StructType` with 2 elements; '
'asked to create a binary operator for type: {t}'.format(
t=type_signature))
if type_analysis.contains(type_signature, lambda t: t.is_sequence()):
raise TypeError(
'Applying binary operators in TensorFlow is only '
'supported on Tensors and StructTypes; you '
'passed {t} which contains a SequenceType.'.format(t=type_signature))
def _pack_into_type(to_pack: tf.Tensor, type_spec: computation_types.Type):
"""Pack Tensor value `to_pack` into the nested structure `type_spec`."""
if type_spec.is_struct():
elem_iter = structure.iter_elements(type_spec)
return structure.Struct([(elem_name, _pack_into_type(to_pack, elem_type))
for elem_name, elem_type in elem_iter])
elif type_spec.is_tensor():
value_tensor_type = type_conversions.type_from_tensors(to_pack)
if type_spec.is_assignable_from(value_tensor_type):
return to_pack
elif not type_spec.shape.is_fully_defined():
raise TypeError('Cannot generate TensorFlow creating binary operator '
'with first type not assignable from second, and '
'first type without fully defined shapes. First '
f'type contains an element of type: {type_spec}.\n'
f'Packing value {to_pack} into this type is '
'undefined.')
return tf.cast(tf.broadcast_to(to_pack, type_spec.shape), type_spec.dtype)
with tf.Graph().as_default() as graph:
first_arg, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', type_signature[0], graph)
operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
'y', type_signature[1], graph)
if type_signature[0].is_struct() and type_signature[1].is_struct():
# If both the first and second arguments are structs with the same
# structure, simply re-use operand_2_value as-is; `structure.map_structure`
# below will map the binary operator pointwise to the leaves of the
# structure.
if structure.is_same_structure(type_signature[0], type_signature[1]):
second_arg = operand_2_value
else:
raise TypeError('Cannot upcast one structure to a different structure. '
'{x} -> {y}'.format(
x=type_signature[1], y=type_signature[0]))
elif type_signature[0].is_assignable_from(type_signature[1]):
second_arg = operand_2_value
else:
second_arg = _pack_into_type(operand_2_value, type_signature[0])
if type_signature[0].is_tensor():
result_value = operator(first_arg, second_arg)
elif type_signature[0].is_struct():
result_value = structure.map_structure(operator, first_arg, second_arg)
else:
raise TypeError('Encountered unexpected type {t}; can only handle Tensor '
'and StructTypes.'.format(t=type_signature[0]))
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(type_signature, result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_1_binding, operand_2_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
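# Added note (not in the original module): concretely, if `type_signature[0]`
# is the struct `<float32,float32>` and `type_signature[1]` is a scalar
# `float32`, the scalar operand is packed ("upcast") to `<float32,float32>` by
# `_pack_into_type` before `operator` is applied pointwise; this is how
# `create_scalar_multiply_operator` above uses this function.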
def create_indexing_operator(
operand_type: computation_types.TensorType,
index_type: computation_types.TensorType,
) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing an indexing operation."""
operand_type.check_tensor()
index_type.check_tensor()
if index_type.shape.rank != 0:
raise TypeError(f'Expected index type to be a scalar, found {index_type}.')
with tf.Graph().as_default() as graph:
operand_value, operand_binding = tensorflow_utils.stamp_parameter_in_graph(
'indexing_operand', operand_type, graph)
index_value, index_binding = tensorflow_utils.stamp_parameter_in_graph(
'index', index_type, graph)
result_value = tf.gather(operand_value, index_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(
computation_types.StructType((operand_type, index_type)), result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_binding, index_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_empty_tuple() -> ComputationProtoAndType:
"""Returns a tensorflow computation returning an empty tuple.
The returned computation has the type signature `( -> <>)`.
"""
return create_computation_for_py_fn(lambda: structure.Struct([]), None)
def create_identity(
type_signature: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation representing an identity function.
The returned computation has the type signature `(T -> T)`, where `T` is
`type_signature`. NOTE: if `T` contains `computation_types.StructType`s
without an associated container type, they will be given the container type
`tuple` by this function.
Args:
type_signature: A `computation_types.Type` to use as the parameter type and
result type of the identity function.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings.
"""
type_analysis.check_tensorflow_compatible_type(type_signature)
parameter_type = type_signature
if parameter_type is None:
raise TypeError('TensorFlow identity cannot be created for NoneType.')
# In certain circumstances TF requires fetches to be distinct from feeds, hence the explicit identity.
if type_signature.is_tensor() or type_signature.is_sequence():
identity_fn = tf.identity
elif type_signature.is_struct():
identity_fn = functools.partial(structure.map_structure, tf.identity)
else:
raise NotImplementedError(
f'TensorFlow identity cannot be created for type {type_signature}')
return create_computation_for_py_fn(identity_fn, parameter_type)
def create_replicate_input(type_signature: computation_types.Type,
count: int) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning `count` copies of its argument.
The returned computation has the type signature `(T -> <T, T, T, ...>)`, where
`T` is `type_signature` and the length of the result is `count`.
Args:
type_signature: A `computation_types.Type` to replicate.
count: An integer, the number of times the input is replicated.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings or if `count` is not an integer.
"""
type_analysis.check_tensorflow_compatible_type(type_signature)
py_typecheck.check_type(count, int)
parameter_type = type_signature
identity_comp, _ = create_identity(parameter_type)
# This manual proto manipulation is significantly faster than using TFF's
# GraphDef serialization for large `count` arguments.
tensorflow_comp = identity_comp.tensorflow
single_result_binding = tensorflow_comp.result
if tensorflow_comp.parameter:
new_tf_pb = pb.TensorFlow(
graph_def=tensorflow_comp.graph_def,
parameter=tensorflow_comp.parameter,
result=pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=(single_result_binding for _ in range(count)))))
else:
new_tf_pb = pb.TensorFlow(
graph_def=tensorflow_comp.graph_def,
result=pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=(single_result_binding for _ in range(count)))))
fn_type = computation_types.FunctionType(
parameter_type,
computation_types.StructType([(None, parameter_type) for _ in range(count)
]))
return _tensorflow_comp(new_tf_pb, fn_type)
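# Added note (not in the original module): for example, with `type_signature`
# a scalar `float32` and `count=3`, the returned computation has type
# `(float32 -> <float32,float32,float32>)`. The identity computation's single
# result binding is simply repeated `count` times against the same GraphDef,
# which is what makes this fast for large `count`.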
def create_computation_for_py_fn(
fn: types.FunctionType, parameter_type: Optional[computation_types.Type]
) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning the result of `fn`.
The returned computation has the type signature `(T -> U)`, where `T` is
`parameter_type` and `U` is the type returned by `fn`.
Args:
fn: A Python function.
parameter_type: A `computation_types.Type` or `None`.
"""
if parameter_type is not None:
py_typecheck.check_type(parameter_type, computation_types.Type)
with tf.Graph().as_default() as graph:
if parameter_type is not None:
parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', parameter_type, graph)
result = fn(parameter_value)
else:
parameter_binding = None
result = fn()
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result, graph)
type_signature = computation_types.FunctionType(parameter_type, result_type)
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
|
tensorflow/federated
|
tensorflow_federated/python/core/impl/compiler/tensorflow_computation_factory.py
|
Python
|
apache-2.0
| 23,589
|
MAX_DVSWITCH_LENGTH = 60
QS_NAME_PREFIX = 'QS'
VLAN = 'VLAN'
NAME_FORMAT = '{0}_{1}_{2}_{3}_{4}'
class DvPortGroupNameGenerator(object):
@staticmethod
def generate_port_group_name(dv_switch_name, vlan_id, vlan_type):
dv_switch_name = dv_switch_name[:MAX_DVSWITCH_LENGTH]
return NAME_FORMAT.format(QS_NAME_PREFIX, dv_switch_name, VLAN, str(vlan_id), str(vlan_type))
@staticmethod
def is_generated_name(name):
return str(name).startswith(QS_NAME_PREFIX)
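# Illustrative example (added; not part of the original module), with
# hypothetical inputs: generate_port_group_name('dvSwitch1', 100, 'Trunk')
# returns 'QS_dvSwitch1_VLAN_100_Trunk', and is_generated_name() recognizes it
# by the 'QS' prefix.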
|
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/network/dvswitch/name_generator.py
|
Python
|
apache-2.0
| 495
|
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
from keras.wrappers.scikit_learn import KerasRegressor
from ionyx import Experiment
from ionyx.contrib.keras_builder import KerasBuilder
from ionyx.datasets import DataSetLoader
print('Beginning experiment test...')
data, _, _ = DataSetLoader.load_forest_cover()
X_cols = data.columns[1:].tolist()
y_col = data.columns[0]
logistic = LogisticRegression()
cv = KFold()
experiment = Experiment(package='sklearn', model=logistic, scoring_metric='accuracy',
verbose=True, data=data, X_columns=X_cols, y_column=y_col, cv=cv)
experiment.train_model()
experiment.cross_validate()
experiment.learning_curve()
param_grid = [
{
'alpha': [0.01, 0.1, 1.0]
}
]
experiment.param_search(param_grid, save_results_path='/home/john/temp/search.csv')
print(experiment.best_model_)
experiment.save_model('/home/john/temp/model.pkl')
experiment.load_model('/home/john/temp/model.pkl')
print(experiment.model)
_, X, y = DataSetLoader.load_property_inspection()
xgb = XGBRegressor()
cv = KFold()
experiment = Experiment(package='xgboost', model=xgb, scoring_metric='neg_mean_squared_error',
eval_metric='rmse', verbose=True)
experiment.train_model(X, y, validate=True, early_stopping=True, early_stopping_rounds=5,
plot_eval_history=True)
experiment.cross_validate(X, y, cv)
experiment.save_model('/home/john/temp/model.pkl')
experiment.load_model('/home/john/temp/model.pkl')
print(experiment.model)
_, X, y = DataSetLoader.load_property_inspection()
nn = KerasRegressor(build_fn=KerasBuilder.build_dense_model, input_size=X.shape[1], output_size=1,
loss='mean_squared_error', metrics=['mse'], batch_size=32, epochs=5)
cv = KFold()
experiment = Experiment(package='keras', model=nn, scoring_metric='neg_mean_squared_error',
verbose=True)
experiment.train_model(X, y, validate=True, early_stopping=True, early_stopping_rounds=2,
plot_eval_history=True)
experiment.cross_validate(X, y, cv)
experiment.save_model('/home/john/temp/model.h5')
experiment.load_model('/home/john/temp/model.h5')
print(experiment.model)
print('Done.')
|
jdwittenauer/ionyx
|
tests/experiment_test.py
|
Python
|
apache-2.0
| 2,290
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from oslo.utils import units
from nova.compute import arch
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import driver
from nova.virt import fake
from nova.virt import xenapi
from nova.virt.xenapi import driver as xenapi_driver
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Driver operations."""
def _get_driver(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.flags(connection_url='test_url',
connection_password='test_pass', group='xenserver')
return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
'disk_total': 5 * units.Gi,
'disk_used': 2 * units.Gi,
'disk_allocated': 4 * units.Gi,
'host_hostname': 'somename',
'supported_instances': arch.X86_64,
'host_cpu_info': {'cpu_count': 50},
'vcpus_used': 10,
'pci_passthrough_devices': ''}
def test_available_resource(self):
driver = self._get_driver()
driver._session.product_version = (6, 8, 2)
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
resources = driver.get_available_resource(None)
self.assertEqual(6008002, resources['hypervisor_version'])
self.assertEqual(50, resources['vcpus'])
self.assertEqual(3, resources['memory_mb'])
self.assertEqual(5, resources['local_gb'])
self.assertEqual(10, resources['vcpus_used'])
self.assertEqual(3 - 2, resources['memory_mb_used'])
self.assertEqual(2, resources['local_gb_used'])
self.assertEqual('xen', resources['hypervisor_type'])
self.assertEqual('somename', resources['hypervisor_hostname'])
self.assertEqual(1, resources['disk_available_least'])
def test_overhead(self):
driver = self._get_driver()
instance = {'memory_mb': 30720, 'vcpus': 4}
# expected memory overhead per:
# https://wiki.openstack.org/wiki/XenServer/Overhead
expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
(instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
xenapi_driver.OVERHEAD_BASE)
expected = math.ceil(expected)
overhead = driver.estimate_instance_overhead(instance)
self.assertEqual(expected, overhead['memory_mb'])
def test_set_bootable(self):
driver = self._get_driver()
self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
driver._vmops.set_bootable('inst', True)
self.mox.ReplayAll()
driver.set_bootable('inst', True)
def test_post_interrupted_snapshot_cleanup(self):
driver = self._get_driver()
fake_vmops_cleanup = mock.Mock()
driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
driver.post_interrupted_snapshot_cleanup("context", "instance")
fake_vmops_cleanup.assert_called_once_with("context", "instance")
def test_public_api_signatures(self):
inst = self._get_driver()
self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
|
luzheqi1987/nova-annotation
|
nova/tests/unit/virt/xenapi/test_driver.py
|
Python
|
apache-2.0
| 3,973
|
from __future__ import print_function
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import time
def rom_from_vhdl(data):
    # extract binary data from a bunch of VHDL lines
    # (use a bytearray so the output stays binary-safe under the Python 3 shebang)
    rom = bytearray(b'\xff' * 16384)
    for line in data.split("\n"):
        m = re.search('when x"(....)" => Di <= x"(..)"', line)
        if not m: continue
        addr, value = m.groups()
        rom[int(addr, 16)] = int(value, 16)
    return bytes(rom)
if __name__ == '__main__':
    infile, outfile = sys.argv[1:]
    print("extracting bytes from %s and saving to %s" % (infile, outfile))
    rom = rom_from_vhdl(open(infile).read())
    print("writing %d bytes" % len(rom))
    open(outfile, 'wb').write(rom)
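# Example of the VHDL input this script expects (editor's note; the address and
# value below are made-up): a line such as
#   when x"3FF0" => Di <= x"A9";
# sets ROM byte 0x3FF0 to 0xA9, while any address never mentioned stays 0xFF.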
|
google/myelin-acorn-electron-hardware
|
cpu_socket_minispartan_daughterboard/fpga-without-cpu/rom_from_vhdl.py
|
Python
|
apache-2.0
| 1,278
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module looks after initialising the appengine api stubs."""
import logging
import os
from appengine_django import appid
from appengine_django import have_appserver
from appengine_django.db.creation import DatabaseCreation
from django.db.backends import BaseDatabaseWrapper
from django.db.backends import BaseDatabaseFeatures
from django.db.backends import BaseDatabaseOperations
def get_datastore_paths():
"""Returns a tuple with the path to the datastore and history file.
The datastore is stored in the same location as dev_appserver uses by
default, but the name is altered to be unique to this project so multiple
Django projects can be developed on the same machine in parallel.
Returns:
(datastore_path, history_path)
"""
from google.appengine.tools import dev_appserver_main
datastore_path = dev_appserver_main.DEFAULT_ARGS['datastore_path']
history_path = dev_appserver_main.DEFAULT_ARGS['history_path']
datastore_path = datastore_path.replace("dev_appserver", "django_%s" % appid)
history_path = history_path.replace("dev_appserver", "django_%s" % appid)
return datastore_path, history_path
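# Editor's note (illustrative): with dev_appserver's default arguments the
# replacement above turns a path such as
#   /tmp/dev_appserver.datastore  ->  /tmp/django_<appid>.datastore
# (the exact default location is an assumption; only the renaming comes from the
# code), so parallel Django projects do not share a datastore file.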
def get_test_datastore_paths(inmemory=True):
"""Returns a tuple with the path to the test datastore and history file.
If inmemory is true, (None, None) is returned to request an in-memory
datastore. If inmemory is false the path returned will be similar to the path
returned by get_datastore_paths but with a different name.
Returns:
(datastore_path, history_path)
"""
if inmemory:
return None, None
datastore_path, history_path = get_datastore_paths()
datastore_path = datastore_path.replace("datastore", "testdatastore")
history_path = history_path.replace("datastore", "testdatastore")
return datastore_path, history_path
def destroy_datastore(datastore_path, history_path):
"""Destroys the appengine datastore at the specified paths."""
for path in [datastore_path, history_path]:
if not path: continue
try:
os.remove(path)
except OSError, e:
if e.errno != 2:
logging.error("Failed to clear datastore: %s" % e)
class DatabaseError(Exception):
"""Stub class for database errors. Required by Django"""
pass
class IntegrityError(Exception):
"""Stub class for database integrity errors. Required by Django"""
pass
class DatabaseFeatures(BaseDatabaseFeatures):
"""Stub class to provide the feaures member expected by Django"""
pass
class DatabaseOperations(BaseDatabaseOperations):
"""Stub class to provide the options member expected by Django"""
pass
class DatabaseWrapper(BaseDatabaseWrapper):
"""App Engine database definition for Django.
This "database" backend does not support any of the standard backend
operations. The only task that it performs is to setup the api stubs required
by the appengine libraries if they have not already been initialised by an
appserver.
"""
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.creation = DatabaseCreation(self)
self.use_test_datastore = kwargs.get("use_test_datastore", False)
self.test_datastore_inmemory = kwargs.get("test_datastore_inmemory", True)
if have_appserver:
return
self._setup_stubs()
def _get_paths(self):
if self.use_test_datastore:
return get_test_datastore_paths(self.test_datastore_inmemory)
else:
return get_datastore_paths()
def _setup_stubs(self):
# If this code is being run without an appserver (eg. via a django
# commandline flag) then setup a default stub environment.
from google.appengine.tools import dev_appserver_main
args = dev_appserver_main.DEFAULT_ARGS.copy()
args['datastore_path'], args['history_path'] = self._get_paths()
from google.appengine.tools import dev_appserver
dev_appserver.SetupStubs(appid, **args)
if self.use_test_datastore:
logging.debug("Configured API stubs for the test datastore")
else:
logging.debug("Configured API stubs for the development datastore")
def flush(self):
"""Helper function to remove the current datastore and re-open the stubs"""
destroy_datastore(*self._get_paths())
self._setup_stubs()
def close(self):
pass
def _commit(self):
pass
def cursor(self, *args):
pass
|
ratio/google-app-engine-django
|
appengine_django/db/base.py
|
Python
|
apache-2.0
| 5,014
|
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .defaults import _C as cfg
|
mlperf/training_results_v0.5
|
v0.5.0/nvidia/submission/code/object_detection/pytorch/maskrcnn_benchmark/config/__init__.py
|
Python
|
apache-2.0
| 650
|
#!/usr/bin/env python3
#####
#
# Title: ST.py
# Author: Dan Clegg
# Copyright: 2016, Dan Clegg
# LICENSE: Apache 2.0
#
#####
import requests
import string
import urllib3.contrib.pyopenssl # Necessary to get around Python 3 ssl errors when calling an https endpoint
from parse import *
from lxml import etree
def POST(url, body):
    response = requests.post(url, data=body)
    data = response.text
    return data
class ST:
service = None
tgt = None
value = None
casServiceUrl=None
def __init__(self,service,tgt,casServiceUrl):
self.casServiceUrl = casServiceUrl
self.service = service
self.tgt = tgt
self.value = POST('%s/tickets/%s' % (self.casServiceUrl,self.tgt),'service=%s' % self.service)
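# Illustrative usage sketch (editor's note, not part of the original module); the
# CAS URL, service URL and ticket-granting ticket below are made-up placeholders:
#
#   st = ST(service='https://app.example.com/',
#           tgt='TGT-1-abc123',
#           casServiceUrl='https://cas.example.com/cas/v1')
#   print(st.value)  # the service ticket returned by the CAS REST endpoint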
|
danclegg/python-CAS-RESTful-client
|
ST.py
|
Python
|
apache-2.0
| 750
|
# Copyright 2016-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
GuestAccess,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
LimitExceededError,
ShadowBanError,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import (
JsonDict,
Requester,
RoomAlias,
RoomID,
StateMap,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberHandler(metaclass=abc.ABCMeta):
# TODO(paul): This handler currently contains a messy conflation of
# low-level API that works on UserID objects and so on, and REST-level
# API that takes ID strings and returns pagination chunks. These concerns
# ought to be separated out a lot better.
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
self.identity_handler = hs.get_identity_handler()
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.registration.enable_3pid_lookup
self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
)
self._join_rate_limiter_remote = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
)
self._invites_per_room_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
)
self._invites_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
self._third_party_invite_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_third_party_invite.per_second,
burst_count=hs.config.ratelimiting.rc_third_party_invite.burst_count,
)
self.request_ratelimiter = hs.get_request_ratelimiter()
@abc.abstractmethod
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and join a room that this server is not in
Args:
requester
remote_room_hosts: List of servers that can be used to join via.
room_id: Room that we are trying to join
user: User who is trying to join
content: A dict that should be used as the content of the join event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and knock on a room that this server is not in
Args:
remote_room_hosts: List of servers that can be used to knock via.
room_id: Room that we are trying to knock on.
user: User who is trying to knock.
content: A dict that should be used as the content of the knock event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite we have received from a remote server
Args:
invite_event_id: ID of the invite to be rejected
txn_id: optional transaction ID supplied by the client
requester: user making the rejection request, according to the access token
content: additional content to include in the rejection event.
Normally an empty dict.
Returns:
event id, stream_id of the leave event
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Rescind a local knock made on a remote room.
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: An optional transaction ID supplied by the client.
requester: The user making the request, according to the access token.
content: The content of the generated leave event.
Returns:
A tuple containing (event_id, stream_id of the leave event).
"""
raise NotImplementedError()
@abc.abstractmethod
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Notifies distributor on master process that the user has left the
room.
Args:
target
room_id
"""
raise NotImplementedError()
@abc.abstractmethod
async def forget(self, user: UserID, room_id: str) -> None:
raise NotImplementedError()
async def ratelimit_multiple_invites(
self,
requester: Optional[Requester],
room_id: Optional[str],
n_invites: int,
update: bool = True,
) -> None:
"""Ratelimit more than one invite sent by the given requester in the given room.
Args:
requester: The requester sending the invites.
room_id: The room the invites are being sent in.
n_invites: The amount of invites to ratelimit for.
update: Whether to update the ratelimiter's cache.
Raises:
LimitExceededError: The requester can't send that many invites in the room.
"""
await self._invites_per_room_limiter.ratelimit(
requester,
room_id,
update=update,
n_actions=n_invites,
)
async def ratelimit_invite(
self,
requester: Optional[Requester],
room_id: Optional[str],
invitee_user_id: str,
) -> None:
"""Ratelimit invites by room and by target user.
If room ID is missing then we just rate limit by target user.
"""
if room_id:
await self._invites_per_room_limiter.ratelimit(requester, room_id)
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
async def _local_membership_update(
self,
requester: Requester,
target: UserID,
room_id: str,
membership: str,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
txn_id: Optional[str] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
and persist a new event for the new membership change.
Args:
requester:
target:
room_id:
membership:
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
txn_id:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
Returns:
Tuple of event ID and stream ordering position
"""
user_id = target.to_string()
if content is None:
content = {}
content["membership"] = membership
if requester.is_guest:
content["kind"] = "guest"
# Check if we already have an event with a matching transaction ID. (We
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
event, context = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
require_consent=require_consent,
outlier=outlier,
historical=historical,
)
prev_state_ids = await context.get_prev_state_ids()
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
if event.membership == Membership.JOIN:
newly_joined = True
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
# Only rate-limit if the user actually joined the room, otherwise we'll end
# up blocking profile updates.
if newly_joined and ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_local.can_do_action(requester)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[target],
ratelimit=ratelimit,
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target, room_id)
# we know it was persisted, so should have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def copy_room_tags_and_direct_to_room(
self, old_room_id: str, new_room_id: str, user_id: str
) -> None:
"""Copies the tags and direct room state from one room to another.
Args:
old_room_id: The room ID of the old room.
new_room_id: The room ID of the new room.
user_id: The user's ID.
"""
# Retrieve user account data for predecessor room
user_account_data, _ = await self.store.get_account_data_for_user(user_id)
# Copy direct message state if applicable
direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
# Check which key this room is under
if isinstance(direct_rooms, dict):
for key, room_id_list in direct_rooms.items():
if old_room_id in room_id_list and new_room_id not in room_id_list:
# Add new room_id to this key
direct_rooms[key].append(new_room_id)
# Save back to user's m.direct account data
await self.account_data_handler.add_account_data_for_user(
user_id, AccountDataTypes.DIRECT, direct_rooms
)
break
# Copy room tags if applicable
room_tags = await self.store.get_tags_for_room(user_id, old_room_id)
# Copy each room tag to the new room
for tag, tag_content in room_tags.items():
await self.account_data_handler.add_tag_to_room(
user_id, new_room_id, tag, tag_content
)
async def update_membership(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
        Args:
requester: The user who is performing the update.
target: The user whose membership is being updated.
room_id: The room ID whose membership is being updated.
action: The membership change, see synapse.api.constants.Membership.
txn_id: The transaction ID, if given.
remote_room_hosts: Remote servers to send the update to.
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
new_room: Whether the membership update is happening in the context of a room
creation.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
Raises:
ShadowBanError if a shadow-banned requester attempts to send an invite.
"""
if action == Membership.INVITE and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
key = (room_id,)
as_id = object()
if requester.app_service:
as_id = requester.app_service.id
# We first linearise by the application service (to try to limit concurrent joins
# by application services), and then by room ID.
with (await self.member_as_limiter.queue(as_id)):
with (await self.member_linearizer.queue(key)):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
historical=historical,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
return result
async def update_membership_locked(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
Assumes that the membership linearizer is already held for the room.
Args:
requester:
target:
room_id:
action:
txn_id:
remote_room_hosts:
third_party_signed:
ratelimit:
content:
new_room: Whether the membership update is happening in the context of a room
creation.
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
"""
content_specified = bool(content)
if content is None:
content = {}
else:
# We do a copy here as we potentially change some keys
# later on.
content = dict(content)
# allow the server notices mxid to set room-level profile
is_requester_server_notices_user = (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
)
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
# Strip profile data, knowing that new profile data will be added to the
# event's content in event_creation_handler.create_event() using the target's
# global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)
if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400,
f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
errcode=Codes.BAD_JSON,
)
if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
raise SynapseError(
400,
f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
errcode=Codes.BAD_JSON,
)
if "avatar_url" in content:
if not await self.profile_handler.check_avatar_size_and_mime_type(
content["avatar_url"],
):
raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
# The event content should *not* include the authorising user as
# it won't be properly signed. Strip it out since it might come
# back from a client updating a display name / avatar.
#
# This only applies to restricted rooms, but there should be no reason
# for a client to include it. Unconditionally remove it.
content.pop(EventContentFields.AUTHORISING_USER, None)
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:
await self.federation_handler.exchange_third_party_invite(
third_party_signed["sender"],
target.to_string(),
room_id,
third_party_signed,
)
if not remote_room_hosts:
remote_room_hosts = []
if effective_membership_state not in ("leave", "ban"):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
if effective_membership_state == Membership.INVITE:
target_id = target.to_string()
if ratelimit:
await self.ratelimit_invite(requester, room_id, target_id)
# block any attempts to invite the server notices mxid
if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite = False
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to send invites
is_requester_admin = True
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
if self.config.server.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
)
block_invite = True
if not await self.spam_checker.user_may_invite(
requester.user.to_string(), target_id, room_id
):
logger.info("Blocking invite due to spam checker")
block_invite = True
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")
# An empty prev_events list is allowed as long as the auth_event_ids are present
if prev_event_ids is not None:
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
historical=historical,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
current_state_ids = await self.state_handler.get_current_state_ids(
room_id, latest_event_ids=latest_event_ids
)
# TODO: Refactor into dictionary of explicitly allowed transitions
# between old and new state, with specific error messages for some
# transitions and generic otherwise
old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
if old_state_id:
old_state = await self.store.get_event(old_state_id, allow_none=True)
old_membership = old_state.content.get("membership") if old_state else None
if action == "unban" and old_membership != "ban":
raise SynapseError(
403,
"Cannot unban user who was not banned"
" (membership=%s)" % old_membership,
errcode=Codes.BAD_STATE,
)
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
raise SynapseError(
403,
"Cannot %s user who was banned" % (action,),
errcode=Codes.BAD_STATE,
)
if old_state:
same_content = content == old_state.content
same_membership = old_membership == effective_membership_state
same_sender = requester.user.to_string() == old_state.sender
if same_sender and same_membership and same_content:
# duplicate event.
# we know it was persisted, so must have a stream ordering.
assert old_state.internal_metadata.stream_ordering
return (
old_state.event_id,
old_state.internal_metadata.stream_ordering,
)
if old_membership in ["ban", "leave"] and action == "kick":
raise AuthError(403, "The target user is not in the room")
# we don't allow people to reject invites to the server notice
# room, but they can leave it once they are joined.
if (
old_membership == Membership.INVITE
and effective_membership_state == Membership.LEAVE
):
is_blocked = await self._is_server_notice_room(room_id)
if is_blocked:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You cannot reject this invite",
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
)
else:
if action == "kick":
raise AuthError(403, "The target user is not in the room")
is_host_in_room = await self._is_host_in_room(current_state_ids)
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(current_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
# Figure out whether the user is a server admin to determine whether they
# should be able to bypass the spam checker.
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to join rooms
bypass_spam_checker = True
else:
bypass_spam_checker = await self.auth.is_server_admin(requester.user)
inviter = await self._get_inviter(target.to_string(), room_id)
if (
not bypass_spam_checker
# We assume that if the spam checker allowed the user to create
# a room then they're allowed to join it.
and not new_room
and not await self.spam_checker.user_may_join_room(
target.to_string(), room_id, is_invited=inviter is not None
)
):
raise SynapseError(403, "Not allowed to join this room")
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
)
if remote_join:
if ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_remote.can_do_action(
requester,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
inviter = await self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
content["membership"] = Membership.JOIN
profile = self.profile_handler
if not content_specified:
content["displayname"] = await profile.get_displayname(target)
content["avatar_url"] = await profile.get_avatar_url(target)
if requester.is_guest:
content["kind"] = "guest"
remote_join_response = await self._remote_join(
requester, remote_room_hosts, room_id, target, content
)
return remote_join_response
elif effective_membership_state == Membership.LEAVE:
if not is_host_in_room:
# Figure out the user's current membership state for the room
(
current_membership_type,
current_membership_event_id,
) = await self.store.get_local_current_membership_for_user_in_room(
target.to_string(), room_id
)
if not current_membership_type or not current_membership_event_id:
logger.info(
"%s sent a leave request to %s, but that is not an active room "
"on this server, or there is no pending invite or knock",
target,
room_id,
)
raise SynapseError(404, "Not a known room")
# perhaps we've been invited
if current_membership_type == Membership.INVITE:
invite = await self.store.get_event(current_membership_event_id)
logger.info(
"%s rejects invite to %s from %s",
target,
room_id,
invite.sender,
)
if not self.hs.is_mine_id(invite.sender):
# send the rejection to the inviter's HS (with fallback to
# local event)
return await self.remote_reject_invite(
invite.event_id,
txn_id,
requester,
content,
)
# the inviter was on our server, but has now left. Carry on
# with the normal rejection codepath, which will also send the
# rejection out to any other servers we believe are still in the room.
# thanks to overzealous cleaning up of event_forward_extremities in
# `delete_old_current_state_events`, it's possible to end up with no
# forward extremities here. If that happens, let's just hang the
# rejection off the invite event.
#
# see: https://github.com/matrix-org/synapse/issues/7139
if len(latest_event_ids) == 0:
latest_event_ids = [invite.event_id]
# or perhaps this is a remote room that a local user has knocked on
elif current_membership_type == Membership.KNOCK:
knock = await self.store.get_event(current_membership_event_id)
return await self.remote_rescind_knock(
knock.event_id, txn_id, requester, content
)
elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))
content["membership"] = Membership.KNOCK
profile = self.profile_handler
if "displayname" not in content:
content["displayname"] = await profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = await profile.get_avatar_url(target)
return await self.remote_knock(
remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=latest_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
async def _should_perform_remote_join(
self,
user_id: str,
room_id: str,
remote_room_hosts: List[str],
content: JsonDict,
is_host_in_room: bool,
) -> Tuple[bool, List[str]]:
"""
Check whether the server should do a remote join (as opposed to a local
join) for a user.
Generally a remote join is used if:
* The server is not yet in the room.
* The server is in the room, the room has restricted join rules, the user
is not joined or invited to the room, and the server does not have
another user who is capable of issuing invites.
Args:
user_id: The user joining the room.
room_id: The room being joined.
remote_room_hosts: A list of remote room hosts.
content: The content to use as the event body of the join. This may
be modified.
is_host_in_room: True if the host is in the room.
Returns:
A tuple of:
True if a remote join should be performed. False if the join can be
done locally.
A list of remote room hosts to use. This is an empty list if a
local join is to be done.
"""
# If the host isn't in the room, pass through the prospective hosts.
if not is_host_in_room:
return True, remote_room_hosts
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
current_state_ids = await self.store.get_current_state_ids(room_id)
# If restricted join rules are not being used, a local join can always
# be used.
if not await self.event_auth_handler.has_restricted_join_rules(
current_state_ids, room_version
):
return False, []
# If the user is invited to the room or already joined, the join
# event can always be issued locally.
prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership in (
Membership.JOIN,
Membership.INVITE,
):
return False, []
# If the local host has a user who can issue invites, then a local
# join can be done.
#
# If not, generate a new list of remote hosts based on which
# can issue invites.
event_map = await self.store.get_events(current_state_ids.values())
current_state = {
state_key: event_map[event_id]
for state_key, event_id in current_state_ids.items()
}
allowed_servers = get_servers_from_users(
get_users_which_can_issue_invite(current_state)
)
# If the local server is not one of allowed servers, then a remote
# join must be done. Return the list of prospective servers based on
# which can issue invites.
if self.hs.hostname not in allowed_servers:
return True, list(allowed_servers)
# Ensure the member should be allowed access via membership in a room.
await self.event_auth_handler.check_restricted_join_rules(
current_state_ids, room_version, user_id, prev_member_event
)
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[
EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
)
return False, []
async def transfer_room_state_on_room_upgrade(
self, old_room_id: str, room_id: str
) -> None:
"""Upon our server becoming aware of an upgraded room, either by upgrading a room
ourselves or joining one, we can transfer over information from the previous room.
Copies user state (tags/push rules) for every local user that was in the old room, as
well as migrating the room directory state.
Args:
old_room_id: The ID of the old room
room_id: The ID of the new room
"""
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
old_room = await self.store.get_room(old_room_id)
if old_room is not None and old_room["is_public"]:
await self.store.set_room_is_public(old_room_id, False)
await self.store.set_room_is_public(room_id, True)
# Transfer alias mappings in the room directory
await self.store.update_aliases_for_room(old_room_id, room_id)
# Check if any groups we own contain the predecessor room
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
# Add new the new room to those groups
await self.store.add_room_to_group(
group_id, room_id, old_room is not None and old_room["is_public"]
)
# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)
async def copy_user_state_on_room_upgrade(
self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
) -> None:
"""Copy user-specific information when they join a new room when that new room is the
result of a room upgrade
Args:
old_room_id: The ID of upgraded room
new_room_id: The ID of the new room
user_ids: User IDs to copy state for
"""
logger.debug(
"Copying over room tags and push rules from %s to %s for users %s",
old_room_id,
new_room_id,
user_ids,
)
for user_id in user_ids:
try:
# It is an upgraded room. Copy over old tags
await self.copy_room_tags_and_direct_to_room(
old_room_id, new_room_id, user_id
)
# Copy over push rules
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
except Exception:
logger.exception(
"Error copying tags and/or push rules from rooms %s to %s for user %s. "
"Skipping...",
old_room_id,
new_room_id,
user_id,
)
continue
async def send_membership_event(
self,
requester: Optional[Requester],
event: EventBase,
context: EventContext,
ratelimit: bool = True,
) -> None:
"""
Change the membership status of a user in a room.
Args:
requester: The local user who requested the membership
event. If None, certain checks, like whether this homeserver can
act as the sender, will be skipped.
event: The membership event.
context: The context of the event.
ratelimit: Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
if requester is not None:
sender = UserID.from_string(event.sender)
assert (
sender == requester.user
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids()
if event.membership == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(prev_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
if event.membership not in (Membership.LEAVE, Membership.BAN):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target_user, room_id)
async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool:
"""
Returns whether a guest can join a room based on its current state.
"""
guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
if not guest_access_id:
return False
guest_access = await self.store.get_event(guest_access_id)
return bool(
guest_access
and guest_access.content
and guest_access.content.get(EventContentFields.GUEST_ACCESS)
== GuestAccess.CAN_JOIN
)
async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
"""Kick any local guest users from the room.
This is called when the room state changes from guests allowed to not-allowed.
        Args:
current_state: the current state of the room. We will iterate this to look
for guest users to kick.
"""
for member_event in current_state:
try:
if member_event.type != EventTypes.Member:
continue
if not self.hs.is_mine_id(member_event.state_key):
continue
if member_event.content["membership"] not in {
Membership.JOIN,
Membership.INVITE,
}:
continue
if (
"kind" not in member_event.content
or member_event.content["kind"] != "guest"
):
continue
# We make the user choose to leave, rather than have the
# event-sender kick them. This is partially because we don't
# need to worry about power levels, and partially because guest
# users are a concept which doesn't hugely work over federation,
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
target_user = UserID.from_string(member_event.state_key)
requester = create_requester(
target_user, is_guest=True, authenticated_entity=self._server_name
)
handler = self.hs.get_room_member_handler()
await handler.update_membership(
requester,
target_user,
member_event.room_id,
"leave",
ratelimit=False,
require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s" % (e,))
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
"""
Get the room ID associated with a room alias.
Args:
room_alias: The alias to look up.
Returns:
A tuple of:
The room ID as a RoomID object.
Hosts likely to be participating in the room ([str]).
Raises:
SynapseError if room alias could not be found.
"""
directory_handler = self.directory_handler
mapping = await directory_handler.get_association(room_alias)
if not mapping:
raise SynapseError(404, "No such room alias")
room_id = mapping["room_id"]
servers = mapping["servers"]
# put the server which owns the alias at the front of the server list.
if room_alias.domain in servers:
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
return RoomID.from_string(room_id), servers
async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]:
invite = await self.store.get_invite_for_local_user_in_room(
user_id=user_id, room_id=room_id
)
if invite:
return UserID.from_string(invite.sender)
return None
async def do_3pid_invite(
self,
room_id: str,
inviter: UserID,
medium: str,
address: str,
id_server: str,
requester: Requester,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
"""Invite a 3PID to a room.
Args:
room_id: The room to invite the 3PID to.
inviter: The user sending the invite.
medium: The 3PID's medium.
address: The 3PID's address.
id_server: The identity server to use.
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
id_access_token: The optional identity server access token.
Returns:
The new stream ID.
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
if self.config.server.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
403, "Invites have been disabled on this server", Codes.FORBIDDEN
)
if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
await self._third_party_invite_limiter.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
)
if not can_invite:
raise SynapseError(
403,
"This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server"
)
invitee = await self.identity_handler.lookup_3pid(
id_server, medium, address, id_access_token
)
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
# We don't check the invite against the spamchecker(s) here (through
# user_may_invite) because we'll do it further down the line anyway (in
# update_membership_locked).
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
# Check if the spamchecker(s) allow this invite to go through.
if not await self.spam_checker.user_may_send_3pid_invite(
inviter_userid=requester.user.to_string(),
medium=medium,
address=address,
room_id=room_id,
):
raise SynapseError(403, "Cannot send threepid invite")
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
medium,
address,
room_id,
inviter,
txn_id=txn_id,
id_access_token=id_access_token,
)
return stream_id
async def _make_and_store_3pid_invite(
self,
requester: Requester,
id_server: str,
medium: str,
address: str,
room_id: str,
user: UserID,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
room_state = await self.state_handler.get_current_state(room_id)
inviter_display_name = ""
inviter_avatar_url = ""
member_event = room_state.get((EventTypes.Member, user.to_string()))
if member_event:
inviter_display_name = member_event.content.get("displayname", "")
inviter_avatar_url = member_event.content.get("avatar_url", "")
# if user has no display name, default to their MXID
if not inviter_display_name:
inviter_display_name = user.to_string()
canonical_room_alias = ""
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event:
canonical_room_alias = canonical_alias_event.content.get("alias", "")
room_name = ""
room_name_event = room_state.get((EventTypes.Name, ""))
if room_name_event:
room_name = room_name_event.content.get("name", "")
room_type = None
room_create_event = room_state.get((EventTypes.Create, ""))
if room_create_event:
room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
room_join_rules = join_rules_event.content.get("join_rule", "")
room_avatar_url = ""
room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
if room_avatar_event:
room_avatar_url = room_avatar_event.content.get("url", "")
(
token,
public_keys,
fallback_public_key,
display_name,
) = await self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
(
event,
stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.ThirdPartyInvite,
"content": {
"display_name": display_name,
"public_keys": public_keys,
# For backwards compatibility:
"key_validity_url": fallback_public_key["key_validity_url"],
"public_key": fallback_public_key["public_key"],
},
"room_id": room_id,
"sender": user.to_string(),
"state_key": token,
},
ratelimit=False,
txn_id=txn_id,
)
return stream_id
async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool:
# Have we just created the room, and is this about to be the very
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
# We can only get here if we're in the process of creating the room
return True
for etype, state_key in current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
continue
event_id = current_state_ids[(etype, state_key)]
event = await self.store.get_event(event_id, allow_none=True)
if not event:
continue
if event.membership == Membership.JOIN:
return True
return False
async def _is_server_notice_room(self, room_id: str) -> bool:
if self._server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self._server_notices_mxid in user_ids
class RoomMemberMasterHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
) -> Optional[bool]:
"""
Check if complexity of a remote room is too great.
Args:
room_id
remote_room_hosts
Returns: bool of whether the complexity is too great, or None
if unable to be fetched
"""
max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
if complexity:
return complexity["v1"] > max_complexity
return None
async def _is_local_room_too_complex(self, room_id: str) -> bool:
"""
Check if the complexity of a local room is too great.
Args:
room_id: The room ID to check for complexity.
"""
max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
# and if it is the only entry we'd like to return a 404 rather than a
# 500.
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
check_complexity = self.hs.config.server.limit_remote_rooms.enabled
if (
check_complexity
and self.hs.config.server.limit_remote_rooms.admins_can_join
):
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
# Fetch the room complexity
too_complex = await self._is_remote_room_too_complex(
room_id, remote_room_hosts
)
if too_complex is True:
raise SynapseError(
code=400,
msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
# We don't do an auth check if we are doing an invite
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
# need to do the invite/join dance.
event_id, stream_id = await self.federation_handler.do_invite_join(
remote_room_hosts, room_id, user.to_string(), content
)
# Check the room we just joined wasn't too large, if we didn't fetch the
# complexity of it before.
if check_complexity:
if too_complex is False:
# We checked, and we're under the limit.
return event_id, stream_id
# Check again, but with the local state events
too_complex = await self._is_local_room_too_complex(room_id)
if too_complex is False:
# We're under the limit.
return event_id, stream_id
# The room is too large. Leave.
requester = types.create_requester(
user, authenticated_entity=self._server_name
)
await self.update_membership(
requester=requester, target=user, room_id=room_id, action="leave"
)
raise SynapseError(
code=400,
msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
return event_id, stream_id
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
invite_event = await self.store.get_event(invite_event_id)
room_id = invite_event.room_id
target_user = invite_event.state_key
# first of all, try doing a rejection via the inviting server
fed_handler = self.federation_handler
try:
inviter_id = UserID.from_string(invite_event.sender)
event, stream_id = await fed_handler.do_remotely_reject_invite(
[inviter_id.domain], room_id, target_user, content=content
)
return event.event_id, stream_id
except Exception as e:
# if we were unable to reject the invite, we will generate our own
# leave event.
#
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warning("Failed to reject invite: %s", e)
return await self._generate_local_out_of_band_leave(
invite_event, txn_id, requester, content
)
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: The transaction ID to use.
requester: The originator of the request.
content: The content of the leave event.
Implements RoomMemberHandler.remote_rescind_knock
"""
# TODO: We don't yet support rescinding knocks over federation
# as we don't know which homeserver to send it to. An obvious
# candidate is the remote homeserver we originally knocked through,
# however we don't currently store that information.
# Just rescind the knock locally
knock_event = await self.store.get_event(knock_event_id)
return await self._generate_local_out_of_band_leave(
knock_event, txn_id, requester, content
)
async def _generate_local_out_of_band_leave(
self,
previous_membership_event: EventBase,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Generate a local leave event for a room
        This can be called after we e.g. fail to reject an invite via a remote server.
It generates an out-of-band membership event locally.
Args:
previous_membership_event: the previous membership event for this user
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
room_id = previous_membership_event.room_id
target_user = previous_membership_event.state_key
content["membership"] = Membership.LEAVE
event_dict = {
"type": EventTypes.Member,
"room_id": room_id,
"sender": target_user,
"content": content,
"state_key": target_user,
}
# the auth events for the new event are the same as that of the previous event, plus
# the event itself.
#
# the prev_events consist solely of the previous membership event.
prev_event_ids = [previous_membership_event.event_id]
auth_event_ids = (
list(previous_membership_event.auth_event_ids()) + prev_event_ids
)
event, context = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
outlier=True,
)
event.internal_metadata.out_of_band_membership = True
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room. Attempts to do so via one remote out of a given list.
Args:
remote_room_hosts: A list of homeservers to try knocking through.
room_id: The ID of the room to knock on.
user: The user to knock on behalf of.
content: The content of the knock event.
Returns:
A tuple of (event ID, stream ID).
"""
# filter ourselves out of remote_room_hosts
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
return await self.federation_handler.do_knock(
remote_room_hosts, room_id, user.to_string(), content=content
)
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
user_left_room(self.distributor, target, room_id)
async def forget(self, user: UserID, room_id: str) -> None:
user_id = user.to_string()
member = await self.state_handler.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None
if membership is not None and membership not in [
Membership.LEAVE,
Membership.BAN,
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
if membership:
await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
"""
Return the list of users which can issue invites.
This is done by exploring the joined users and comparing their power levels
    to the necessary power level to issue an invite.
Args:
auth_events: state in force at this point in the room
Returns:
The users which can issue invites.
"""
invite_level = get_named_level(auth_events, "invite", 0)
users_default_level = get_named_level(auth_events, "users_default", 0)
power_level_event = get_power_level_event(auth_events)
# Custom power-levels for users.
if power_level_event:
users = power_level_event.content.get("users", {})
else:
users = {}
result = []
# Check which members are able to invite by ensuring they're joined and have
# the necessary power level.
for (event_type, state_key), event in auth_events.items():
if event_type != EventTypes.Member:
continue
if event.membership != Membership.JOIN:
continue
# Check if the user has a custom power level.
if users.get(state_key, users_default_level) >= invite_level:
result.append(state_key)
return result
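# Illustrative sketch (hypothetical values, not taken from this module): with a
# power-levels content of
#   {"invite": 50, "users_default": 0,
#    "users": {"@alice:example.com": 100, "@bob:example.com": 0}}
# and both users joined, only "@alice:example.com" reaches the invite level and
# would be returned by get_users_which_can_issue_invite.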
def get_servers_from_users(users: List[str]) -> Set[str]:
"""
Resolve a list of users into their servers.
Args:
users: A list of users.
Returns:
A set of servers.
"""
servers = set()
for user in users:
try:
servers.add(get_domain_from_id(user))
except SynapseError:
pass
return servers
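# Illustrative sketch (hypothetical user IDs):
#   get_servers_from_users(["@alice:example.com", "@bob:matrix.org"])
# would return {"example.com", "matrix.org"}.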
|
matrix-org/synapse
|
synapse/handlers/room_member.py
|
Python
|
apache-2.0
| 72,282
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""T5 CBQA tasks."""
import functools
from . import metrics
from . import postprocessors
from . import preprocessors
import seqio
from t5.data import get_default_vocabulary
from t5.data import postprocessors as t5_postprocessors
from t5.data import preprocessors as t5_preprocessors
from t5.evaluation import metrics as t5_metrics
MixtureRegistry = seqio.MixtureRegistry
TaskRegistry = seqio.TaskRegistry
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" # GCS
DEFAULT_EXTRA_IDS = 100
NQ_TRAIN_SPLIT_START = 7830
NQ_TRAIN_SPLIT_END = 79168
NQO_TRAIN_SPLIT_END = 79168
WQ_TRAIN_SPLIT_END = 3417
TQA_TRAIN_SPLIT_END = 78785
DEFAULT_OUTPUT_FEATURES = {
"inputs": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True),
"targets": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True)
}
# ========================== Natural Questions =================================
# Natural Questions open domain variant that most closely matches the official
# evaluation procedure.
# The model is trained to predict all ground-truth answers
# and is only considered correct if it predicts all answers for any one of the
# annotators. As in the official evaluation, we consider questions with fewer
# than two non-null annotations unanswerable (given the context) but because we
# cannot predict unanswerability without the context, we only compute the recall
# metric. Further, because our model does not have access to the oracle context,
# we also normalize predicted and ground-truth answers when comparing them.
# This task uses a portion of the train set for validation.
TaskRegistry.add(
"natural_questions_nocontext",
source=seqio.TfdsDataSource(
tfds_name="natural_questions:0.0.2",
splits={
"train": f"train[{NQ_TRAIN_SPLIT_START}:{NQ_TRAIN_SPLIT_END}]",
"validation": f"train[:{NQ_TRAIN_SPLIT_START}]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[
functools.partial(
metrics.natural_questions,
# Train set does not contain multiple annotations.
non_null_threshold=1)
])
# This task uses full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_nocontext_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions:0.0.2"),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[metrics.natural_questions])
# The standard open domain variant of Natural Questions, where:
# 1) the model is only ever trained to output a single answer;
# 2) if a question has multiple answers, it is trained to predict the first;
# 3) any questions with answers longer than five tokens are ignored;
# 4) answers are normalized before being compared;
# This task uses a portion of the train split for validation.
TaskRegistry.add(
"natural_questions_open",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# This is a slight variant of the previous task that selects a random answer
# when multiple are provided instead of using the first.
TaskRegistry.add(
"natural_questions_open_randanswer",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
preprocessors.sample_answer,
seqio.preprocessors.tokenize,
# Do not cache - ensures we are sampling different answers.
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# This task uses full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_open_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions_open:1.0.0"),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# ============================ Web Questions ===================================
# This task uses 10% of the train split for validation.
TaskRegistry.add(
"web_questions_open",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{WQ_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{WQ_TRAIN_SPLIT_END}:]",
"test": "test"
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# This task trains on the full train split.
TaskRegistry.add(
"web_questions_open_test",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
"train": "train",
"validation": "test",
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# =============================== Trivia QA ====================================
TaskRegistry.add(
"trivia_qa_open",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{TQA_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{TQA_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# This task trains on combined train and validation splits.
TaskRegistry.add(
"trivia_qa_open_test",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
"train": "train+validation",
"test": "test"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# ============================= CBQA Mixtures ==================================
# This mixture is to be used for hyperparameter tuning. Training happens on
# validation sets (if available) or subsplits of the train set. Evaluation
# happens on the validation (or heldout portion of the train) split.
MixtureRegistry.add(
"closed_book_qa",
[
"trivia_qa_open",
"natural_questions_open",
"web_questions_open"
],
default_rate=seqio.mixing_rate_num_examples
)
# This mixture is to be used at test time. Training happens on the combined
# train and validation splits and evaluation happens on the test split.
MixtureRegistry.add(
"closed_book_qa_test",
[
"trivia_qa_open_test",
"natural_questions_open_test",
"web_questions_open_test"
],
default_rate=seqio.mixing_rate_num_examples
)
# ========================= Salient Span Masking ===============================
TaskRegistry.add(
"salient_span_masked_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
preprocessors.mask_salient_spans,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
TaskRegistry.add(
"span_corrupted_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
functools.partial(
t5_preprocessors.rekey, key_map={
"inputs": None,
"targets": "text"
}),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
t5_preprocessors.span_corruption,
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
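# A minimal usage sketch (not part of the original module): assuming the TFDS
# data referenced above has been prepared locally, a registered task can be
# materialised into a tf.data.Dataset via seqio. The sequence lengths below are
# illustrative choices rather than values prescribed by this file.
if __name__ == "__main__":
    nq_task = seqio.get_mixture_or_task("natural_questions_open")
    ds = nq_task.get_dataset(
        sequence_length={"inputs": 512, "targets": 64},
        split="validation",
        shuffle=False,
    )
    for example in ds.take(1):
        print({k: v.shape for k, v in example.items()})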
|
google-research/google-research
|
t5_closed_book_qa/t5_cbqa/tasks.py
|
Python
|
apache-2.0
| 11,010
|
import matplotlib.pyplot as plt
import numpy as np
def plot_image(masked_bands, title=None, figsize=(10, 10)):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
show(ax, masked_bands)
if title:
ax.set_title(title)
ax.set_axis_off()
def show(axis, bands, alpha=True):
"""Show bands as image with option of converting mask to alpha.
Alters axis in place.
"""
# Single band (2d array)
if bands.ndim == 2:
bands = [bands]
elif len(bands) == 3:
bands = [b for b in bands.copy()] # turn into list
else:
raise ValueError("Can only plot 1 or 3 band arrays, not an array with shape: {}".format(bands.shape))
    # Capture the nodata mask before scaling, since _scale_bands returns
    # plain filled arrays that no longer carry a mask attribute.
    mask = np.ma.getmaskarray(bands[0])
    bands = _scale_bands(bands)
    if alpha and len(bands) == 3:
        bands.append(_mask_to_alpha(mask))
if len(bands) >= 3:
dbands = np.dstack(bands)
else:
dbands = bands[0]
return axis.imshow(dbands)
def _mask_bands(bands, mask):
    return [np.ma.array(b, mask=mask) for b in bands]
def _scale_bands(bands):
def _percentile(bands, percentile):
all_pixels = np.concatenate([b.compressed() for b in bands])
return np.percentile(all_pixels, percentile)
old_min = _percentile(bands, 2)
old_max = _percentile(bands, 98)
new_min = 0
new_max = 1
def _linear_scale(ndarray, old_min, old_max, new_min, new_max):
# https://en.wikipedia.org/wiki/Normalization_(image_processing)
return (ndarray - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
    scaled = [np.clip(_linear_scale(b.astype(float),
old_min, old_max,
new_min, new_max),
new_min, new_max)
for b in bands]
filled = [b.filled(fill_value=0) for b in scaled]
return filled
def _mask_to_alpha(mask):
alpha = np.zeros_like(np.atleast_3d(mask))
alpha[~mask] = 1
return alpha
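# Minimal self-contained sketch (not part of the original module): render a
# synthetic single-band masked array with plot_image. Real usage would pass
# bands read from satellite imagery instead of random data.
if __name__ == "__main__":
    data = np.random.rand(100, 100)
    nodata = np.zeros((100, 100), dtype=bool)
    nodata[:, :10] = True  # pretend the left edge is nodata
    band = np.ma.array(data, mask=nodata)
    plot_image(band, title="synthetic band")
    plt.show()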
|
planetlabs/notebooks
|
jupyter-notebooks/landsat-ps-comparison/visual.py
|
Python
|
apache-2.0
| 1,976
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import gzip
import paddle.v2 as paddle
import network_conf
import reader
from utils import *
def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
batch_size):
def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
probs = inferer.infer(input=test_batch, field=['value'])
assert len(probs) == len(test_batch)
for word_ids, prob in zip(test_batch, probs):
word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
" ".join(["{:0.4f}".format(p)
for p in prob]), word_text))
logger.info('begin to predict...')
use_default_data = (data_dir is None)
if use_default_data:
word_dict = paddle.dataset.imdb.word_dict()
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
test_reader = paddle.dataset.imdb.test(word_dict)
else:
assert os.path.exists(
word_dict_path), 'the word dictionary file does not exist'
assert os.path.exists(
label_dict_path), 'the label dictionary file does not exist'
word_dict = load_dict(word_dict_path)
word_reverse_dict = load_reverse_dict(word_dict_path)
label_reverse_dict = load_reverse_dict(label_dict_path)
test_reader = reader.test_reader(data_dir, word_dict)()
dict_dim = len(word_dict)
class_num = len(label_reverse_dict)
prob_layer = topology(dict_dim, class_num, is_infer=True)
# initialize PaddlePaddle
paddle.init(use_gpu=False, trainer_count=1)
# load the trained models
parameters = paddle.parameters.Parameters.from_tar(
gzip.open(model_path, 'r'))
inferer = paddle.inference.Inference(
output_layer=prob_layer, parameters=parameters)
test_batch = []
for idx, item in enumerate(test_reader):
test_batch.append([item[0]])
if len(test_batch) == batch_size:
_infer_a_batch(inferer, test_batch, word_reverse_dict,
label_reverse_dict)
test_batch = []
if len(test_batch):
_infer_a_batch(inferer, test_batch, word_reverse_dict,
label_reverse_dict)
test_batch = []
if __name__ == '__main__':
model_path = 'dnn_params_pass_00000.tar.gz'
assert os.path.exists(model_path), "the trained model does not exist."
nn_type = 'dnn'
test_dir = None
word_dict = None
label_dict = None
if nn_type == 'dnn':
topology = network_conf.fc_net
elif nn_type == 'cnn':
topology = network_conf.convolution_net
infer(
topology=topology,
data_dir=test_dir,
word_dict_path=word_dict,
label_dict_path=label_dict,
model_path=model_path,
batch_size=10)
|
zhaopu7/models
|
text_classification/infer.py
|
Python
|
apache-2.0
| 3,061
|
#!/usr/bin/env python
# Copyright (c) 2015 Sergey Bunatyan <sergey.bunatyan@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import entry_point
import messages
class RCPCallProxy(object):
"""
Proxy class for method call
"""
def __init__(self, postprocessor, service_name, method_name, source,
context, correlation_id, headers, kwargs):
super(RCPCallProxy, self).__init__()
self._postprocessor = postprocessor
self._service_name = service_name
self._method_name = method_name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers) or {}
self._kwargs = kwargs
def _make_request(self, context="", correlation_id="", reply_to="",
source=""):
if not source:
source = self._source
if not context:
context = self._context
if not correlation_id:
correlation_id = self._correlation_id
if not reply_to and not isinstance(reply_to, entry_point.EntryPoint):
reply_to = source.service
payload = self._kwargs
dst = entry_point.Destination(self._service_name, self._method_name)
headers = {
"correlation_id": correlation_id,
"reply_to": str(reply_to),
"source": str(source),
"destination": str(dst)
}
request_headers = self._headers.copy()
request_headers.update(headers)
request = messages.Request(request_headers, context, payload)
return request
def call(self, correlation_id="", context="", reply_to="", source=""):
"""
        Executes the call: builds a Request via _make_request and hands it
        to the postprocessor.
        :param correlation_id:
        :param context:
        :param reply_to:
        :param source:
        :return:
"""
request = self._make_request(context=context,
correlation_id=correlation_id,
reply_to=reply_to,
source=source)
self._postprocessor.process(request)
def cast(self, correlation_id="", context="", source=""):
request = self._make_request(context=context,
correlation_id=correlation_id,
reply_to=entry_point.NullEntryPoint(),
source=source)
self._postprocessor.process(request)
def transfer(self, request, context="", reply_to="", source=""):
if request.context:
context = context or {}
context.update(request.context)
request = self._make_request(correlation_id=request.correlation_id,
reply_to=reply_to,
context=context,
source=source)
self._postprocessor.process(request)
class RPCMethodProxy(object):
def __init__(self, postprocessor, service_name, method_name, source,
context="", correlation_id="", headers=""):
self._postprocessor = postprocessor
self._service_name = service_name
self._method_name = method_name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers)
def __call__(self, **kwargs):
self._kwargs = kwargs
return RCPCallProxy(self._postprocessor, self._service_name,
self._method_name, self._source, self._context,
self._correlation_id, self._headers, kwargs)
class RPCServiceProxy(object):
def __init__(self, postprocessor, name, source, context=None,
correlation_id="", headers=None):
self._postprocessor = postprocessor
self._name = name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers)
def __getattr__(self, item):
return RPCMethodProxy(self._postprocessor, self._name, item,
self._source, self._context,
self._correlation_id, self._headers)
class RPCProxy(object):
def __init__(self, postprocessor, source, context=None,
correlation_id="", headers=None):
self._postprocessor = postprocessor
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers) or {}
def _get_discovery_service(self):
return self._postprocessor.discovery_service
def __getattr__(self, item):
disc = self._get_discovery_service()
disc.get_remote(item)
return RPCServiceProxy(self._postprocessor, item, self._source,
self._context, self._correlation_id)
def add_headers(self, headers):
self._headers = copy.copy(headers)
def publish(self, correlation_id="", **kwargs):
headers = {
"correlation_id": correlation_id or self._correlation_id,
"source": str(self._source)
}
notification_headers = self._headers.copy()
notification_headers.update(headers)
publication = messages.Notification(notification_headers,
self._context, kwargs)
self._postprocessor.process(publication)
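# Illustrative call chain (hypothetical service and method names, not defined
# in this module):
#
#   proxy = RPCProxy(postprocessor, source)
#   proxy.some_service.some_method(arg=1).call()   # request/response style
#   proxy.some_service.some_method(arg=1).cast()   # fire-and-forget
#   proxy.publish(event="something_happened")      # notification
#
# Attribute access resolves the service (RPCServiceProxy) and the method
# (RPCMethodProxy); calling the method builds an RCPCallProxy, which finally
# hands a Request or Notification to the postprocessor.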
|
sbunatyan/tavrida
|
tavrida/proxies.py
|
Python
|
apache-2.0
| 5,991
|
"""
Main entry point for the Flask API. The API will provide
"""
# an interface to communicate with Dronekit instances
from flask import jsonify, Flask
from flask import abort, request
from flask import make_response
import dronepool
import threadrunner
app = Flask(__name__)
api_base_url = '/dronesym/api/flask'
# response.headers['X-Content-Type-Options'] = 'nosniff'
# response.headers['X-Frame-Options'] = 'SAMEORIGIN'
@app.after_request
def apply_caching(response):
response.headers["X-Frame-Options"] = "SAMEORIGIN"
response.headers['X-Content-Type-Options'] = 'nosniff'
return response
@app.errorhandler(404)
def send_not_found(error):
return make_response(jsonify({"message": "Resource not found"}), 404)
@app.errorhandler(400)
def send_bad_request(error):
return make_response(jsonify({"message": "Bad request"}), 400)
@app.route(api_base_url + '/spawn', methods=['POST'])
def create_new_drone():
    # This route creates a new Dronekit SITL instance in the drone pool.
    # The initial position needs to be sent along with the request as JSON.
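    # Hypothetical example payload (the exact location schema is defined by
    # dronepool, not here): {"droneId": "drone-1", "location": {"lat": 6.9271, "lon": 79.8612}}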
global q
    if not request.json or 'location' not in request.json or 'droneId' not in request.json:
abort(400)
print(request.json)
home = request.json['location']
drone_id = request.json['droneId']
q.put((dronepool.create_new_drone, {"db_key": drone_id, "home": home}))
return jsonify({"status": "OK", "message": "Created new drone"})
@app.route(api_base_url + '/remove/<string:drone_id>', methods=['POST'])
def remove_drone(drone_id):
global q
q.put((dronepool.remove_drone, {"drone_id": drone_id}))
return jsonify({"status": "OK", "message": "Removed drone"})
@app.route(api_base_url + '/<string:drone_id>/takeoff', methods=['POST'])
def send_takeoff(drone_id):
# This route issues a takeoff command to a specific drone
global q
    if request.json and request.json.get('waypoints'):
q.put(
(dronepool.takeoff_drone, {
"drone_id": drone_id, "waypoints": request.json['waypoints']}))
else:
q.put((dronepool.takeoff_drone, {"drone_id": drone_id}))
return jsonify({"status": "taking_off", "drone_id": drone_id})
@app.route(api_base_url + '/<string:drone_id>/land', methods=['POST'])
def send_land(drone_id):
global q
q.put((dronepool.land_drone, {"drone_id": drone_id}))
return jsonify({"status": "landing", "drone_id": drone_id})
@app.route(api_base_url + '/<string:drone_id>/resume', methods=['POST'])
def send_resume(drone_id):
global q
q.put((dronepool.resume_flight, {"drone_id": drone_id}))
return jsonify({"status": "resuming", "drone_id": drone_id})
if __name__ == '__main__':
threadrunner.initialize()
q = threadrunner.q
dronepool.initialize()
app.run(debug=True, use_reloader=False)
|
scorelab/DroneSym
|
dronesym-python/flask-api/src/main.py
|
Python
|
apache-2.0
| 2,989
|
#!/usr/bin/env python
# Parse an F5 BIG-IP ("{}"-block style) config into CSV rows.
import sys
import re
REG_IP=re.compile(r'[1-9][0-9]*(\.[0-9]+){3}')
REG_INDENT=re.compile('^[ ]*')
EOF='{'
FOE='}'
""" config for read argv of subcmd"""
BLOCK_SUBCMD = (
"members",
"origins",
"rules",
)
NONBLOCK_SUBCMD = (
"pool",
"destination",
"originating-address",
"translation-address",
"translation",
)
ALL_SUBCMD = NONBLOCK_SUBCMD + BLOCK_SUBCMD
PREFIX_POOL = "ltm pool "
PREFIX_VSERVER = "ltm virtual "
def ldepth(l,r):
d=r.search(l).group(0)
return (len(d),d)
def readconf(fdir):
f=open(fdir)
b=[]
for l in f:
b.append(l)
f.close()
return b
"""Pop a block by indent"""
def pop_block(f):
b=[ ]
b.append(f[0])
md=ldepth(f[0],r=REG_INDENT)
flag=(' '*md[0]) + FOE
i=1
l=len(f)
while i<l:
b.append(f[i])
i+=1
if f[i-1].startswith(flag):
break
return b,i
def block_to_dict(block):
r_eof = '[^' + EOF + ']*'
rdict={}
k=re.search(r_eof,block[0]).group(0)
rdict[k]=block[1:-1]
return rdict
def nonblock_to_dict(x):
r=x.split()
lenth_r=len(r)
if lenth_r == 2:
return { strip_key(r[0]):r[1] }
return { strip_key(x):x }
strip_key=lambda x: x.strip().strip(EOF).strip()
def readblock(block):
if block[0].strip().endswith(EOF):
return pop_block(block)
return block[0],1
"""
convert a block of :
"string"
or [sub1,sub2,]
to dict
"""
def parseblock(block):
"""return if a string """
if isinstance(block,str):
return nonblock_to_dict(block)
bdict={}
lenth_block= len(block)
if lenth_block == 0:
return bdict
b,i = readblock(block)
if isinstance(b,list):
bdict.update({ strip_key(b[0]):parseblock(b[1:-1]) })
else:
bdict.update(nonblock_to_dict(b))
bdict.update(parseblock(block[i:]))
return bdict
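# Illustrative example (assumed F5-style input; result shown approximately):
#
#   ltm pool /Common/p1 {
#       members {
#           /Common/10.0.0.1:80 {
#               address 10.0.0.1
#           }
#       }
#   }
#
# Feeding those lines through readblock/parseblock yields roughly:
#   {'ltm pool /Common/p1': {'members': {'/Common/10.0.0.1:80': {'address': '10.0.0.1'}}}}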
"""read argv"""
keyformat = lambda k : k.split().pop()
def read_dict_byprefix(dic,n):
vdict={}
for k in dic:
if k.startswith(n):
subdict={}
for kk in dic[k]:
if kk in ALL_SUBCMD:
subdict.update({ kk: dic[k][kk] })
vdict.update( { keyformat(k): subdict } )
return vdict
HEADLINE="VserverName,PublicIP,InternalIP,PoolName"
REG_DEL_PREFIX=re.compile("/Common/")
def printcsv_byvserver(lv,lp):
"""
ltmvserver,destination,member,pool
"""
print HEADLINE
output="%s,%s,%s,%s"
for k in lv:
destination=lv[k]["destination"]
pool=lv[k]["pool"]
members=lp[pool]["members"].keys()
for m in members:
            r = output % (k, destination, m, pool)
print REG_DEL_PREFIX.sub("",r)
if __name__ == "__main__":
conf = readconf(sys.argv[1])
conf_dict = {}
"""read conf to dict"""
while len(conf) > 0:
b,i = readblock(conf)
conf_dict.update(parseblock(b))
conf=conf[i:]
ltm_pool = read_dict_byprefix(conf_dict,PREFIX_POOL)
ltm_vserver = read_dict_byprefix(conf_dict,PREFIX_VSERVER)
#printcsv_byvserver(ltm_vserver,ltm_pool)
for k in ltm_vserver:
try:
if not "/Common/slowloris_dos_mitigate" in [ i for i in ltm_vserver[k]['rules']]:
print k
continue
except Exception as e:
print k
continue
|
dkluffy/dkluff-code
|
code/f5tools/configparse_f5.py
|
Python
|
apache-2.0
| 3,488
|
"""This module implements a class that..."""
from __future__ import print_function, unicode_literals
from builtins import range
import logging
from kivy.app import App
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from MUSCIMarker.utils import keypress_to_dispatch_key
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
mlclass_selection_dialog_kv = '''
<MLClassSelectionDialog@Popup>
size_hint: None, None
size: app.root.size[0] * 0.5, app.root.size[1] * 0.2
pos_hint: {'center_x': 0.5, 'centery_y': 0.5}
title: 'Select MLClass by typing its name.'
# on_text: current_name_label.text = self.get_current_name()
GridLayout:
id: grid
cols: 1
padding: '24dp'
TextInput:
id: text_input
size_hint_y: None
height: dp(24)
multiline: False
focus: True
text: ''
on_text: root.text = self.text
BoxLayout:
size_hint_y: None
height: dp(24)
Button:
id: cancel
text: root.cancel_text
on_release: root.cancel()
Button:
id: ok
text: root.ok_text
on_release: root.ok()
Label:
id: available_names_label
size_hint_y: None
height: dp(24)
text: ''
'''
Builder.load_string(mlclass_selection_dialog_kv)
class MLClassSelectionDialog(Popup):
"""The MLClassSelectionDialog class allows for keyboard-based
selection of the current MLClass."""
text = StringProperty('')
ok_text = StringProperty('OK')
cancel_text = StringProperty('Cancel')
__events__ = ('on_ok', 'on_cancel')
def __init__(self, *args, **kwargs):
super(MLClassSelectionDialog, self).__init__(*args, **kwargs)
self.create_bindings()
def ok(self):
self.dispatch('on_ok')
self.dismiss()
def cancel(self):
self.dispatch('on_cancel')
self.dismiss()
def on_ok(self):
#if len(self.available_clsnames) == 0:
# return
name = self.get_current_name()
if name is None:
return
if len(self.available_clsnames) > 1:
logging.info('MLClassSelectionDialog:'
' More than one name possible: {0},'
' picking: {1}.'
''.format(self.available_clsnames, name))
App.get_running_app().currently_selected_mlclass_name = name
def on_cancel(self):
self.dismiss()
def dismiss(self, *largs, **kwargs):
self.remove_bindings()
super(MLClassSelectionDialog, self).dismiss()
def on_text(self, instance, pos):
#self.ids['current_name_label'].text = self.get_current_name()
n = self.get_current_name()
if n is None:
pass
#self.ids['text_input'].suggestion_text = ''
elif len(pos) >= len(n):
pass
# self.ids['text_input'].suggestion_text =
else:
self.ids['text_input'].suggestion_text = self.get_current_name()[len(self.text):]
names = self.currently_available_names
if len(names) > 5:
names = names[:5] + ['...']
name_str = ', '.join(names)
self.ids['available_names_label'].text = name_str
##########################################################################
# Making it possible to operate the popup with Esc to cancel,
# Enter to confirm.
def create_bindings(self):
Window.bind(on_key_down=self.on_key_down)
Window.bind(on_key_up=self.on_key_up)
def remove_bindings(self):
Window.unbind(on_key_down=self.on_key_down)
Window.unbind(on_key_up=self.on_key_up)
def on_key_down(self, window, key, scancode, codepoint, modifier):
# Should control enter to confirm/escape to cancel
dispatch_key = keypress_to_dispatch_key(key, scancode, codepoint, modifier)
logging.info('MLClassSelectionDialog: Handling keypress: {0}'.format(dispatch_key))
is_handled = self.handle_dispatch_key(dispatch_key)
# Don't let the event propagate through the dialog.
return True
def handle_dispatch_key(self, dispatch_key):
"""Does the "heavy lifting" in keyboard controls: responds to a dispatch key.
        Decoupling this into a separate method facilitates giving commands to
the ListView programmatically, not just through user input,
and this way makes automation easier.
:param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key``
number, ``+``, and comma-separated modifiers.
:returns: True if the dispatch key got handled, False if there is
no response defined for the given dispatch key.
"""
if dispatch_key == '13': # Enter
logging.info('Confirming MLClassSelectionDialog!')
self.ok()
elif dispatch_key == '27': # Escape
logging.info('Cancelling MLClassSelectionDialog!')
self.cancel()
#elif dispatch_key == '9': # Tab
# pass
# Special keys are handled separately in the TextInput, so
# they would get caught by the "return True". We need to call
# their operations explicitly.
elif dispatch_key == '8': # Backspace
self.ids['text_input'].do_backspace()
elif dispatch_key == '9': # Tab
# Process common prefix
lcp = self._longest_common_prefix
infix = lcp[len(self.text):]
logging.info('MLClassSelectionDialog: Found LCP {0}, inf {1}'
''.format(lcp, infix))
self.ids['text_input'].text = self.text + infix
else:
return False
return True
def on_key_up(self, window, key, scancode, *args, **kwargs):
return False
##########################################################################
# The name selection mechanism
def clsnames_with_prefix(self, prefix):
return [clsname for clsname in self.available_clsnames
if clsname.startswith(prefix)]
@property
def available_clsnames(self):
mlclasses_by_name = App.get_running_app().annot_model.mlclasses_by_name
clsnames = list(mlclasses_by_name.keys())
sorted_clsnames = sorted(clsnames, key=lambda n: mlclasses_by_name[n].clsid)
return sorted_clsnames
@property
def currently_available_names(self):
return self.clsnames_with_prefix(self.text)
@property
def _longest_common_prefix(self):
names = self.currently_available_names
if len(names) == 0:
return ''
if len(names) == 1:
return names[0]
pref = ''
shortest_name_length = min([len(n) for n in names])
for i in range(shortest_name_length):
pref = names[0][:i+1]
for n in names[1:]:
if n[:i+1] != pref: # Unequal at i-th letter
return pref[:-1]
# Shortest word is at the same time the prefix
return names[0][:shortest_name_length]
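    # Illustrative example (hypothetical class names): with available names
    # ['notehead-full', 'notehead-empty'] and typed text 'note', the longest
    # common prefix is 'notehead-', so pressing Tab completes the TextInput
    # to 'notehead-'.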
def get_current_name(self):
"""This is the "clever" part of the name selection mechanism.
Right now, it just selects the first available name."""
names = self.currently_available_names
if len(names) == 0:
return None
output = names[0]
# Exact match has preference
for n in names:
if n == self.text:
output = n
return output
##########################################################################
# Feedback mechanism
|
hajicj/MUSCIMarker
|
MUSCIMarker/mlclass_selection.py
|
Python
|
apache-2.0
| 7,891
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchCreateEntities
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_EntityTypes_BatchCreateEntities_sync]
from google.cloud import dialogflow_v2
def sample_batch_create_entities():
# Create a client
client = dialogflow_v2.EntityTypesClient()
# Initialize request argument(s)
entities = dialogflow_v2.Entity()
entities.value = "value_value"
entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']
request = dialogflow_v2.BatchCreateEntitiesRequest(
parent="parent_value",
entities=entities,
)
# Make the request
operation = client.batch_create_entities(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_EntityTypes_BatchCreateEntities_sync]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2_generated_entity_types_batch_create_entities_sync.py
|
Python
|
apache-2.0
| 1,747
|
import SimpleHTTPServer
import SocketServer
import sys
PORT = 1548
class MyHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
log_file = open('logfile.txt', 'w')
def log_message(self, format, *args):
self.log_file.write("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format%args))
Handler = MyHTTPHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
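# Each request is appended to logfile.txt as a line roughly like (illustrative):
#   127.0.0.1 - - [11/Mar/2014 15:04:25] "GET / HTTP/1.1" 200 -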
|
suensummit/erjsTesting
|
testServer.py
|
Python
|
apache-2.0
| 549
|
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.conf.urls import url
from awx.api.views import (
JobList,
JobDetail,
JobStart,
JobCancel,
JobRelaunch,
JobCreateSchedule,
JobJobHostSummariesList,
JobJobEventsList,
JobActivityStreamList,
JobStdout,
JobNotificationsList,
JobLabelList,
JobHostSummaryDetail,
)
urls = [
url(r'^$', JobList.as_view(), name='job_list'),
url(r'^(?P<pk>[0-9]+)/$', JobDetail.as_view(), name='job_detail'),
url(r'^(?P<pk>[0-9]+)/start/$', JobStart.as_view(), name='job_start'), # Todo: Remove In 3.3
url(r'^(?P<pk>[0-9]+)/cancel/$', JobCancel.as_view(), name='job_cancel'),
url(r'^(?P<pk>[0-9]+)/relaunch/$', JobRelaunch.as_view(), name='job_relaunch'),
url(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', JobJobHostSummariesList.as_view(), name='job_job_host_summaries_list'),
url(r'^(?P<pk>[0-9]+)/job_events/$', JobJobEventsList.as_view(), name='job_job_events_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', JobActivityStreamList.as_view(), name='job_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
url(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),
url(r'^(?P<pk>[0-9]+)/labels/$', JobLabelList.as_view(), name='job_label_list'),
url(r'^(?P<pk>[0-9]+)/$', JobHostSummaryDetail.as_view(), name='job_host_summary_detail'),
]
__all__ = ['urls']
|
wwitzel3/awx
|
awx/api/urls/job.py
|
Python
|
apache-2.0
| 1,601
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
"""Tests the citest.json_predicate.path_transforms module."""
import unittest
from citest.base import ExecutionContext
from citest.json_predicate import FieldDifference
class PathTransformTest(unittest.TestCase):
def assertEqual(self, a, b, msg=''):
if not msg:
msg = 'EXPECTED\n{0!r}\nGOT\n{1!r}'.format(a, b)
super(PathTransformTest, self).assertEqual(a, b, msg)
def test_field_difference_eq(self):
orig = FieldDifference('X', 'Y')
same = FieldDifference('X', 'Y')
diff = FieldDifference('Y', 'X')
self.assertEqual(orig, same)
self.assertNotEqual(orig, diff)
def test_field_difference(self):
context = ExecutionContext()
source = {'a': 7, 'b': 4}
xform = FieldDifference('a', 'b')
self.assertEqual(3, xform(context, source))
def test_field_difference_indirect(self):
context = ExecutionContext()
source = {'a': 7, 'b': 4}
xform = FieldDifference(lambda x: 'b', lambda x: 'a')
self.assertEqual(-3, xform(context, source))
if __name__ == '__main__':
unittest.main()
|
google/citest
|
tests/json_predicate/path_transforms_test.py
|
Python
|
apache-2.0
| 1,687
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 11 15:04:25 2014
@author: mgaldzic
"""
# Example of Non-unit Stoichiometries
model = '''
model pathway()
S1 + S2 -> 2 S3; k1*S1*S2
3 S3 -> 4 S4 + 6 S5; k2*S3^3
end
'''
import tellurium as te
r = te.loadAntimonyModel(model)
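# A likely next step (not part of the original snippet): run a time-course
# simulation of the loaded model, e.g. r.simulate(0, 10, 50), and plot it with
# r.plot(); both require a working tellurium/roadrunner installation.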
|
violasox/tellurium
|
examples/tellurium-files/nonUnitStoichiometries.py
|
Python
|
apache-2.0
| 293
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
from Libraries.Singleton.Singleton import Singleton
import Libraries.ErrorDefine as ED
from Libraries.DBModel import *
from Platform.ConfigCenter.ConfigCenter import ConfigCenter
from Platform.LogCenter.LogCenter import LogCenter
from Libraries.Utils import *
@Singleton
class ZuiwanDBManager():
def __init__(self):
self.logger = LogCenter.instance().get_logger('ZuiwanCenterLog')
self.db_model = DBModelFactory.instance().get_db_model()
# self.db_model_read = DBModelFactory.instance().get_db_model(readonly=True)
# self.table_name_user = self.conf.get('RDS', 'table_name_zuiwan_user')
# self.table_name_user_count = self.conf.getint('RDS', 'table_name_zuiwan_user_count')
# self.table_name_user_keys = json.loads(self.conf.get('RDS', 'table_name_zuiwan_user_keys'))
self.table_name_user = "zuiwan_user"
self.table_name_user_count = 1
self.table_name_user_keys = ('zuser_id', 'nick_name', 'email', 'create_time', 'avatar_url', 'credits','role','psw','real_name','sex')
self.table_name_meeting = "zuiwan_meeting"
self.table_name_meeting_count = 1
self.table_name_meeting_keys = ('meeting_id','topic','start_time','duration','des','people')
        # School-related information is stored in a separate table, linked by zuser_id.
############## user part #################
def get_users_list(self, data):
limit_count = int(data.get('count', 10))
select_sql = DBModel.sql_select(self.table_name_user,
keys=data.get('keys',['zuser_id', 'nick_name', 'email', 'create_time', 'avatar_url', 'credits','role']),
limit='0,%d' % limit_count, order=[{'key': 'credits', 'desc': True}])
records = self.db_model.GetList(select_sql)
return records
def get_user_detail(self, data):
zuser_id = str(data['zuser_id'])
where_condition = DBModel.sql_and({"zuser_id": zuser_id})
sql = DBModel.sql_select(self.table_name_user, where=where_condition)
records = self.db_model.GetOne(sql)
return records
def find_user(self, data):
'''
        Query zuiwan_user info (without content) by nick_name or zuser_id.
:param data: t = queryType, c = queryContent
:return:
'''
limit_count = int(data.get('count', 3))
select_sql = DBModel.sql_select(self.table_name_user,
keys=self.table_name_user_keys,
where="`%s` like '%%%s%%'" % (data.get('t',''),data.get('c','')),
limit='0,%d' % limit_count, order=[{'key': 'create_time', 'desc': True}])
records = self.db_model.GetList(select_sql)
return records
def add_user(self, data):
'''
:param data {}
:return:
'''
result = {'code': ED.no_err}
if not 'zuser_id' in data or len(data.get('zuser_id','')) <= 0:
data['zuser_id'] = get_now_time_str_ms().replace('.', '') # like '1497257116332'
if not 'create_time' in data or data.get('create_time',0) == 0:
data['create_time'] = get_now_time_int()
else:
return {'code':ED.err_params}
sql = self.db_model.sql_insert(table=self.table_name_user, data=data, keys=self.table_name_user_keys)
flag = self.db_model.execute(sql)
if flag == None or flag.rowcount <= 0:
result['code'] = ED.unknown_err
return result
def delete_user(self,data):
result = {'code':ED.no_err}
try:
sql_delete_user = ""
if 'zuser_id' in data:
sql_delete_user = DBModel.sql_delete(self.table_name_user,where=DBModel.sql_and({'zuser_id':data['zuser_id']}))
elif 'nick_name' in data:
sql_delete_user = DBModel.sql_delete(self.table_name_user,where=DBModel.sql_and({'nick_name':data['nick_name']}))
else:
return {'code':ED.err_params}
ret_del = self.db_model.execute(sql_delete_user)
if ret_del == None:
result['code'] = ED.err_sys
except Exception,e:
self.logger.error("ZuiwanCenter delete user error, sql=[%s],msg=[%s]" % (repr(sql_delete_user),repr(e)))
result['code'] = ED.err_sys
return result
def update_user(self,data,params=None):
result = {'code':ED.no_err}
if params == None or len(params) == 0:
params = self.table_name_user_keys
try:
sql_update_user = DBModel.sql_update(self.table_name_user,data,where=DBModel.sql_and({'zuser_id':data['zuser_id']}),keys=params)
flag = self.db_model.execute(sql_update_user)
if flag == None:
result['code'] = self.add_user(data)['code']
except Exception,e:
self.logger.error("ZuiwanCenter update user error. sql=[%s],msg=[%s]" % (repr(sql_update_user),repr(e)))
result['code'] = ED.err_sys
return result
############ meeting part ################
def get_meetings_list(self,data):
pass
def get_meeting_info(self,data):
pass
def add_meeting(self,data):
result = {'code': ED.no_err}
if not 'meeting_id' in data or len(data.get('meeting_id') or '') <= 0:
data['meeting_id'] = get_now_time_str_ms().replace('.', '') # like '1497257116332'
sql = self.db_model.sql_insert(table=self.table_name_meeting, data=data, keys=self.table_name_meeting_keys)
flag = self.db_model.execute(sql)
if flag == None or flag.rowcount <= 0:
result['code'] = ED.unknown_err
return result
def delete_meeting(self,data):
result = {'code':ED.no_err}
if 'meeting_id' in data:
sql_delete_meeting = DBModel.sql_delete(self.table_name_meeting,where=DBModel.sql_and({'meeting_id':data['meeting_id']}))
else:
return {'code':ED.err_params}
try:
ret_del = self.db_model.execute(sql_delete_meeting)
if ret_del == None:
result['code'] = ED.err_sys
except Exception,e:
self.logger.error("ZuiwanCenter delete meeting error, sql=[%s],msg=[%s]" % (repr(sql_delete_meeting),repr(e)))
result['code'] = ED.err_sys
return result
def update_meeting(self,data,params=None):
result = {'code':ED.no_err}
        if params == None or len(params) == 0:
            params = self.table_name_meeting_keys
        try:
            sql_update_meeting = DBModel.sql_update(self.table_name_meeting,data,where=DBModel.sql_and({'meeting_id':data['meeting_id']}),keys=params)
            flag = self.db_model.execute(sql_update_meeting)
            if flag == None:
                result['code'] = self.add_meeting(data)['code']
except Exception,e:
self.logger.error("ZuiwanCenter update meeting error. sql=[%s],msg=[%s]" % (repr(sql_update_meeting),repr(e)))
result['code'] = ED.err_sys
return result
def get_next_meeting_leave_list(self,data):
pass
def get_last_meeting_leave_list(self,data):
pass
def get_leave_list(self,data):
result = {'code':ED.no_err}
pass
|
Danceiny/HackGirlfriend
|
Platform/ZuiwanCenter/ZuiwanDBManager.py
|
Python
|
apache-2.0
| 7,371
|
import inspect
import logging
import os
import socket
import subprocess
import sys
import textwrap
import threading
import time
import traceback
import salt.utils.files
import salt.utils.win_runas
import yaml
from tests.support.case import ModuleCase
from tests.support.helpers import with_system_user
from tests.support.mock import Mock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
try:
import win32service
import win32serviceutil
import win32event
import servicemanager
import win32api
CODE_DIR = win32api.GetLongPathName(RUNTIME_VARS.CODE_DIR)
HAS_WIN32 = True
except ImportError:
# Mock win32serviceutil object to avoid
# a stacktrace in the _ServiceManager class
win32serviceutil = Mock()
HAS_WIN32 = False
logger = logging.getLogger(__name__)
PASSWORD = "P@ssW0rd"
NOPRIV_STDERR = "ERROR: Logged-on user does not have administrative privilege.\n"
PRIV_STDOUT = (
"\nINFO: The system global flag 'maintain objects list' needs\n "
"to be enabled to see local opened files.\n See Openfiles "
"/? for more information.\n\n\nFiles opened remotely via local share "
"points:\n---------------------------------------------\n\n"
"INFO: No shared open files found.\n"
)
if HAS_WIN32:
RUNAS_PATH = os.path.abspath(os.path.join(CODE_DIR, "runas.py"))
RUNAS_OUT = os.path.abspath(os.path.join(CODE_DIR, "runas.out"))
def default_target(service, *args, **kwargs):
while service.active:
time.sleep(service.timeout)
class _ServiceManager(win32serviceutil.ServiceFramework):
"""
A windows service manager
"""
_svc_name_ = "Service Manager"
_svc_display_name_ = "Service Manager"
_svc_description_ = "A Service Manager"
run_in_foreground = False
target = default_target
def __init__(self, args, target=None, timeout=60, active=True):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.timeout = timeout
self.active = active
if target is not None:
self.target = target
@classmethod
def log_error(cls, msg):
if cls.run_in_foreground:
logger.error(msg)
servicemanager.LogErrorMsg(msg)
@classmethod
def log_info(cls, msg):
if cls.run_in_foreground:
logger.info(msg)
servicemanager.LogInfoMsg(msg)
@classmethod
def log_exception(cls, msg):
if cls.run_in_foreground:
logger.exception(msg)
exc_info = sys.exc_info()
tb = traceback.format_tb(exc_info[2])
servicemanager.LogErrorMsg("{} {} {}".format(msg, exc_info[1], tb))
@property
def timeout_ms(self):
return self.timeout * 1000
def SvcStop(self):
"""
        Stop the service: terminate any subprocess call, notify
        Windows internals of the stop event, and set the instance's active
        attribute to 'False' so the run loops stop.
"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
self.active = False
def SvcDoRun(self):
"""
        Run the monitor in a separate thread so the main thread is
free to react to events sent to the windows service.
"""
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ""),
)
self.log_info("Starting Service {}".format(self._svc_name_))
monitor_thread = threading.Thread(target=self.target_thread)
monitor_thread.start()
while self.active:
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout_ms)
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
self.log_info("Stopping Service")
break
if not monitor_thread.is_alive():
self.log_info("Update Thread Died, Stopping Service")
break
def target_thread(self, *args, **kwargs):
"""
        Target thread: handles any exceptions raised by the target method
        and logs them.
"""
self.log_info("Monitor")
try:
self.target(self, *args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
# TODO: Add traceback info to windows event log objects
self.log_exception("Exception In Target")
@classmethod
def install(cls, username=None, password=None, start_type=None):
if hasattr(cls, "_svc_reg_class_"):
svc_class = cls._svc_reg_class_
else:
svc_class = win32serviceutil.GetServiceClassString(cls)
win32serviceutil.InstallService(
svc_class,
cls._svc_name_,
cls._svc_display_name_,
description=cls._svc_description_,
userName=username,
password=password,
startType=start_type,
)
@classmethod
def remove(cls):
win32serviceutil.RemoveService(cls._svc_name_)
@classmethod
def start(cls):
win32serviceutil.StartService(cls._svc_name_)
@classmethod
def restart(cls):
win32serviceutil.RestartService(cls._svc_name_)
@classmethod
def stop(cls):
win32serviceutil.StopService(cls._svc_name_)
def service_class_factory(
cls_name,
name,
target=default_target,
display_name="",
description="",
run_in_foreground=False,
):
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
return type(
cls_name,
(_ServiceManager, object),
{
"__module__": mod.__name__,
"_svc_name_": name,
"_svc_display_name_": display_name or name,
"_svc_description_": description,
"run_in_foreground": run_in_foreground,
"target": target,
},
)
if HAS_WIN32:
test_service = service_class_factory("test_service", "test service")
SERVICE_SOURCE = """
from __future__ import absolute_import, unicode_literals
import logging
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
from tests.integration.utils.test_win_runas import service_class_factory
import salt.utils.files
import salt.utils.win_runas
import sys
import yaml
OUTPUT = {}
USERNAME = '{}'
PASSWORD = '{}'
def target(service, *args, **kwargs):
service.log_info("target start")
if PASSWORD:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
password=PASSWORD,
)
else:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
)
service.log_info("win_runas returned %s" % ret)
with salt.utils.files.fopen(OUTPUT, 'w') as fp:
yaml.dump(ret, fp)
service.log_info("target stop")
# This class will get imported and run as the service
test_service = service_class_factory('test_service', 'test service', target=target)
if __name__ == '__main__':
try:
test_service.stop()
except Exception as exc: # pylint: disable=broad-except
logger.debug("stop service failed, this is ok.")
try:
test_service.remove()
except Exception as exc: # pylint: disable=broad-except
logger.debug("remove service failed, this os ok.")
test_service.install()
sys.exit(0)
"""
def wait_for_service(name, timeout=200):
start = time.time()
while True:
status = win32serviceutil.QueryServiceStatus(name)
if status[1] == win32service.SERVICE_STOPPED:
break
if time.time() - start > timeout:
raise TimeoutError(
"Timeout waiting for service"
) # pylint: disable=undefined-variable
time.sleep(0.3)
@skipIf(not HAS_WIN32, "This test runs only on windows.")
class RunAsTest(ModuleCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.hostname = socket.gethostname()
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_system_user(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "SYSTEM")
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_network_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "NETWORK SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
def test_runas_local_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "LOCAL SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
def test_runas_winrs_system_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'SYSTEM')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
def test_runas_winrs_network_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'NETWORK SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
def test_runas_winrs_local_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'LOCAL SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"powershell",
"Invoke-Command",
"-ComputerName",
self.hostname,
"-ScriptBlock",
"{{ python.exe {} }}".format(RUNAS_PATH),
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"powershell",
"Invoke-Command",
"-ComputerName",
self.hostname,
"-ScriptBlock",
"{{ python.exe {} }}".format(RUNAS_PATH),
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin(self, username):
psrp_wrap = (
"powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit"
" $LASTEXITCODE"
)
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
ret = salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)
sys.exit(ret['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True) # nosec
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin_no_pass(self, username):
psrp_wrap = (
"powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit"
" $LASTEXITCODE"
)
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True) # nosec
self.assertEqual(ret, 0)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
            ret = yaml.safe_load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
            ret = yaml.safe_load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
            ret = yaml.safe_load(fp)
assert ret["retcode"] == 0, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
            ret = yaml.safe_load(fp)
assert ret["retcode"] == 0, ret
def test_runas_service_system_user(self):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), "SYSTEM", "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
            ret = yaml.safe_load(fp)
assert ret["retcode"] == 0, ret
|
saltstack/salt
|
tests/integration/utils/test_win_runas.py
|
Python
|
apache-2.0
| 23,623
|
from aisikl.events import action_event
from .component import Component, is_true
class Action(Component):
def __init__(self, dialog, id, type, parent_id, properties, element):
super().__init__(dialog, id, type, parent_id, properties, element)
self.accessible = properties.get('accessible', True)
self.tool_tip_text = properties.get('toolTipText')
self.shortcut = properties.get('sc')
self.action_list_id = properties.get('actionListId')
self.confirm_question = properties.get('confirmQuestion')
self.component_ids = properties.get('cids')
def get_components(self):
return [self.dialog.components[id] for id in self.component_ids]
def get_buttons_and_menu_items(self):
return [o for o in self.get_components()
if o.component_type in ('button', 'menuItem')]
def execute(self, original_source_name=None, params=None):
'''Executes the action and emits the appropriate event.'''
if not (self.accessible and self.enabled and self.enabled_in_ui and
self.visible and self.visible_in_ui):
# TODO: we should return here, but we can only do that once we
# properly support interactives. for now, the developer knows best.
pass
if not original_source_name:
self.log('action', 'Executing {}'.format(self.id))
ev = action_event(self, None, original_source_name or self.id, params)
# TODO: We should technically ask confirm_question before firing
# (if ev.listening is True), but we probably don't care.
self.dialog.app.send_events(ev)
def _ais_setAccessible(self, value):
self.accessible = is_true(value)
def _ais_setVisibleInUI(self, value):
super()._ais_setVisibleInUI(value)
for o in self.get_buttons_and_menu_items():
o._ais_setVisibleInUI(value)
def _ais_setEnabledInUI(self, value):
super()._ais_setEnabledInUI(value)
for o in self.get_buttons_and_menu_items():
o._ais_setEnabledInUI(value)
def _ais_setTitle(self, value):
for o in self.get_buttons_and_menu_items():
if o.title == self.title:
                o._ais_setTitle(value)
super()._ais_setTitle(value)
def _ais_setToolTipText(self, value):
for o in self.get_buttons_and_menu_items():
if o.tool_tip_text == self.tool_tip_text:
o._ais_setToolTipText(value)
self.tool_tip_text = value
def _ais_setConfirmQuestion(self, value):
for o in self.get_buttons_and_menu_items():
if o.confirm_question == self.confirm_question:
o._ais_setConfirmQuestion(value)
self.confirm_question = value
|
fmfi-svt/votr
|
aisikl/components/action.py
|
Python
|
apache-2.0
| 2,773
|
# -*- coding: utf-8 -*-
"""
Utility functions, like date conversion and digit conversion
"""
__all__ = [
"Trie",
"arabic_digit_to_thai_digit",
"bahttext",
"collate",
"countthai",
"delete_tone",
"dict_trie",
"digit_to_text",
"display_thai_char",
"emoji_to_thai",
"eng_to_thai",
"find_keyword",
"is_native_thai",
"isthai",
"isthaichar",
"normalize",
"now_reign_year",
"num_to_thaiword",
"rank",
"reign_year_to_ad",
"remove_dangling",
"remove_dup_spaces",
"remove_repeat_vowels",
"remove_tonemark",
"remove_zw",
"reorder_vowels",
"text_to_arabic_digit",
"text_to_thai_digit",
"thai_digit_to_arabic_digit",
"thai_keyboard_dist",
"thai_strftime",
"thai_time",
"thai_to_eng",
"thaiword_to_date",
"thaiword_to_num",
"thaiword_to_time",
"time_to_thaiword",
"text_to_num",
"words_to_num",
]
from pythainlp.util.collate import collate
from pythainlp.util.date import (
now_reign_year,
reign_year_to_ad,
thaiword_to_date,
)
from pythainlp.util.digitconv import (
arabic_digit_to_thai_digit,
digit_to_text,
text_to_arabic_digit,
text_to_thai_digit,
thai_digit_to_arabic_digit,
)
from pythainlp.util.keyboard import (
eng_to_thai,
thai_keyboard_dist,
thai_to_eng,
)
from pythainlp.util.emojiconv import emoji_to_thai
from pythainlp.util.keywords import find_keyword, rank
from pythainlp.util.normalize import (
delete_tone,
normalize,
maiyamok,
remove_dangling,
remove_dup_spaces,
remove_repeat_vowels,
remove_tonemark,
remove_zw,
reorder_vowels,
)
from pythainlp.util.numtoword import bahttext, num_to_thaiword
from pythainlp.util.strftime import thai_strftime
from pythainlp.util.thai import (
countthai,
display_thai_char,
isthai,
isthaichar,
)
from pythainlp.util.thaiwordcheck import is_native_thai
from pythainlp.util.time import thai_time, thaiword_to_time, time_to_thaiword
from pythainlp.util.trie import Trie, dict_trie
from pythainlp.util.wordtonum import thaiword_to_num, text_to_num, words_to_num
|
PyThaiNLP/pythainlp
|
pythainlp/util/__init__.py
|
Python
|
apache-2.0
| 2,215
|
# -*- encoding: utf-8 -*-
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from .views import (
ExampleRefreshExpiryDatesFormView,
HomeView,
SalesLedgerChargeUpdateView,
SalesLedgerCheckoutSuccessView,
SalesLedgerCheckoutView,
SalesLedgerSessionRedirectView,
SettingsView,
)
admin.autodiscover()
urlpatterns = patterns(
'',
url(regex=r'^$',
view=HomeView.as_view(),
name='project.home'
),
url(regex=r'^settings/$',
view=SettingsView.as_view(),
name='project.settings'
),
url(regex=r'^',
view=include('login.urls')
),
url(regex=r'^admin/',
view=include(admin.site.urls)
),
url(regex=r'^checkout/',
view=include('checkout.urls')
),
url(regex=r'^contact/',
view=include('contact.urls')
),
url(r'^home/user/$',
view=RedirectView.as_view(
url=reverse_lazy('project.home'),
permanent=False
),
name='project.dash'
),
url(regex=r'^example/refresh/card/expiry/dates/$',
view=ExampleRefreshExpiryDatesFormView.as_view(),
name='example.refresh.card.expiry.dates'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/charge/$',
view=SalesLedgerChargeUpdateView.as_view(),
name='example.sales.ledger.charge'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/checkout/$',
view=SalesLedgerCheckoutView.as_view(),
name='example.sales.ledger.checkout'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/checkout/success/$',
view=SalesLedgerCheckoutSuccessView.as_view(),
name='example.sales.ledger.checkout.success'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/session/redirect/$',
view=SalesLedgerSessionRedirectView.as_view(),
name='example.sales.ledger.session.redirect'
),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# ^ helper function to return a URL pattern for serving files in debug mode.
# https://docs.djangoproject.com/en/1.5/howto/static-files/#serving-files-uploaded-by-a-user
urlpatterns += staticfiles_urlpatterns()
|
pkimber/checkout
|
example_checkout/urls.py
|
Python
|
apache-2.0
| 2,467
|
from django.urls import path
from . import views
from adminlte_full.adminlte import config
urlpatterns = [
path('', views.index),
path('', views.index, name=config['ADMINLTE_REGISTRATION_ENDPOINT']),
path('terms/', views.terms),
path('profile/', views.profile, name=config['ADMINLTE_PROFILE_ENDPOINT']),
]
|
kyzima-spb/django-adminlte-full
|
example/demo/urls.py
|
Python
|
apache-2.0
| 325
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import build_sys.util.util as util
from build_sys.cpp_writer import Code
"""Generates the simple constant text-expressions and
documentation."""
COMMANDS = [
# Command, character, docs
('alpha', 'utf8_char(0x03b1)', 'Greek small letter alpha'),
('angle', 'utf8_char(0x2220)', 'Angle-symbol'),
('beta', 'utf8_char(0x03b2)', 'Greek small letter beta'),
('deg', 'chars::degree_sign', 'Degree sign'),
('delta', 'chars::greek_capital_letter_delta', 'Greek capital delta'),
('dog', 'utf8_char(0x1f415)', 'Dog symbol'),
('dprime', 'chars::double_prime', 'Double prime'),
('ellipsis', 'utf8_char(0x22ef)', 'Ellipsis symbol'),
('ge', 'utf8_char(0x2265)', 'Greater or equal sign'),
('in', 'chars::double_prime', 'Inch symbol (double prime)'),
('inf', 'utf8_char(0x221e)', 'Infinity symbol'),
('interrobang', 'utf8_char(0x203d)', 'Surprised exclamation'),
('larr', 'utf8_char(0x2190)', 'Left arrow'),
('le', 'utf8_char(0x2264)', 'Less or equal sign'),
('li', 'chars::bullet', 'Bullet point'),
('lrarr', 'utf8_char(0x2194)', 'Left and right arrow'),
('ne', 'utf8_char(0x2260)', 'Not equal sign'),
('notparallel', 'utf8_char(0x2226)', 'Not parallel symbol'),
('parallel', 'utf8_char(0x2225)', 'Parallel symbol'),
('perfect', 'utf8_char(0x1f44c)', 'Perfect hand gesture'),
('pi', 'chars::greek_small_letter_pi', 'Greek small pi'),
('poop', 'utf8_char(0x1f4a9)', 'Pile of poop symbol'),
('prime', 'chars::prime', 'Prime symbol'),
('rarr', 'utf8_char(0x2192)', 'Right arrow'),
('scissors', 'utf8_char(0x2702)', 'A pair of scissors'),
('sq', 'chars::superscript_two', 'Superscript two'),
('sqrt', 'utf8_char(0x221a)', 'Square-root symbol'),
('times', 'utf8_char(0xD7)', 'Multiplication symbol'),
('tm', 'utf8_char(0x2122)', 'Trademark symbol'),
('tprime', 'chars::triple_prime', 'Triple prime'),
('whoa', 'utf8_char(0x1f450)', 'Raised hands')]
def get_header_code():
hh = Code()
hh.line('// Generated by %s\n' % os.path.basename(__file__))
hh.line('namespace faint{')
hh.line('const std::map<utf8_string, utf8_string>& constant_exprs(){')
hh.line('static std::map<utf8_string, utf8_string> constants =')
hh.line('{')
for c in COMMANDS[:-1]:
hh.line('{"%s", utf8_string(%s)},' % (c[0], c[1]))
c = COMMANDS[-1]
hh.line('{"%s", utf8_string(%s)}' % (c[0], c[1]))
hh.line('};')
hh.line('return constants;')
hh.line('}')
hh.line('} // namespace')
return hh
def get_help():
help = '# Generated by %s\n' % os.path.basename(__file__)
help += '||*Command*||*Symbol*||*Description*||\n'
for c in COMMANDS:
help += ('||\\%s||\\image(symbol-%s.png)||%s||\n' % (c[0], c[0], c[2]))
return help
def generate_header(file_path):
with open(file_path, 'w', newline='\n') as f:
        f.write(str(get_header_code()))
def generate_help(file_path):
"""Writes a help-source-file documenting the commands"""
with open(file_path, 'w', newline='\n') as f:
f.write(get_help())
def generate(hh_path, help_path):
if util.changed(__file__, hh_path):
generate_header(hh_path)
if util.changed(__file__, help_path):
generate_help(help_path)
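# Hedged usage sketch (not part of the original script): writes the generated
# C++ header and the help table to a temporary directory for inspection. The
# output file names below are illustrative assumptions, not the build system's
# real paths.
if __name__ == '__main__':
    import tempfile
    out_dir = tempfile.mkdtemp()
    generate_header(os.path.join(out_dir, 'text-expression-constants.hh'))
    generate_help(os.path.join(out_dir, 'text-expression-commands.txt'))
    print('Generated files in', out_dir)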
|
lukas-ke/faint-graphics-editor
|
build-sys/build_sys/gen_text_expressions.py
|
Python
|
apache-2.0
| 4,002
|
# Copyright 2020 The UniqueRandomizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Stochastic Beam Search (SBS).
The technique is described in the following paper:
Wouter Kool, Herke van Hoof, and Max Welling.
Stochastic Beams and Where to Find Them: The Gumbel-Top-k Trick for Sampling
Sequences Without Replacement.
https://arxiv.org/pdf/1903.06059.pdf
The implementation is slightly generalized from the description in the paper,
handling the case where not all leaves are at the same level of the tree.
"""
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
State = Any # Type alias. pylint: disable=invalid-name
Output = Any # Type alias. pylint: disable=invalid-name
BeamNode = typing.NamedTuple('BeamNode', [('output', Output),
('log_probability', float),
('gumbel', float)])
def sample_gumbels_with_maximum(log_probabilities, target_max):
"""Samples a set of gumbels which are conditioned on having a given maximum.
Based on https://gist.github.com/wouterkool/a3bb2aae8d6a80f985daae95252a8aa8.
Args:
log_probabilities: The log probabilities of the items to sample Gumbels for.
target_max: The desired maximum sampled Gumbel.
Returns:
The sampled Gumbels.
"""
gumbels = np.random.gumbel(loc=log_probabilities)
max_gumbel = np.max(gumbels)
# Use equations (23) and (24) in Appendix B.3 of the SBS paper.
# Note: Numpy may warn "divide by zero encountered in log1p" on the next code
# line. This is normal and expected, since one element of
# `gumbels - max_gumbel` should be zero. The math fixes itself later on, and
# that element ends up being shifted to target_max.
v = target_max - gumbels + np.log1p(-np.exp(gumbels - max_gumbel))
return target_max - np.maximum(v, 0) - np.log1p(np.exp(-np.abs(v)))
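# Hedged demo (not part of the original module): the maximum of the shifted
# Gumbels returned by sample_gumbels_with_maximum equals `target_max` up to
# floating point error. The helper name below is illustrative only.
def _demo_sample_gumbels_with_maximum():
  log_probs = np.log(np.array([0.2, 0.3, 0.5]))
  shifted = sample_gumbels_with_maximum(log_probs, target_max=0.0)
  assert np.isclose(np.max(shifted), 0.0)
  return shifted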
def stochastic_beam_search(
child_log_probability_fn: Callable[[List[State]], List[np.ndarray]],
child_state_fn: Callable[[List[Tuple[State, int]]],
List[Tuple[Union[State, Output], bool]]],
root_state: State,
k: int) -> List[BeamNode]:
"""Stochastic Beam Search.
Nodes in the beam include "states" which can be anything but must contain
enough information to:
1. Define a consistent ordering of all children of the node.
2. Enumerate the probabilities of all children.
3. Produce the state of the child with a given index.
Args:
child_log_probability_fn: A function that takes a list of states and returns
the log probabilities of the child states of each input state.
child_state_fn: A function that takes a list of (state, i) pairs and maps
each to a (ith_child, is_leaf) pair. If ith_child is a leaf state, is_leaf
should be True, and ith_child will potentially be an actual sampled item
that should be returned by stochastic_beam_search (it may have a different
form than other non-leaf states).
root_state: The state of the root node. This cannot be a leaf node.
k: The desired number of samples.
Returns:
A list of up to k BeamNode objects, corresponding to the sampled leaves.
"""
if k <= 0:
return []
# Data for nodes currently on the beam.
leaf_log_probs = []
leaf_gumbels = []
leaf_outputs = []
internal_states = [root_state]
internal_log_probs = [0.0]
internal_gumbels = [0.0]
# Expand internal nodes until there are none left to expand.
while internal_states:
# Compute child probabilities for all internal nodes.
child_log_probs_list = child_log_probability_fn(internal_states)
# Avoid creating tons of BeamNode objects for children of internal nodes
# (there may be beam_size*node_arity children). Instead pack data into lists
# for efficiency.
all_log_probs = []
all_gumbels = []
all_states = []
all_child_indices = []
# Sample Gumbels for children of internal nodes.
for node_state, node_log_prob, node_gumbel, child_log_probs in zip(
internal_states, internal_log_probs, internal_gumbels,
child_log_probs_list):
# Note: Numpy may warn "divide by zero encountered in log" on the next
# code line. This is normal and expected if a child has zero probability.
# We prevent zero-probability children from being added to the beam.
log_probabilities = child_log_probs + node_log_prob
good_indices = np.where(log_probabilities != np.NINF)[0]
log_probabilities = log_probabilities[good_indices]
gumbels = sample_gumbels_with_maximum(log_probabilities, node_gumbel)
all_log_probs.extend(log_probabilities)
all_gumbels.extend(gumbels)
all_states.extend([node_state] * len(log_probabilities))
all_child_indices.extend(good_indices)
# Select the k best candidates.
num_internal_candidates = len(all_gumbels)
num_leaf_candidates = len(leaf_gumbels)
if k >= num_internal_candidates + num_leaf_candidates:
# No change to leaf nodes, since all are selected.
to_expand_states = list(zip(all_states, all_child_indices))
to_expand_log_probs = all_log_probs
to_expand_gumbels = all_gumbels
else:
# Select the unsorted top k in O(num_candidates) time.
all_gumbels.extend(leaf_gumbels)
top_k_indices = np.argpartition(all_gumbels, -k)[-k:]
to_expand_states = []
to_expand_log_probs = []
to_expand_gumbels = []
leaf_indices = []
for i in top_k_indices:
if i >= num_internal_candidates:
leaf_indices.append(i - num_internal_candidates)
else:
to_expand_states.append((all_states[i], all_child_indices[i]))
to_expand_log_probs.append(all_log_probs[i])
to_expand_gumbels.append(all_gumbels[i])
leaf_log_probs = [leaf_log_probs[i] for i in leaf_indices]
leaf_gumbels = [leaf_gumbels[i] for i in leaf_indices]
leaf_outputs = [leaf_outputs[i] for i in leaf_indices]
# Among selected candidates, expand non-leaf nodes.
internal_log_probs = []
internal_gumbels = []
internal_states = []
child_states = child_state_fn(to_expand_states)
for log_prob, gumbel, (child_state, is_leaf) in zip(
to_expand_log_probs, to_expand_gumbels, child_states):
if is_leaf:
leaf_log_probs.append(log_prob)
leaf_gumbels.append(gumbel)
leaf_outputs.append(child_state)
else:
internal_log_probs.append(log_prob)
internal_gumbels.append(gumbel)
internal_states.append(child_state)
# Pack the leaf data into BeamNode objects.
sampled_nodes = []
for log_prob, gumbel, output in zip(
leaf_log_probs, leaf_gumbels, leaf_outputs):
sampled_nodes.append(BeamNode(output=output, log_probability=log_prob,
gumbel=gumbel))
# Sort the beam in order of decreasing Gumbels. This corresponds to the order
# one would get by sampling one-at-a-time without replacement.
return sorted(sampled_nodes, key=lambda x: x.gumbel, reverse=True)
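# Hedged usage sketch (not part of the original module): samples k=3 length-2
# bit strings without replacement. States are tuples of bits; every internal
# node has two children with probabilities (0.3, 0.7). All helper names are
# illustrative only.
def _demo_stochastic_beam_search():
  def child_log_probs(states):
    return [np.log(np.array([0.3, 0.7])) for _ in states]
  def child_state(state_index_pairs):
    results = []
    for state, index in state_index_pairs:
      child = state + (index,)
      results.append((child, len(child) == 2))  # Leaves sit at depth 2.
    return results
  samples = stochastic_beam_search(child_log_probs, child_state,
                                   root_state=(), k=3)
  for node in samples:
    print(node.output, node.log_probability, node.gumbel)
  return samples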
|
google-research/unique-randomizer
|
unique_randomizer/stochastic_beam_search.py
|
Python
|
apache-2.0
| 7,609
|
#!/usr/bin/env python
import os
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Storage
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import Cassandra
from ServerConfig import Microbench
def startMBClient(populate = False, uoutFile = None):
default_out = ""
if Storage.storage == TellStore:
default_out = "mbench_{0}".format(TellStore.approach)
elif Storage.storage == Kudu:
default_out = "mbench_kudu"
elif Storage.storage == Cassandra:
default_out = "mbench_cassandra"
default_out = '{0}/{1}_sf{2}_N{3}'.format(Microbench.result_dir, default_out, Microbench.scaling, Microbench.numColumns)
if (uoutFile):
outfile = uoutFile
else:
outfile = default_out
appendFile = 0
while os.path.isfile(outfile + ".db"):
appendFile = appendFile + 1
outfile = "{0}_{1}".format(outfile, appendFile)
probabilities = "-i {0} -d {1} -u {2}".format(Microbench.insertProb, Microbench.deleteProb, Microbench.updateProb)
cmd = '{0}/watch/microbench/mbclient -H "{1}" -s {2} -c {3} -t {4} -a {5} -o {6} -b {7} -w {8} {9}'.format(TellStore.builddir, Microbench.getServerList(), Microbench.scaling, Microbench.clients, Microbench.clientThreads, Microbench.analyticalClients, outfile + ".db", Microbench.txBatch, Microbench.oltpWaitTime, probabilities)
if Microbench.onlyQ1:
cmd += ' -q'
if Microbench.noWarmUp:
cmd += " --no-warmup"
if (populate):
cmd += ' -P'
print "Execute {0}".format(cmd)
return os.system(cmd)
if __name__ == "__main__":
default_out = ''
parser = ArgumentParser()
parser.add_argument("-P", dest='populate', help="Populate data", action="store_true")
parser.add_argument("outfile", help="Result database", default=default_out, nargs='?')
args = parser.parse_args()
    if args.outfile:
        exit(startMBClient(args.populate, args.outfile))
    else:
        exit(startMBClient(args.populate))
|
tellproject/helper_scripts
|
mbclient.py
|
Python
|
apache-2.0
| 2,085
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes and functions for doing a single-machine batch all-reduce.
An all-reduce is taking the reduction (typically a sum) of a list of tensors,
each on a different device. The result must end up back on each device, which is
where the word "all" comes from. In summary, each device starts with a single
tensor, and ends up with the reduction of all tensors.
A batch all-reduce performs several independent all-reduces. When doing a batch
all-reduce, care is taken to evenly distribute the reduction computations
across devices and inter-device tensor transfers across device links.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(reedwm): Support distributed all-reduces in this file.
# TODO(reedwm): Merge this code with allreduce.py, which contains some batch
# all-reduce code that this file calls. allreduce.py also supports distributed
# batch-reduce while this file only supports single-machine all-reduce.
import abc
import six
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops
import allreduce
import constants
def _all_reduce_using_copy(tensors_across_devices, use_mean):
"""Does an all-reduce of a list of tensors by copying to the current device.
The tensors are copied to the current device and then reduced.
Args:
tensors_across_devices: A list of tensors, each on a different device.
    use_mean: Whether to take the mean of the tensors instead of a sum.
Returns:
A reduced tensor on the current device.
"""
reduced_tensor = tf.add_n(tensors_across_devices)
if use_mean:
reduced_tensor *= 1 / len(tensors_across_devices)
return reduced_tensor
@six.add_metaclass(abc.ABCMeta)
class BatchAllReduceAlgorithm(object):
"""Represents an algorithm for performing a batch all-reduce operation."""
def batch_all_reduce(self,
all_device_tensors,
num_splits,
compact_tensors,
defer_tensors,
xla_compile=False):
"""Performs a batch all-reduce.
The reduction done is a sum.
`all_device_tensors` is a list of list of tensors that will be batch
all-reduced. All tensors within a single inner list must be on the same
device. The nth element in each list, for any n, will be reduced together.
The return value is in the same form as `all_device_tensors`, except that
each tensor is reduced.
For example, if `all_device_tensors` is:
[[ A, B ], # A and B are on GPU 0
[ C, D ]] # C and D are on GPU 1
Then the return value will be:
[[ A+C, B+D ], # These two tensors are on GPU 0
[ A+C, B+D ]] # These two tensors are on GPU 1
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
num_splits: If not None, tensors will be concatenated and split into this
many pieces during the all-reduce, then split back into their original
shapes afterwards. Has no impact on correctness and can improve
performance. Requires all tensors to be the same type.
      compact_tensors: If True, tensors are cast to fp16 before being all-
reduced. Improves performance, but hurts numerical stability.
defer_tensors: If True, every time the return value
`reduced_all_device_tensors` is evaluated, the result will be the
        reduced values of `all_device_tensors` from the previous session
run instead of the current session run, or zero on the first session
run. This can improve performance. When training neural networks,
deferring gradients often does not harm training, so this can be used to
improve performance.
xla_compile: If True, use XLA to compile gradients packing and unpacking
ops.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
warmup_ops: A list of ops needed to be run once before the all-reduce can
occur.
"""
# Before all-reducing tensors, we do several preprocessing functions that
# can speed up the all-reduce. We undo these functions after all-reducing
# the tensors.
# all_device_packed_tensors is a 2-d list of tensors indexed by
# [device_id][tensor_id], holding packed tensors from all devices involved
# in all-reduce.
all_device_packed_tensors = []
# all_device_warmup_ops is a 2-d list of ops indexed by
# [device_id][tensor_id], holding warmup_ops that need to be run once before
# all-reduce can occur.
all_device_warmup_ops = []
# all_device_put_ops is a 2-d list of ops indexed by
# [device_id][tensor_id], holding put ops for deferred tensors. They will be
# called in each all-reduce step automatically due to control dependency.
all_device_put_ops = []
# packers is a list of _TensorPacker, one for each device involved in
# all-reduce.
packers = [
_TensorPacker(num_splits, compact_tensors) for _ in all_device_tensors
]
for packer, device_tensors in zip(packers, all_device_tensors):
def pack_single_device_tensors(packer=packer,
device_tensors=device_tensors):
"""Pack gradient tensors of a device."""
packed_tensors = packer.maybe_concat_tensors(device_tensors)
packed_tensors = packer.maybe_compact_tensors(packed_tensors)
# When xla_compile=False, defer tensors after concat for better
# performance.
if defer_tensors and not xla_compile:
packed_tensors, put_ops, warmup_ops = defer_single_device_tensors(
packed_tensors)
all_device_put_ops.append(put_ops)
all_device_warmup_ops.append(warmup_ops)
packed_tensors = packer.maybe_split_tensors(packed_tensors)
return packed_tensors
with tf.device(device_tensors[0].device):
if xla_compile:
packed_tensors = tf.xla.experimental.compile(
pack_single_device_tensors)
# When xla_compile=True, intermediate tensors in packing process are
# not materialized. Thus, we defer tensors after packing process is
# completed instead of in the middle of it.
if defer_tensors:
packed_tensors, put_ops, warmup_ops = defer_single_device_tensors(
packed_tensors)
all_device_put_ops.append(put_ops)
all_device_warmup_ops.append(warmup_ops)
else:
packed_tensors = pack_single_device_tensors()
all_device_packed_tensors.append(packed_tensors)
# Perform all-reduce on packed tensors.
all_device_tensors = self._do_batch_all_reduce(all_device_packed_tensors)
all_device_unpacked_tensors = []
for packer, device_tensors in zip(packers, all_device_tensors):
def unpack_single_device_tensors(packer=packer,
device_tensors=device_tensors):
"""Unpack gradient tensors of a device."""
unpacked_tensors = packer.undo_maybe_split_tensors(device_tensors)
unpacked_tensors = packer.undo_maybe_compact_tensors(unpacked_tensors)
unpacked_tensors = packer.undo_maybe_concat_tensors(unpacked_tensors)
return unpacked_tensors
with tf.device(device_tensors[0].device):
if xla_compile:
unpacked_device_tensor = tf.xla.experimental.compile(
unpack_single_device_tensors)
else:
unpacked_device_tensor = unpack_single_device_tensors()
all_device_unpacked_tensors.append(unpacked_device_tensor)
# Note: There is no undo operation for deferring tensors. But we do need to
# call _add_put_op_control_deps at the end if we deferred the tensors.
if defer_tensors:
all_device_unpacked_tensors = _add_put_op_control_deps(
all_device_unpacked_tensors, num_splits, all_device_put_ops)
return all_device_unpacked_tensors, all_device_warmup_ops
@abc.abstractmethod
def _do_batch_all_reduce(self, all_device_tensors):
"""Performs a batch all-reduce.
Unlike `self.batch_all_reduce`, this does not do any preprocessing of the
tensors.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
"""
pass
class CopyToDeviceAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that copies tensors to be reduced to a specific device."""
def __init__(self, devices_to_reduce_on, use_mean=False):
self._devices = devices_to_reduce_on
self._use_mean = use_mean
def _do_batch_all_reduce(self, all_device_tensors):
reduced_tensors = []
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
with tf.device(self._devices[i % len(self._devices)]):
reduced_tensor = _all_reduce_using_copy(tensors_across_devices,
self._use_mean)
reduced_tensors.append(reduced_tensor)
# The tensors will be brought back to each device once they are used.
return [reduced_tensors] * len(all_device_tensors)
class HierarchicalCopyAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses hierarchical copies. This is only optimized for
eight devices connected in NetworkTopology.DGX1 or NetworkTopology.GCP_V100
topology.
"""
def __init__(self, network_topology):
"""Initializer for HierarchicalCopyAlgorithm.
Args:
network_topology: An instance of Enum class constants.NetworkTopology.
"""
self._network_topology = network_topology
def _do_batch_all_reduce(self, all_device_tensors):
avail_devices = [device_tensors[0].device
for device_tensors in all_device_tensors]
reduced_tensors = []
num_devices = len(avail_devices)
group_size = num_devices // 2
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
group_0_main_device, group_1_main_device = self.__get_main_devices(
i, num_devices)
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Reduce the first group.
group_0_tensors = tensors_across_devices[group_0_begin:
group_0_begin + group_size]
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor = _all_reduce_using_copy(group_0_tensors, False)
# Reduce the second group.
group_1_tensors = tensors_across_devices[group_1_begin:
group_1_begin + group_size]
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor = _all_reduce_using_copy(group_1_tensors, False)
# Reduce between the groups.
with tf.device(avail_devices[group_0_main_device]):
total_reduced_tensor = _all_reduce_using_copy(
[group_0_reduced_tensor, group_1_reduced_tensor], False)
# Broadcast the result back into the root of each group.
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
reduced_tensors_bcast = []
for j in range(len(tensors_across_devices)):
with tf.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_tensor = group_0_reduced_tensor_bcast
else:
src_device_tensor = group_1_reduced_tensor_bcast
reduced_tensors_bcast.append(tf.identity(src_device_tensor))
reduced_tensors.append(reduced_tensors_bcast)
reduced_tensors = list(zip(*reduced_tensors))
return reduced_tensors
def __get_main_devices(self, tensor_index, num_devices):
"""Returns the pair of main devices to use for initial reduction.
Args:
tensor_index: Index of the current tensor in the list of tensors to copy.
num_devices: Total number of devices.
Returns:
A tuple containing pair of main device indices for the initial
reduction. Then, the first element of the tuple should be used for the
final reduction.
Raises:
ValueError: Invalid input arguments.
"""
if self._network_topology == constants.NetworkTopology.DGX1:
return tensor_index % num_devices, (tensor_index +
(num_devices // 2)) % num_devices
elif self._network_topology == constants.NetworkTopology.GCP_V100:
if num_devices != 8:
raise ValueError('HierarchicalCopy only supports eight devices in %s.' %
self._network_topology)
# TODO(hinsu): Generalize main device indices to handle any other
# isomorphic connection graph that connects two cliques using connections
# other than 0-5 and 2-7.
main_device_pairs = [(0, 5), (2, 7), (5, 0), (7, 2)]
return main_device_pairs[tensor_index % len(main_device_pairs)]
else:
# TODO(reedwm): make this logic more general for arbitrary topology.
raise ValueError(
'HierarchicalCopy is not supported for %s network topology.' %
self._network_topology)
class AllReduceSpecAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses an all reduce spec."""
def __init__(self, all_reduce_spec, gpu_indices, agg_small_grads_max_bytes,
agg_small_grads_max_group):
spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
if len(spec) != 1:
raise ValueError(
'Replicated mode does not support hybrid all-reduce strategies')
self._all_reduce_spec = spec[0]
self._gpu_indices = gpu_indices
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
def _do_batch_all_reduce(self, all_device_tensors):
# TODO(reedwm): Merge allreduce.sum_gradients_all_reduce with the other
# gradient aggregation code, since gradient aggregation is doing an all
# reduce. Currently, we do gradient repacking in two different places.
# TODO(reedwm): Change the allreduce code to reduce tensors instead of
# tower_grads.
tower_grads = [[(t, None) for t in device_tensors]
for device_tensors in all_device_tensors]
aggregated_device_grads = allreduce.sum_gradients_all_reduce(
False, # single_session
['/job:localhost'],
tower_grads,
1,
self._all_reduce_spec.alg,
self._all_reduce_spec.shards,
self._gpu_indices,
agg_small_grads_max_bytes=self._agg_small_grads_max_bytes,
agg_small_grads_max_group=self._agg_small_grads_max_group)
return [[t for t, _ in grad_vars] for grad_vars in aggregated_device_grads]
def algorithm_from_params(params):
"""Returns a BatchAllReduceAlgorithm from a Params tuple."""
if params.all_reduce_spec:
if params.gpu_indices:
gpu_indices = [int(x) for x in params.gpu_indices.split(',')]
else:
gpu_indices = [x for x in range(params.num_gpus)]
return AllReduceSpecAlgorithm(params.all_reduce_spec, gpu_indices,
params.agg_small_grads_max_bytes,
params.agg_small_grads_max_group)
elif params.hierarchical_copy:
return HierarchicalCopyAlgorithm(params.network_topology)
else:
if params.local_parameter_device == 'gpu':
devices_to_reduce_on = ['/gpu:%d' % i for i in range(params.num_gpus)]
else:
devices_to_reduce_on = ['/cpu:0']
return CopyToDeviceAlgorithm(devices_to_reduce_on)
def _apply_to_all_device_tensors(all_device_tensors, apply_func, colocate=True):
"""Applies a function to each tensor in `all_device_tensors`.
A new list of lists of tensors is returned, where every tensor in
`all_device_tensors` has had `apply_func` called on it. `all_device_tensors`
is not modified.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
apply_func: A function taking in three arguments: tensor, device_index,
tensor_index, and returning a modified tensor.
`tensor` is `all_device_tensors[device_index][tensor_index]`.
    colocate: If True, apply_func will be run under a context manager
      colocated with its input tensor.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has had
`apply_func` called on it.
"""
new_all_device_tensors = []
for device_index, device_tensors in enumerate(all_device_tensors):
new_device_tensors = []
for tensor_index, t in enumerate(device_tensors):
if colocate:
with tf.colocate_with(t):
new_t = apply_func(t, device_index, tensor_index)
else:
new_t = apply_func(t, device_index, tensor_index)
new_device_tensors.append(new_t)
new_all_device_tensors.append(new_device_tensors)
return new_all_device_tensors
def _defer_tensor(tensor):
"""Defers the retrieval of a tensor.
The tensor is put into a StagingArea, and the return value is the
retrieval of the tensor from the StagingArea. The effect is that the
tensor returned from this function is the tensor that was put in the
StagingArea for the previous Session.run() call.
Args:
tensor: The tensor to defer for one step.
Returns:
deferred_tensor: The tensor deferred for one step.
put_op: An op to put `tensor` in the StagingArea. Must be run every step
that `deferred_tensor` is run.
warmup_op: A warmup op that should be called before the first step. Puts
a zero tensor into the StagingArea.
"""
tensor_stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
put_op = tensor_stage.put([tensor])
warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])
# Fetch the next tensor to use.
(tensor,) = tensor_stage.get()
return tensor, put_op, warmup_op
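# Hedged demo (not part of the original file): shows the one-step delay that
# _defer_tensor introduces. Assumes TF1 graph mode; the helper name is
# illustrative only.
def _demo_defer_tensor():
  if hasattr(tf, 'disable_eager_execution'):
    tf.disable_eager_execution()
  x = tf.placeholder(tf.float32, shape=[])
  deferred, put_op, warmup_op = _defer_tensor(x)
  with tf.Session() as sess:
    sess.run(warmup_op)
    # Each step reads the value staged on the previous step (zero at first).
    first, _ = sess.run([deferred, put_op], feed_dict={x: 1.0})   # 0.0
    second, _ = sess.run([deferred, put_op], feed_dict={x: 2.0})  # 1.0
    return first, second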
def defer_single_device_tensors(device_tensors):
"""Defer tensors (gradients in this case) from a single device.
Args:
device_tensors: A list of gradients tensors from a single device to defer.
Returns:
deferred_tensors: A list of tensors deferred for one step.
put_ops: A list of ops that put `tensors` in the StagingAreas. Must be run
every step that `deferred_tensors` is run.
warmup_ops: Warmup ops that should be called before the first step. Puts
zero tensors into the StagingArea.
"""
put_ops = []
warmup_ops = []
deferred_tensors = []
for tensor in device_tensors:
deferred_tensor, put_op, warmup_op = _defer_tensor(tensor)
deferred_tensors.append(deferred_tensor)
put_ops.append(put_op)
warmup_ops.append(warmup_op)
return deferred_tensors, put_ops, warmup_ops
def _add_put_op_control_deps(all_device_tensors, num_splits, put_ops):
"""Add control dependencies from `put_ops` to `all_device_tensors`.
This should only be called when deferred tensors are being used.
The control dependencies are added so that the put ops are run whenever
`all_device_tensors` is run. That way, the caller does not have to explicitly
run the put ops.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
num_splits: The number of splits that were used for the all-reduce.
put_ops: A list of put ops from deferring the tensors.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has a
control dependency on an op in `put_ops`.
"""
def apply_func(tensor, device_index, tensor_index):
if num_splits == 0:
deps = [put_ops[device_index][tensor_index]]
else:
deps = put_ops[device_index]
assert len(deps) == 1
with tf.control_dependencies(deps):
return tf.identity(tensor, name='control_dependency')
return _apply_to_all_device_tensors(all_device_tensors, apply_func)
class _TensorPacker(object):
"""Packs and unpacks tensors into groups.
  This class first concatenates a set of tensors, then splits the concatenated
tensor into a small number of chunks. This is useful for all-reducing tensors,
as doing a small number of all-reduces on large tensors can be faster than
doing a large number of all-reduces on small tensors.
  It also provides an option to compact tensors by casting them to fp16, for better
all-reduce performance.
This class maintains states of processed tensors like shapes and types. So
each packer can only be used to pack and unpack one list of tensors. If you
need to pack multiple lists of tensors (say from multiple devices), then you
need multiple _TensorPacker object, one for each device.
"""
def __init__(self, num_splits, compact):
"""Initializes the _TensorPacker.
Args:
num_splits: The number of tensors to split the concatenated tensor into.
The batch all-reduce will consist of `num_splits` all-reduces. if None
or zero, tensors are not split or concatenated.
      compact: If True, tensors are cast to fp16 during packing and cast
back to their original dtypes during unpacking.
"""
self._num_splits = num_splits
self._compact = compact
self._before_compact_dtypes = []
def maybe_concat_tensors(self, device_tensors):
"""Concatenate tensors into a single tensor."""
if not self._num_splits:
return device_tensors
flat_tensors = [tf.reshape(t, [-1]) for t in device_tensors]
self._orig_shapes = [t.shape for t in device_tensors]
self._orig_sizes = [s.num_elements() for s in self._orig_shapes]
# All shapes must be fully defined.
assert None not in self._orig_sizes
concatenated_grad = tf.concat(flat_tensors, 0)
return [concatenated_grad]
def maybe_split_tensors(self, concatenated_tensor):
"""Split concatenated tensor into `num_splits` pieces."""
if not self._num_splits:
return concatenated_tensor
if len(concatenated_tensor) != 1:
raise RuntimeError('tensors must be concatenated via '
'maybe_concat_tensors() before splitting')
concatenated_tensor = concatenated_tensor[0]
total_tensor_size = concatenated_tensor.shape.num_elements()
split_size = total_tensor_size // self._num_splits
split_size_last = total_tensor_size - split_size * (self._num_splits - 1)
split_sizes = [split_size] * (self._num_splits - 1) + [split_size_last]
tensor_packs = tf.split(concatenated_tensor, split_sizes)
return tensor_packs
def undo_maybe_split_tensors(self, tensor_packs):
"""Undo maybe_split_tensors()."""
if not self._num_splits:
return tensor_packs
return [tf.concat(tensor_packs, 0)]
def undo_maybe_concat_tensors(self, concatenated_tensor):
"""Undo maybe_concat_tensors()."""
if not self._num_splits:
return concatenated_tensor
if len(concatenated_tensor) != 1:
raise RuntimeError(
'undo_maybe_split_tensors() must be called before '
'undo_maybe_concat_tensors when num_splits is greater than 1')
concatenated_tensor = concatenated_tensor[0]
tensors_with_sizes = tf.split(concatenated_tensor,
self._orig_sizes)
tensors_with_shapes = [
tf.reshape(grad, shape) for grad, shape in zip(
tensors_with_sizes, self._orig_shapes)
]
return tensors_with_shapes
def maybe_compact_tensors(self, device_tensors):
"""Cast tensors to fp16 and store their original types."""
if not self._compact:
return device_tensors
if self._before_compact_dtypes:
raise RuntimeError('maybe_compact_tensors can only be called once.')
self._before_compact_dtypes = [t.dtype for t in device_tensors]
compact_tensors = [tf.cast(t, tf.float16) for t in device_tensors]
return compact_tensors
def undo_maybe_compact_tensors(self, compact_tensors):
"""Undo maybe_compact_tensors()."""
if not self._compact:
return compact_tensors
if not self._before_compact_dtypes:
raise RuntimeError('maybe_compact_tensors() must be called before '
'undo_maybe_compact_tensors()')
device_tensors = [
tf.cast(t, dtype)
for t, dtype in zip(compact_tensors, self._before_compact_dtypes)
]
return device_tensors
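# Hedged usage sketch (not part of the original file): batch all-reduce of two
# tensors per "device" using CopyToDeviceAlgorithm. Both logical devices map
# to /cpu:0 purely for illustration, and TF1 graph mode is assumed.
def _example_copy_to_device_batch_all_reduce():
  """Builds and runs a tiny batch all-reduce; returns the reduced values."""
  if hasattr(tf, 'disable_eager_execution'):
    tf.disable_eager_execution()
  algorithm = CopyToDeviceAlgorithm(['/cpu:0'])
  all_device_tensors = []
  for _ in range(2):  # Two logical devices, both placed on /cpu:0.
    with tf.device('/cpu:0'):
      all_device_tensors.append([tf.constant([1.0, 2.0]), tf.constant(3.0)])
  reduced, warmup_ops = algorithm.batch_all_reduce(
      all_device_tensors, num_splits=0, compact_tensors=False,
      defer_tensors=False)
  with tf.Session() as sess:
    sess.run(warmup_ops)  # Empty here because defer_tensors=False.
    # Expected: [[array([2., 4.]), 6.0], [array([2., 4.]), 6.0]]
    return sess.run(reduced)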
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/batch_allreduce.py
|
Python
|
apache-2.0
| 25,715
|
##
# Copyright (c) 2014-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
OpenDirectory live service tests.
"""
from __future__ import print_function
from itertools import chain
from uuid import UUID
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
try:
from twext.who.opendirectory import DirectoryService
moduleImported = True
except:
moduleImported = False
print("Could not import OpenDirectory")
if moduleImported:
from twext.who.expression import (
CompoundExpression, Operand, MatchExpression, MatchType, MatchFlags
)
from txdav.who.directory import CalendarDirectoryServiceMixin
from txdav.who.opendirectory import DirectoryService as OpenDirectoryService
class CalOpenDirectoryService(OpenDirectoryService, CalendarDirectoryServiceMixin):
pass
LOCAL_SHORTNAMES = "odtestalbert odtestbill odtestcarl odtestdavid odtestsubgroupa".split()
NETWORK_SHORTNAMES = "odtestamanda odtestbetty odtestcarlene odtestdenise odtestsubgroupb odtestgrouptop".split()
def onlyIfPopulated(func):
"""
Only run the decorated test method if the "odtestamanda" record exists
"""
@inlineCallbacks
def checkThenRun(self):
record = yield self.service.recordWithShortName(self.service.recordType.user, u"odtestamanda")
if record is not None:
result = yield func(self)
returnValue(result)
else:
print("OD not populated, skipping {}".format(func.func_name))
return checkThenRun
class LiveOpenDirectoryServiceTestCase(unittest.TestCase):
"""
Live service tests for L{DirectoryService}.
"""
def setUp(self):
self.service = DirectoryService()
def tearDown(self):
self.service._deletePool()
def verifyResults(self, records, expected, unexpected):
shortNames = []
for record in records:
for shortName in record.shortNames:
shortNames.append(shortName)
for name in expected:
self.assertTrue(name in shortNames)
for name in unexpected:
self.assertFalse(name in shortNames)
@onlyIfPopulated
@inlineCallbacks
def test_shortNameStartsWith(self):
records = yield self.service.recordsFromExpression(
MatchExpression(
self.service.fieldName.shortNames, u"odtest",
matchType=MatchType.startsWith
)
)
self.verifyResults(
records,
chain(LOCAL_SHORTNAMES, NETWORK_SHORTNAMES),
["anotherodtestamanda", "anotherodtestalbert"]
)
@onlyIfPopulated
@inlineCallbacks
def test_uid(self):
for uid, name in (
(u"9DC04A71-E6DD-11DF-9492-0800200C9A66", u"odtestbetty"),
(u"9DC04A75-E6DD-11DF-9492-0800200C9A66", u"odtestbill"),
):
record = yield self.service.recordWithUID(uid)
self.assertTrue(record is not None)
self.assertEquals(record.shortNames[0], name)
@onlyIfPopulated
@inlineCallbacks
def test_guid(self):
for guid, name in (
(UUID("9DC04A71-E6DD-11DF-9492-0800200C9A66"), u"odtestbetty"),
(UUID("9DC04A75-E6DD-11DF-9492-0800200C9A66"), u"odtestbill"),
):
record = yield self.service.recordWithGUID(guid)
self.assertTrue(record is not None)
self.assertEquals(record.shortNames[0], name)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithoutRecordType(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(expression)
# We should get back users and groups since we did not specify a type:
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithExplicitRecordType(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(
expression, recordTypes=[self.service.recordType.user]
)
# We should get back users but not groups:
self.verifyResults(
records,
["odtestbetty", "odtestalbert", "anotherodtestalbert"],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithMultipleExplicitRecordTypes(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(
expression, recordTypes=[
self.service.recordType.user,
self.service.recordType.group
]
)
# We should get back users and groups:
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokens(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens([u"be", u"test"])
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokensWithContextUser(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens(
[u"be", u"test"],
context=self.calService.searchContext_user
)
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
],
[
"odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb",
"odtestgroupbetty", "odtestgroupalbert"
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokensWithContextGroup(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens(
[u"be", u"test"],
context=self.calService.searchContext_group
)
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert"
],
[
"odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb",
"odtestbetty", "odtestalbert", "anotherodtestalbert"
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingMultipleFieldsNoRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"be", MatchFlags.caseInsensitive, MatchType.contains),
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=None
))
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert",
"odtestbetty", "odtestalbert", "anotherodtestalbert"
],
[
"odtestamanda",
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingSingleFieldNoRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=None
))
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert",
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestamanda",
],
[
"nobody",
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingFieldsWithRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"be", MatchFlags.caseInsensitive, MatchType.contains),
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=self.calService.recordType.user
))
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert"
],
[
"odtestamanda", "odtestgroupalbert", "odtestgroupbetty",
]
)
|
red-hood/calendarserver
|
contrib/od/test/test_live.py
|
Python
|
apache-2.0
| 14,432
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.ARM import *
from ConcernAssociationDialog import ConcernAssociationDialog
__author__ = 'Shamal Faily'
class ConcernAssociationListCtrl(wx.ListCtrl):
def __init__(self,parent,winId,dp,boxSize=wx.DefaultSize):
wx.ListCtrl.__init__(self,parent,winId,size=boxSize,style=wx.LC_REPORT)
self.dbProxy = dp
self.theCurrentEnvironment = ''
self.InsertColumn(0,'Source')
self.SetColumnWidth(0,100)
self.InsertColumn(1,'n')
self.SetColumnWidth(1,50)
self.InsertColumn(2,'Link Verb')
self.SetColumnWidth(2,75)
self.InsertColumn(3,'n')
self.SetColumnWidth(3,50)
self.InsertColumn(4,'Target')
self.SetColumnWidth(4,100)
self.theSelectedIdx = -1
self.theDimMenu = wx.Menu()
self.theDimMenu.Append(CONCA_MENUADD_ID,'Add')
self.theDimMenu.Append(CONCA_MENUDELETE_ID,'Delete')
self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)
wx.EVT_MENU(self.theDimMenu,CONCA_MENUADD_ID,self.onAddAssociation)
wx.EVT_MENU(self.theDimMenu,CONCA_MENUDELETE_ID,self.onDeleteAssociation)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onAssociationActivated)
def setEnvironment(self,environmentName):
self.theCurrentEnvironment = environmentName
def OnRightDown(self,evt):
self.PopupMenu(self.theDimMenu)
def onAddAssociation(self,evt):
dlg = ConcernAssociationDialog(self,self.dbProxy,self.theCurrentEnvironment)
if (dlg.ShowModal() == CONCERNASSOCIATION_BUTTONCOMMIT_ID):
self.theSelectedIdx = self.GetItemCount()
self.InsertStringItem(self.theSelectedIdx,dlg.source())
self.SetStringItem(self.theSelectedIdx,1,dlg.sourceMultiplicity())
self.SetStringItem(self.theSelectedIdx,2,dlg.link())
self.SetStringItem(self.theSelectedIdx,3,dlg.targetMultiplicity())
self.SetStringItem(self.theSelectedIdx,4,dlg.target())
def onDeleteAssociation(self,evt):
if (self.theSelectedIdx == -1):
errorText = 'No association selected'
errorLabel = 'Delete concern'
dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
selectedValue = self.GetItemText(self.theSelectedIdx)
self.DeleteItem(self.theSelectedIdx)
def OnItemSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
def OnItemDeselected(self,evt):
self.theSelectedIdx = -1
def onAssociationActivated(self,evt):
self.theSelectedIdx = evt.GetIndex()
source = self.GetItemText(self.theSelectedIdx)
sourceMultiplicity = self.GetItem(self.theSelectedIdx,1)
link = self.GetItem(self.theSelectedIdx,2)
targetMultiplicity = self.GetItem(self.theSelectedIdx,3)
target = self.GetItem(self.theSelectedIdx,4)
dlg = ConcernAssociationDialog(self,self.dbProxy,self.theCurrentEnvironment,source,sourceMultiplicity.GetText(),link.GetText(),target.GetText(),targetMultiplicity.GetText())
if (dlg.ShowModal() == CONCERNASSOCIATION_BUTTONCOMMIT_ID):
self.SetStringItem(self.theSelectedIdx,0,dlg.source())
self.SetStringItem(self.theSelectedIdx,1,dlg.sourceMultiplicity())
self.SetStringItem(self.theSelectedIdx,2,dlg.link())
self.SetStringItem(self.theSelectedIdx,3,dlg.targetMultiplicity())
self.SetStringItem(self.theSelectedIdx,4,dlg.target())
def load(self,assocs):
for source,sourceMultiplicity,link,target,targetMultiplicity in assocs:
idx = self.GetItemCount()
self.InsertStringItem(idx,source)
self.SetStringItem(idx,1,sourceMultiplicity)
self.SetStringItem(idx,2,link)
self.SetStringItem(idx,3,targetMultiplicity)
self.SetStringItem(idx,4,target)
def dimensions(self):
assocs = []
for x in range(self.GetItemCount()):
source = self.GetItemText(x)
sourceMultiplicity = self.GetItem(x,1)
link = self.GetItem(x,2)
targetMultiplicity = self.GetItem(x,3)
target = self.GetItem(x,4)
assocs.append((source,sourceMultiplicity.GetText(),link.GetText(),target.GetText(),targetMultiplicity.GetText()))
return assocs
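# A minimal data-shape sketch (hypothetical values, not from the original file):
# load() consumes and dimensions() returns rows of the form
# (source, sourceMultiplicity, link, target, targetMultiplicity), e.g.
#
#   listCtrl.load([('Analyst', '1', 'monitors', 'Portal', '*')])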
|
nathanbjenx/cairis
|
cairis/gui/ConcernAssociationListCtrl.py
|
Python
|
apache-2.0
| 5,001
|
#!/usr/bin/env python
"""
Problem Definition :
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
"""
def main():
ones = ['', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
double = ['Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
tens = ['', '', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
hundreds = ['', '', '', 'Hundred', 'Thousand']
words = 0
    for num in xrange(1, 1001):
word = ''
if len(str(num)) == 4:
digit = int(num/1000)
word = word + ones[digit] + hundreds[4]
num %= 1000
if len(str(num)) == 3:
digit = int(num/100)
word = word + ones[digit] + hundreds[3]
num %= 100
if num:
word += 'And'
if len(str(num)) == 2:
digit = int(num/10)
if digit == 1:
num %= 10
word += double[num]
num = 0
else:
word += tens[digit]
num %= 10
if len(str(num)) == 1:
word += ones[num]
words += len(word)
print words
if __name__ == '__main__':
main()
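# A small sanity check (not part of the original script) using the worked
# examples from the problem statement above: 342 ("three hundred and forty-two")
# has 23 letters and 115 ("one hundred and fifteen") has 20.
assert len("threehundredandfortytwo") == 23
assert len("onehundredandfifteen") == 20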
|
vivekpabani/projecteuler
|
python/017/problem_017.py
|
Python
|
apache-2.0
| 1,758
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def read_desc(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='calvin',
version='0.1',
url="http://github.com/EricssonResearch/calvin-base",
license="Apache Software License",
author="Ericsson Research",
author_email="N/A",
tests_require=[
'mock>1.0.1',
'pytest>=1.4.25',
'pytest-twisted'
],
install_requires=[
'colorlog>=2.6.0',
'kademlia>=0.4',
'ply>=3.4',
'Twisted>=15.0.0',
'requests >= 2.6.0',
'infi.traceback>=0.3.11',
        'wrapt==1.10.2'
],
description="Calvin is a distributed runtime and development framework for an actor based dataflow"
"programming methodology",
long_description=read_desc('README.md'),
packages=["calvin"],
include_package_data=True,
platforms='any',
test_suite="calvin.test.test_calvin",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Framework :: Twisted",
"Natural Language :: English",
"Intended Audience :: Developers",
"Topic :: Software Development",
],
extras_require={
'testing': ['pytest', 'mock']
},
entry_points={
'console_scripts': [
'csdeploy=calvin.Tools.deploy_app:main',
'csdocs=calvin.Tools.calvindoc:main',
'cscompile=calvin.Tools.cscompiler:main',
'csinstall=calvin.Tools.csinstaller:main',
'csweb=calvin.Tools.www.csweb:main'
]
}
)
|
shengwen1994/calvin-base
|
setup.py
|
Python
|
apache-2.0
| 2,480
|
from CTFd import create_app
from CTFd.models import *
from sqlalchemy_utils import database_exists, create_database, drop_database
from sqlalchemy.engine.url import make_url
def create_ctfd(ctf_name="CTFd", name="admin", email="admin@ctfd.io", password="password", setup=True):
app = create_app('CTFd.config.TestingConfig')
url = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if url.drivername == 'postgres':
url.drivername = 'postgresql'
if database_exists(url):
drop_database(url)
create_database(url)
with app.app_context():
app.db.create_all()
if setup:
with app.app_context():
with app.test_client() as client:
data = {}
r = client.get('/setup') # Populate session with nonce
with client.session_transaction() as sess:
data = {
"ctf_name": ctf_name,
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/setup', data=data)
return app
def register_user(app, name="user", email="user@ctfd.io", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/register')
with client.session_transaction() as sess:
data = {
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/register', data=data)
def login_as_user(app, name="user", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/login')
with client.session_transaction() as sess:
data = {
"name": name,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/login', data=data)
return client
def gen_challenge(db, name='chal_name', description='chal_description', value=100, category='chal_category', type=0):
chal = Challenges(name, description, value, category)
db.session.add(chal)
db.session.commit()
return chal
def gen_award(db, teamid, name="award_name", value=100):
award = Awards(teamid, name, value)
db.session.add(award)
db.session.commit()
return award
def gen_tag(db, chal, tag='tag_tag'):
tag = Tags(chal, tag)
db.session.add(tag)
db.session.commit()
return tag
def gen_file():
pass
def gen_flag(db, chal, flag='flag', key_type=0):
key = Keys(chal, flag, key_type)
db.session.add(key)
db.session.commit()
return key
def gen_team(db, name='name', email='user@ctfd.io', password='password'):
team = Teams(name, email, password)
db.session.add(team)
db.session.commit()
return team
def gen_solve(db, chalid, teamid, ip='127.0.0.1', flag='rightkey'):
solve = Solves(chalid, teamid, ip, flag)
db.session.add(solve)
db.session.commit()
return solve
def gen_wrongkey(db, teamid, chalid, flag='wrongkey'):
wrongkey = WrongKeys(teamid, chalid, flag)
db.session.add(wrongkey)
db.session.commit()
return wrongkey
def gen_tracking(db, ip, team):
tracking = Tracking(ip, team)
db.session.add(tracking)
db.session.commit()
return tracking
def gen_page(db, route, html):
page = Pages(route, html)
db.session.add(page)
db.session.commit()
return page
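# A minimal end-to-end usage sketch of the helpers above (illustration only; it
# assumes CTFd's TestingConfig points at a disposable database, and model/route
# details may differ between CTFd versions):
#
#   def test_registered_user_can_log_in():
#       app = create_ctfd()
#       register_user(app, name="user", email="user@ctfd.io", password="password")
#       client = login_as_user(app, name="user", password="password")
#       with app.app_context():
#           assert Teams.query.filter_by(name="user").first() is not None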
|
semprix/CTFIgniter
|
payload/CTFd/tests/helpers.py
|
Python
|
apache-2.0
| 3,681
|
#!/usr/bin/env python
'''
API v1.0 Command and Control (C2) routes for Mission Control
'''
__author__ = 'Edna Donoughe'
from flask import jsonify
from ooiservices.app.main import api
from ooiservices.app.models import Array
import json
from ooiservices.app.main.errors import bad_request
from ooiservices.app.main.authentication import auth
from ooiservices.app.decorators import scope_required
from ooiservices.app.main.c2 import read_store
from ooiservices.app.main.c2 import _get_platform, _get_instrument, _get_instruments
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - array routes
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/array/<string:array_code>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_array_mission_display(array_code):
#Get C2 array mission (display), return mission_display (contents of platform Mission tab)
array = Array.query.filter_by(array_code=array_code).first()
if not array:
return bad_request('unknown array (array_code: \'%s\')' % array_code)
mission_display = {}
return jsonify(mission_display=mission_display)
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - platform
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/platform/<string:reference_designator>/mission/instruments_list', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_instruments_list(reference_designator):
# C2 get [platform] Mission tab instruments_list, return instruments [{instrument1}, {instrument2}, ...]
# where each instrument dictionary (is a row in instruments list) contains:
# {'reference_designator': reference_designator, 'instrument_deployment_id': id, 'display_name': display_name }
# Samples:
# http://localhost:4000/c2/platform/reference_designator/mission/instruments_list
# http://localhost:4000/c2/platform/reference_designator/mission/instruments_list
contents = []
platform_info = {}
platform_deployment = _get_platform(reference_designator)
if platform_deployment:
# get ordered set of instrument_deployments for platform
# Get instruments for this platform
instruments, oinstruments = _get_instruments(reference_designator)
# create list of reference_designators (instruments) and accumulate dict result (key=reference_designator) for output
for instrument_deployment in instruments:
row = {}
row['reference_designator'] = instrument_deployment['reference_designator']
if instrument_deployment['display_name']:
row['display_name'] = instrument_deployment['display_name']
else:
row['display_name'] = instrument_deployment['reference_designator']
platform_info[instrument_deployment['reference_designator']] = row
# Create list of dictionaries representing row(s) for 'data' (ordered by reference_designator)
# 'data' == rows for initial grid ('Current Status')
for instrument_reference_designator in oinstruments:
if instrument_reference_designator in platform_info:
contents.append(platform_info[instrument_reference_designator])
return jsonify(instruments=contents)
@api.route('/c2/platform/<string:reference_designator>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_display(reference_designator):
#Get C2 platform Mission tab contents, return mission_display
mission_display = {}
platform = _get_platform(reference_designator)
if platform:
mission_display = {} # todo populate display content
return jsonify(mission_display=mission_display)
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - instrument
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/instrument/<string:reference_designator>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_display(reference_designator):
#Get C2 instrument Mission tab contents, return mission_display
mission_display = {}
instrument = _get_instrument(reference_designator)
if instrument:
        mission_display = {} # todo populate display content
return jsonify(mission_display=mission_display)
@api.route('/c2/platform/<string:reference_designator>/mission_selections', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_selections(reference_designator):
# C2 get platform Mission tab mission selections content, return mission_selections [{},{}...]
# return list of platform mission plans
mission_selections = []
platform = _get_platform(reference_designator)
if platform:
mission_selections = _get_mission_selections(reference_designator)
return jsonify(mission_selections=mission_selections)
@api.route('/c2/instrument/<string:reference_designator>/mission_selections', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_selections(reference_designator):
# C2 get instrument Mission tab mission selections content, return mission_selections [{},{}...]
# return list of instrument mission plans
mission_selections = []
instrument = _get_instrument(reference_designator)
if instrument:
mission_selections = _get_mission_selections(reference_designator)
return jsonify(mission_selections=mission_selections)
@api.route('/c2/platform/<string:reference_designator>/mission_selection/<string:mission_plan_store>', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_selection(reference_designator, mission_plan_store):
# C2 get [platform] selected mission_plan content, return mission_plan
if not mission_plan_store:
return bad_request('mission_plan_store parameter is empty')
mission_plan = {}
platform = _get_platform(reference_designator)
if platform:
mission_plan = _get_mission_selection(mission_plan_store)
return jsonify(mission_plan=mission_plan)
@api.route('/c2/instrument/<string:reference_designator>/mission_selection/<string:mission_plan_store>', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_selection(reference_designator, mission_plan_store):
# C2 get [instrument] selected mission_plan content from store (file, uframe), return mission_plan
if not mission_plan_store:
return bad_request('mission_plan_store parameter is empty')
mission_plan = {}
instrument = _get_instrument(reference_designator)
if instrument:
mission_plan = _get_mission_selection(mission_plan_store)
return jsonify(mission_plan=mission_plan)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# private helper methods
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _get_mission_selections(reference_designator):
mission_selections = []
response_text = json_get_uframe_mission_selections(reference_designator)
if response_text:
try:
mission_selections = json.loads(response_text)
except:
return bad_request('Malformed mission_selections; not in valid json format. (reference designator \'%s\')'
% reference_designator)
return mission_selections
def _get_mission_selection(mission_plan_store):
mission_plan = []
response_text = json_get_uframe_mission_selection(mission_plan_store)
if response_text:
try:
mission_plan.append(response_text)
except:
return bad_request('Malformed mission_plan data; not in valid json format.')
return mission_plan
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Private helpers for file data (./ooiuiservices/tests/c2data/*)
# Each of these will be replaced with interface to uframe or other interface (other than file)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def json_get_uframe_mission_selections(reference_designator):
try:
data = None
if reference_designator:
if len(reference_designator) == 27:
mission_type = 'instrument'
elif len(reference_designator) == 14:
mission_type = 'platform'
else:
return []
filename = "_".join([mission_type, 'missions'])
data = read_store(filename)
except:
return None
return data
def json_get_uframe_mission_selection(mission_plan_filename):
try:
data = None
if mission_plan_filename:
data = read_store(mission_plan_filename)
except:
return None
return data
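# A small illustration of the length convention used by
# json_get_uframe_mission_selections() above. The reference designators below are
# hypothetical examples (not taken from the original file): a 27-character
# designator is treated as instrument-level, a 14-character one as platform-level.
if __name__ == '__main__':
    assert len('CP02PMCO-WFP01-03-CTDPFK000') == 27  # instrument-level designator
    assert len('CP02PMCO-WFP01') == 14               # platform-level designator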
|
birdage/ooi-ui-services
|
ooiservices/app/main/c2_mission.py
|
Python
|
apache-2.0
| 9,042
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import (
function_tester,
list_ctx_and_func_name)
def copying_to_leaf(x, y, axis):
return (len(x.shape) - len(y.shape) - axis) == 0
def ref_broadcast_to(x, y, axis):
if axis < 0 or copying_to_leaf(x, y, axis):
# Copy data to leaf
return np.ones(x.shape) * y
# Copy data from specified axis
xs = len(x.shape)
ys = len(y.shape)
if xs == 2:
t = y[:, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif xs == 3:
if ys == 1:
if axis == 0:
t = y[:, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif ys == 2:
if axis == 0:
t = y[:, :, np.newaxis]
return np.broadcast_to(t, x.shape)
elif xs == 4:
if ys == 1:
if axis == 0:
t = y[:, np.newaxis, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 2:
t = y[np.newaxis, np.newaxis, :, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif ys == 2:
if axis == 0:
t = y[:, :, np.newaxis, np.newaxis]
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, :, np.newaxis]
return np.broadcast_to(t, x.shape)
elif ys == 3:
if axis == 0:
t = y[:, :, :, np.newaxis]
return np.broadcast_to(t, x.shape)
PARAMS = [
((2, 3), (2), 0),
((2, 3), (3), 1),
((2, 3, 4), (2), 0),
((2, 3, 4), (3), 1),
((2, 3, 4), (4), 2),
((2, 3, 4), (2, 3), 0),
((2, 3, 4), (3, 4), 1),
((2, 3, 4, 5), (2), 0),
((2, 3, 4, 5), (3), 1),
((2, 3, 4, 5), (4), 2),
((2, 3, 4, 5), (5), 3),
((2, 3, 4, 5), (2, 3), 0),
((2, 3, 4, 5), (3, 4), 1),
((2, 3, 4, 5), (4, 5), 2),
((2, 3, 4, 5), (2, 3, 4), 0),
((2, 3, 4, 5), (3, 4, 5), 1),
((2, 3, 4, 5), (5), -1),
((2, 3, 4, 5), (4, 5), -1),
((2, 3, 4, 5), (3, 4, 5), -1),
((2, 3, 4, 5), (2, 3, 4, 5), -1),
((2, 3, 4, 5), (2, 3, 4, 5), -2)
]
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name(['broadcast_to']))
@pytest.mark.parametrize("xs, ys, axis", PARAMS)
def test_broadcast_to_forward(xs, ys, axis, seed, fname, ctx, func_name):
rng = np.random.RandomState(seed)
ref_func = eval('ref_' + fname)
func = getattr(F, fname)
inputs = [rng.random_sample(xs), rng.random_sample(ys)]
function_tester(rng, func, ref_func, inputs, [axis],
backward=[False, False],
ctx=ctx, func_name=func_name)
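# A quick standalone check of the reference implementation above (not part of the
# original test suite; it runs only when this file is executed directly). A y of
# shape (3,) broadcast along axis=1 of an x of shape (2, 3, 4) should be tiled
# across the remaining axes.
if __name__ == '__main__':
    x = np.zeros((2, 3, 4))
    y = np.arange(3).astype(np.float64)
    out = ref_broadcast_to(x, y, 1)
    assert out.shape == x.shape
    assert np.all(out[0, :, 0] == y)
    assert np.all(out[1, :, 3] == y)
    print('ref_broadcast_to sanity check passed')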
|
sony/nnabla
|
python/test/function/test_broadcast_to.py
|
Python
|
apache-2.0
| 3,832
|
"""
Satellite PostgreSQL database queries
=====================================
This module contains the following parsers:
SatelliteAdminSettings - command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'``
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SatelliteComputeResources - command ``psql -d foreman -c 'select name, type from compute_resources' --csv``
-----------------------------------------------------------------------------------------------------------
SatelliteCoreTaskReservedResourceCount - command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv``
-------------------------------------------------------------------------------------------------------------------------------
SatelliteQualifiedCapsules - command ``psql -d foreman -c "select name from smart_proxies where download_policy = 'background'" --csv``
---------------------------------------------------------------------------------------------------------------------------------------
SatelliteQualifiedKatelloRepos - command ``psql -d foreman -c "select id, name, url, download_policy from katello_root_repositories where download_policy = 'background' or url is NULL" --csv``
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``
-------------------------------------------------------------------------------------------------------------------
"""
import os
import yaml
from csv import DictReader
from insights import parser, CommandParser
from insights.specs import Specs
from insights.parsers import SkipException, ParseException
from insights.parsers import keyword_search, calc_offset
from insights.util import deprecated
class SatellitePostgreSQLQuery(CommandParser, list):
"""
Parent class of satellite postgresql table queries. It can not be used
directly, A child class with overriding columns attribute is required.
It saves the rows data into a list. Each row is saved into a dict.
The key is the column name, the value is the value of the column.
Resultant data structure::
[
{
'name': 'abc',
'url': '',
'value': 'test'
},
{
'name': 'def',
'url': 'http://xx.com',
'value': ''
}
]
Sample Output::
name,url,value
abc,,test
def,http://xx.com,
Raises:
SkipException: when there isn't data in the table
ParseException: when the output isn't in good csv format.
NotImplementedError: when the subclass doesn't override the columns attribute.
"""
# child class should override the columns attribute with its own column names
columns = []
def parse_content(self, content):
if not self.columns:
raise NotImplementedError("Please override the columns attribute.")
start_index = calc_offset(content, self.columns, require_all=True)
valid_lines = content[start_index:]
reader = DictReader(os.linesep.join(valid_lines).splitlines(True))
for row in reader:
self.append(row)
if not self:
raise SkipException("There is no data in the table.")
def search(self, **kwargs):
"""
Get the rows by searching the table with kwargs.
This uses the :py:func:`insights.parsers.keyword_search` function for
searching; see its documentation for usage details. If no search
parameters are given, no rows are returned.
It simplify the value of the column according to actual usage.
Returns:
list: A list of dictionaries of rows that match the given
search criteria.
"""
return keyword_search(self, **kwargs)
@parser(Specs.satellite_settings)
class SatelliteAdminSettings(SatellitePostgreSQLQuery):
"""
    Parse the output of the command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'``.
Sample output::
name,value,default
unregister_delete_host,"--- true
...","--- false
..."
destroy_vm_on_host_delete,,"--- true
..."
Examples:
>>> type(table)
<class 'insights.parsers.satellite_postgresql_query.SatelliteAdminSettings'>
>>> table.get_setting('unregister_delete_host')
True
>>> table.get_setting('destroy_vm_on_host_delete')
True
"""
columns = ['name', 'value', 'default']
def _parse_yaml(self, value):
if value:
try:
return yaml.safe_load(value)
except Exception:
raise ParseException("Bad format value: %s" % value)
return value
def parse_content(self, content):
"""
The "default" and "value" columns must be selected, or else the
settings value can't be determined.
The "default" and "value" column are in yaml format, it is transfer to
python object.
Raises:
SkipException: when value or default column isn't found in the
table.
ParseException: when the value or default in bad yaml format.
"""
super(SatelliteAdminSettings, self).parse_content(content)
for row in self:
row['default'] = self._parse_yaml(row['default'])
row['value'] = self._parse_yaml(row['value'])
def get_setting(self, setting_name):
"""
Get the actual value of setting_name.
If the value column isn't empty, the value of the setting_name is the
value column, or else it's the default column.
Args:
setting_name (str): the value of name column which is searched in the table.
Returns:
It depends on the setting, maybe boolean, string, int or a list.
None if the setting_name doesn't exist in the table.
"""
rows = self.search(name=setting_name)
if rows:
value = rows[0].get('value')
return rows[0].get('default') if value == '' else value
@parser(Specs.satellite_compute_resources)
class SatelliteComputeResources(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c 'select name, type from compute_resources' --csv``.
Sample output::
name,type
test_compute_resource1,Foreman::Model::Libvirt
test_compute_resource2,Foreman::Model::RHV
Examples:
>>> type(resources_table)
<class 'insights.parsers.satellite_postgresql_query.SatelliteComputeResources'>
>>> rows=resources_table.search(type='Foreman::Model::Libvirt')
>>> len(rows)
1
>>> rows[0]['name']
'test_compute_resource1'
"""
columns = ['name', 'type']
@parser(Specs.satellite_core_taskreservedresource_count)
class SatelliteCoreTaskReservedResourceCount(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv``.
Sample output::
count
0
Examples:
>>> type(tasks)
<class 'insights.parsers.satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount'>
>>> tasks[0]['count']
'0'
"""
columns = ['count']
@parser(Specs.satellite_katello_empty_url_repositories)
class SatelliteKatelloEmptyURLRepositories(SatellitePostgreSQLQuery):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.satellite_postgresql_query.SatelliteQualifiedKatelloRepos` instead.
Parse the output of the command ``psql -d foreman -c 'select id, name from katello_root_repositories where url is NULL;' --csv``.
Sample output::
id,name
54,testa
55,testb
Examples:
>>> type(katello_root_repositories)
<class 'insights.parsers.satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories'>
>>> len(katello_root_repositories)
2
>>> katello_root_repositories[0]['name']
'testa'
"""
columns = ['id', 'name']
def __init__(self, *args, **kwargs):
deprecated(SatelliteKatelloEmptyURLRepositories, 'Please use the SatelliteQualifiedKatelloRepos parser in the current module.')
super(SatelliteKatelloEmptyURLRepositories, self).__init__(*args, **kwargs)
@parser(Specs.satellite_qualified_katello_repos)
class SatelliteQualifiedKatelloRepos(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c "select id, name, url, download_policy from katello_root_repositories where download_policy = 'background' or url is NULL" --csv``.
Sample output::
id,name,url,download_policy
2,Red Hat Satellite Tools 6.8 for RHEL 7 Server RPMs x86_64,,on_demand
3,Red Hat Enterprise Linux 8 for x86_64 - AppStream RPMs 8,https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os,background
4,Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server,https://cdn.redhat.com/content/dist/rhel/server/7/7Server/x86_64/os,background
Examples:
>>> type(repos)
<class 'insights.parsers.satellite_postgresql_query.SatelliteQualifiedKatelloRepos'>
>>> len(repos)
3
>>> repos[0]['name']
'Red Hat Satellite Tools 6.8 for RHEL 7 Server RPMs x86_64'
"""
columns = ['id', 'name', 'url', 'download_policy']
@parser(Specs.satellite_qualified_capsules)
class SatelliteQualifiedCapsules(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c "select name from smart_proxies where download_policy = 'background'" --csv``.
Sample output::
name
capsule1.test.com
capsule2.test.com
Examples:
>>> type(capsules)
<class 'insights.parsers.satellite_postgresql_query.SatelliteQualifiedCapsules'>
>>> len(capsules)
2
>>> capsules[0]['name']
'capsule1.test.com'
"""
columns = ['name']
@parser(Specs.satellite_sca_status)
class SatelliteSCAStatus(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``.
Sample output::
displayname,content_access_mode
Default Organization,entitlement
Orgq,org_environment
Examples:
>>> type(sat_sca_info)
<class 'insights.parsers.satellite_postgresql_query.SatelliteSCAStatus'>
>>> sat_sca_info.sca_enabled
True
"""
columns = ['displayname', 'content_access_mode']
@property
def sca_enabled(self):
"""
If the value of content_access_mode is "org_environment", it means the SCA is enabled for this organization.
Return True if any organization has SCA enabled on the satellite else False
"""
return bool(len(self.search(content_access_mode='org_environment')))
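# A minimal usage sketch for adding a new query parser (illustration only; it
# assumes insights-core is installed and that ``context_wrap`` from
# ``insights.tests`` is available, as used by this project's unit tests). A child
# class only overrides ``columns``; parsed rows are plain dicts filterable with
# search():
#
#   from insights.tests import context_wrap
#   OUTPUT = "name,type\ntest_compute_resource1,Foreman::Model::Libvirt\n"
#   table = SatelliteComputeResources(context_wrap(OUTPUT))
#   table.search(type='Foreman::Model::Libvirt')[0]['name']
#   # -> 'test_compute_resource1'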
|
RedHatInsights/insights-core
|
insights/parsers/satellite_postgresql_query.py
|
Python
|
apache-2.0
| 11,542
|
import datetime
from django.contrib.sessions.models import Session
from celery import task
from celery.utils.log import get_task_logger
from brainindex.index import create_thought, update_thought, delete_thought, CREATE, UPDATE, DELETE
logger = get_task_logger(__name__)
@task(ignore_result = True)
def clear_expired_sessions():
moment = datetime.datetime.now()
Session.objects.filter(expire_date__lte = moment).delete()
@task(ignore_result = True)
def index_operation(thought, op_type):
if op_type == CREATE:
create_thought(thought)
elif op_type == UPDATE:
update_thought(thought)
elif op_type == DELETE:
delete_thought(thought)
else:
logger.warn('Unsupported index operation.')
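# A minimal usage sketch (illustration only; it assumes a Thought instance from
# this project's models and a running Celery worker):
#
#   index_operation.delay(thought, CREATE)  # queue an index insert for a thought
#   clear_expired_sessions.delay()          # purge expired Django sessions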
|
michaupl/braincloud
|
brainblog/tasks.py
|
Python
|
apache-2.0
| 745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: w4n9@sina.com
@create: 16/7/1
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
from multiprocessing import Process, Queue
import uuid
def process(q):
"""
test process
"""
content = str(uuid.uuid4())
for i in range(4):
q.put({'content': content})
def main():
"""
main process
"""
q = Queue()
plist = []
for i in range(4):
proc = Process(target=process, args=(q,))
plist.append(proc)
for proc in plist:
proc.start()
for proc in plist:
proc.join()
while True:
if q.empty():
print "empty"
break
else:
print q.get()
|
w4n9H/PythonSkillTree
|
Distributed/ProcessThread/LocalProcessQueue.py
|
Python
|
apache-2.0
| 752
|
#! /usr/bin/env python
# Python script for the Interoute Virtual Data Centre API:
# Name: dcg_member_listing.py
# Purpose: List the properties and membership of Direct Connect Groups
# Requires: class VDCApiCall in the file vdc_api_call.py
# See the repo: https://github.com/Interoute/API-fun-and-education
#
# You can pass options via the command line: type 'python dcg_member_listing.py -h' for usage information
#
# The VDC account used must be able to access the VDC regions in the argument 'regionlist'.
# Use the regionlist argument to change the regions for a limited account (for example, a 14-day trial account is excluded from Asia region)
# Example of passing region names as arguments (do not use braces, quotes or commas): 'python dcg_member_listing.py --regionlist Europe USA -n'
#
# Copyright (C) Interoute Communications Limited, 2017
from __future__ import print_function
import vdc_api_call as vdc
import getpass
import json
import os
import string
import datetime
import argparse
import re
import sys
def print_network_members(vmList,networkid,isProvisioned,prefixChars):
networkmembers = []
for vm in vmList:
for i in range(len(vm['nic'])):
if networkid == vm['nic'][i]['networkid']:
networkmembers.append([int(vm['nic'][i]['ipaddress'].split('.')[-1]),vm['nic'][i]['ipaddress'],vm['name'],vm['id']])
break # Can break out of this loop as soon as the network id is found for a NIC
if len(networkmembers)>0:
networkmembers.sort() # VMs will be sorted by the last segment of their IP address (=first element of each members list)
for i in range(len(networkmembers)):
if i==len(networkmembers)-1: #this is last VM in the network
print(prefixChars + unichr(0x2514)+" %s: '%s'" % (networkmembers[i][1],networkmembers[i][2]))
else:
print(prefixChars + unichr(0x251C)+" %s: '%s'" % (networkmembers[i][1],networkmembers[i][2]))
else:
if isProvisioned:
print(prefixChars + "*(NO MEMBERS)")
else:
print(prefixChars + "*(NOT PROVISIONED)")
if __name__ == '__main__':
# STEP 1: Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default=os.path.join(os.path.expanduser('~'), '.vdcapi'),
help="path/name of the config file to be used for the API URL and API keys")
parser.add_argument("-d", "--dcgname", help="Show information only for the specified DCG name")
parser.add_argument("-b", "--dcgid", help="Show information only for the specified DCG ID")
parser.add_argument("-n","--netmem",action='store_true',
help="show the VM members of the Private Direct Connect networks")
parser.add_argument("-r","--regionlist",default=['Europe', 'USA', 'Asia'],nargs='+',
help="specify the list of regions to be checked")
# Note : The VDC account used must be able to access all of the VDC regions in the argument 'regionlist'.
# Use this argument to change the list for a limited account (for example, a 14-day trial account is excluded from Asia region)
config_file = parser.parse_args().config
dcgid_requested = parser.parse_args().dcgid
dcgname_requested = parser.parse_args().dcgname
show_netmem = parser.parse_args().netmem
vdcRegions = parser.parse_args().regionlist
# STEP 2: If config file is found, read its content,
# else query user for the URL, API key, Secret key
if os.path.isfile(config_file):
with open(config_file) as fh:
data = fh.read()
config = json.loads(data)
api_url = config['api_url']
apiKey = config['api_key']
secret = config['api_secret']
else:
print('API url (e.g. http://10.220.18.115:8080/client/api):', end='')
api_url = raw_input()
print('API key:', end='')
apiKey = raw_input()
secret = getpass.getpass(prompt='API secret:')
# STEP 3: Create the api access object
api = vdc.VDCApiCall(api_url, apiKey, secret)
# STEP 4: API calls to get the information about DCGs and networks
if dcgid_requested:
dcgList = api.listDirectConnectGroups({'id':dcgid_requested})
if dcgList['count'] == 0:
print("ERROR: The dcgid input did not match a DCG in this VDC account.")
sys.exit("FATAL: Program terminating")
elif dcgname_requested:
dcgList = api.listDirectConnectGroups({'name':dcgname_requested})
if dcgList['count'] == 0:
print("ERROR: The dcgname input did not match a DCG in this VDC account.")
sys.exit("FATAL: Program terminating")
else:
dcgList = api.listDirectConnectGroups({})
networksLists = {}
if show_netmem:
vmLists = {}
for r in vdcRegions:
nlistPDC = api.listNetworks({'region': r, 'subtype': 'privatedirectconnect'})
nlistPDCEgress = api.listNetworks({'region': r, 'subtype': 'privatedirectconnectwithgatewayservicesegress'})
if nlistPDC['count'] == 0 and nlistPDCEgress['count'] == 0: # there are no PrivateDirectConnect networks in this region
networksLists[r] = {'count':0, 'network':[]}
else:
networksLists[r] = {'count': nlistPDC['count'] + nlistPDCEgress['count'], 'network': nlistPDC['network'] + nlistPDCEgress['network']}
if show_netmem:
zonesResponse = api.listZones({'region':r})
zonesList = [z['name'] for z in zonesResponse['zone']]
vmRawList = api.listVirtualMachines({'region':r})
for z in zonesList:
try:
vmLists[z] = [v for v in vmRawList['virtualmachine'] if v['zonename']==z]
except KeyError: # there are no VMs in this region so lookup in the dict will fail
vmLists[z] = []
# STEP 5: Process the information from the API calls
try:
checkTime = datetime.datetime.utcnow() # get the current time (UTC = GMT)
print("\nDirect Connect Group listing for the account '%s' checked at %s:"
% (api.getApiLimit({})['apilimit']['account'], checkTime.strftime("%Y-%m-%d %H:%M:%S UTC")))
if dcgid_requested:
print("\n** Results are shown only for dcgid=%s" % dcgid_requested)
elif dcgname_requested:
print("\n** Results are shown only for dcgname=\'%s\'" % dcgname_requested)
if len(vdcRegions)==3:
print("\n** All VDC regions are being scanned for Private Direct Connect networks")
else:
print("\n** Only these regions will be scanned and their Private Direct Connect networks shown: %s" % (vdcRegions))
print("\n** Networks which have 'isprovisioned' set to False are labelled with '/NotProv/' and are not functional")
print("** Output may not be correct for DCGs and networks that were not created with NetworkAPI functions because\n** they may be missing the information in the listNetworks call which identifies the DCG membership of the network.")
print("** (+E) denotes networks with gateway services for Internet egress\n")
for d in dcgList['directconnectgroups']:
print(" "+unichr(0x2015)+' \'%s\' (dcgid: %s)' % (d['name'], d['id']))
members = []
for r in vdcRegions:
if networksLists[r]['network'] != []:
for n in networksLists[r]['network']:
if n['dcgfriendlyname'] == d['name']:
if 'isprovisioned' not in n:
n['isprovisioned'] = 'Unknown'
members.append([n['cidr'],n['name'],n['zonename'],r,n['id'],n['isprovisioned'],n['displaytext'],n['subtype']])
if len(members)>0:
members = sorted(members, key=lambda x: x[2]) #sort by zonename
members = sorted(members, key=lambda x: x[3]) #sort by region
for i in range(len(members)):
if members[i][7] == 'privatedirectconnectwithgatewayservicesegress':
egressLabel = " (+E)"
else:
egressLabel = ""
if members[i][5] == True:
provisionedLabel = ""
elif members[i][5] == False:
provisionedLabel = "/NotProv/ "
elif members[i][5] == 'Unknown':
provisionedLabel = "/ProvUnknown/ "
if i==len(members)-1: #if this is last item in list
if members[i][1] != members[i][6]: #if network 'name' and 'displaytext' are not the same
print(" "+unichr(0x2514)+" %s%s: %s'%s'|'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][6],members[i][2],members[i][3]))
else:
print(" "+unichr(0x2514)+" %s%s: %s'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][2],members[i][3]))
if show_netmem:
if vmLists[members[i][2]] != {}:
print_network_members(vmLists[members[i][2]],members[i][4],members[i][5]," ")
else:
print(" " + "*(NO MEMBERS)")
else:
if members[i][1] != members[i][6]: #if network 'name' and 'displaytext' are not the same
print(" "+unichr(0x251C)+" %s%s: %s'%s'|'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][6],members[i][2],members[i][3]))
else:
print(" "+unichr(0x251C)+" %s%s: %s'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][2],members[i][3]))
if show_netmem:
if vmLists[members[i][2]] != {}:
print_network_members(vmLists[members[i][2]],members[i][4],members[i][5]," "+unichr(0x2502)+" ")
else:
print(" " + "*(NO MEMBERS)")
print(" ")
else:
print(" *(NO NETWORKS)")
print(" ")
except KeyError:
print("Exception: KeyError")
##print('Nothing to do: No Direct Connect Groups found')
|
Interoute/API-fun-and-education
|
dcg_member_listing.py
|
Python
|
apache-2.0
| 10,474
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#- Author : (DEK) Devendra Kavthekar
# Write a program to generate and print another tuple whose values are
# even numbers in the given tuple (1,2,3,4,5,6,7,8,9,10).
# Hints:
# Use "for" to iterate the tuple
# Use tuple() to generate a tuple from a list.
def main():
val = tuple([value for value in range(1, 11)])
print val
    evens = []
    for number in val:
        if number % 2 == 0:
            evens.append(number)
    # Build the result tuple from the list of even numbers, as the hint suggests.
    print tuple(evens)
if __name__ == '__main__':
main()
|
dek-odoo/python-samples
|
python exercises/dek_program043.py
|
Python
|
apache-2.0
| 546
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CreateFolderResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'CreateFolderResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # CreateFolderResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
|
liosha2007/temporary-groupdocs-python-sdk
|
groupdocs/models/CreateFolderResponse.py
|
Python
|
apache-2.0
| 1,154
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the widget under the Sessions tab within the ``%manage_dataproc widget``"""
import json
import ipyvuetify as v
import sparkmagic.utils.configuration as conf
from sparkmagic.utils.constants import LANG_SCALA, LANG_PYTHON
from sparkmagic.controllerwidget.abstractmenuwidget import AbstractMenuWidget
from googledataprocauthenticator.utils.utils import get_session_id_to_name
from googledataprocauthenticator.utils.constants import WIDGET_WIDTH
class CreateSessionWidget(AbstractMenuWidget):
def __init__(self, spark_controller, ipywidget_factory, ipython_display,
endpoints, refresh_method, state, db):
super(CreateSessionWidget, self).__init__(spark_controller, ipywidget_factory,
ipython_display, True)
self.endpoints = endpoints
self.refresh_method = refresh_method
self.properties = json.dumps(conf.session_configs())
self.state = state
self.db = db
self.delete_pressed = False
backicon = v.Icon(children=['mdi-arrow-left'])
backicon.on_event('click', self._on_back_click)
back_toolbar = v.Toolbar(
elevation="0",
children=[
v.ToolbarItems(children=[backicon]),
v.ToolbarTitle(children=['Create new session']),
v.Spacer()
],
app=True, # If true, the other widgets float under on scroll
)
self.name_textfield = v.TextField(
class_='ma-2',
placeholder='Enter session name',
label='Name',
dense=True,
color='primary',
outlined=True,
v_model=None,
)
self.endpoints_dropdown_widget = v.Select(
class_='ma-2',
placeholder='Select an endpoint',
label='Endpoint',
dense=True,
color='primary',
persistent_hint=True,
hide_selected=True,
outlined=True,
items=list(self.endpoints.keys()),
auto_select_first=True,
v_model=None,
)
self.language_dropdown = v.Select(
class_='ma-2',
label='Language',
placeholder='Select a language',
dense=True,
color='primary',
persistent_hint=True,
hide_selected=False,
outlined=True,
items=[LANG_SCALA, LANG_PYTHON],
auto_select_first=True,
v_model=None,
)
self.properties_textbox = v.TextField(
class_='ma-2',
label='Properties',
dense=True,
color='primary',
outlined=True,
v_model=json.dumps(conf.session_configs()),
)
self.create_session = v.Btn(class_='ma-2', color='primary', children=['Create'])
self.create_session.on_event('click', self._on_create_click)
self.cancel = v.Btn(class_='ma-2', color='primary', children=['Cancel'])
self.cancel.on_event('click', self._on_cancel_click)
self.create_session_container = v.Container(
style_=f'width: {WIDGET_WIDTH};', class_='ma-2',
children=[
back_toolbar,
v.Row(class_='ma-2', children=[
v.Col(children=[self.name_textfield])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.endpoints_dropdown_widget])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.language_dropdown])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.properties_textbox])
]),
v.Row(class_='ma-2', children=[self.create_session, self.cancel]),
]
)
no_back_toolbar = v.Toolbar(
elevation="0",
children=[
v.ToolbarTitle(
titleMarginStart='12dp',
contentInsetStartWithNavigation="56dp",
children=['Sessions']
),
v.Spacer()
],
app=True, # If true, the other widgets float under on scroll
)
new_session = v.Btn(class_='ma-2', color='primary', children=['New Session'])
new_session.on_event('click', self._on_new_session_click)
self.toolbar = v.Row(children=[no_back_toolbar, new_session])
session_table_values = self._generate_session_values()
self.delete_icon = v.Icon(children=['mdi-delete'])
self.delete_icon.on_event('click', self._on_delete_icon_pressed)
self.session_table = v.DataTable(
style_=f'width: {WIDGET_WIDTH};', no_data_text='No sessions', hide_default_footer=True,
disable_pagination=True, item_key='name', headers=[
{'text': 'Session', 'align': 'start', 'sortable': False, 'value': 'name'},
{'text': 'ID', 'sortable': False, 'value': 'id'},
{'text': 'Status', 'sortable': False, 'value': 'status'},
{'text': 'Kind', 'sortable': False, 'value': 'kind'},
{'text': '', 'sortable': False, 'value': 'actions'},
],
items=session_table_values, dense=False, fixedHeader=False, v_slots=[
{'name': 'item.actions', 'children' : [self.delete_icon]},
{'name': 'no-data', 'children': ['No sessions']}
]
)
self.session_table.on_event('click:row', self._remove_row_from_table)
self.toolbar_with_table = v.Container(
style_=f'width: {WIDGET_WIDTH};', class_='mx-auto', children=[
v.Row(class_='mx-auto', children=[self.toolbar]),
v.Row(class_='mx-auto', children=[self.session_table])
]
)
self.children = [self.create_session_container, self.toolbar_with_table]
for child in self.children:
child.parent_widget = self
self._update_view()
def run(self):
pass
def _on_create_click(self, _widget, _event, _data):
try:
properties_json = self.properties_textbox.v_model
if properties_json.strip() != "":
conf.override(
conf.session_configs.__name__,
json.loads(self.properties_textbox.v_model)
)
except ValueError as caught_exc:
self.ipython_display.send_error(
"Session properties must be a valid JSON string. Error:\n{}".format(caught_exc)
)
return
endpoint = self.endpoints[self.endpoints_dropdown_widget.v_model]
language = self.language_dropdown.v_model
alias = self.name_textfield.v_model
skip = False
properties = conf.get_session_properties(language)
try:
self.spark_controller.add_session(alias, endpoint, skip, properties)
# session_id_to_name dict is necessary to restore session name across notebook sessions
# since the livy server does not store the name.
session_id_to_name = get_session_id_to_name(self.db, self.ipython_display)
# add session id -> name to session_id_to_name dict
session_id_to_name[self.spark_controller.session_manager.get_session(alias).id] = alias
self.db['autorestore/' + 'session_id_to_name'] = session_id_to_name
except ValueError as caught_exc:
self.ipython_display.send_error("""Could not add session with
name:
{}
properties:
{}
due to error: '{}'""".format(alias, properties, caught_exc))
return
self.refresh_method(0)
def _on_delete_icon_pressed(self, _widget, _event, _data):
self.delete_pressed = True
def _remove_row_from_table(self, _table, _event, row):
if self.delete_pressed:
session_name = row.get('name')
session_id = row.get('id')
try:
self.spark_controller.delete_session_by_name(session_name)
session_id_to_name = get_session_id_to_name(self.db, self.ipython_display)
session_id_to_name.pop(session_id)
self.db['autorestore/' + 'session_id_to_name'] = session_id_to_name
self.refresh_method(0)
except Exception as caught_exc:
                self.ipython_display.send_error("Failed to delete session due to the following "\
f"error: {str(caught_exc)}")
def _on_cancel_click(self, _widget, _event, _data):
self.state = 'list'
self._update_view()
def _on_new_session_click(self, _widget, _event, _data):
self.state = 'add'
self._update_view()
def _on_back_click(self, _widget, _event, _data):
self.state = 'list'
self._update_view()
def _generate_session_values(self):
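        """Builds the rows shown in the sessions table from the sessions currently managed by the spark controller."""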
session_table_values = []
for name, session in self.spark_controller.get_managed_clients().items():
session_table_values.append({'name':name, 'id':session.id, \
'status':session.status, 'kind':session.kind})
return session_table_values
def _update_view(self):
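        """Shows either the create-session form (state 'add') or the session table (state 'list')."""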
if self.state == 'add':
self.toolbar_with_table.layout.display = 'none'
self.create_session_container.layout.display = 'flex'
elif self.state == 'list':
self.create_session_container.layout.display = 'none'
self.toolbar_with_table.layout.display = 'flex'
|
GoogleCloudDataproc/dataprocmagic
|
googledataprocauthenticator/controllerwidget/createsessionwidget.py
|
Python
|
apache-2.0
| 10,212
|
import pytest
import numpy as np
import pandas as pd
from unittest import mock
import os
import yaml
import mlflow.statsmodels
import mlflow.utils
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models.utils import _read_example
from mlflow.models import Model, infer_signature
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from tests.helper_functions import (
pyfunc_serve_and_score_model,
_compare_conda_env_requirements,
_assert_pip_requirements,
_is_available_on_pypi,
_compare_logged_code_paths,
)
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.statsmodels.model_fixtures import (
ols_model,
arma_model,
glsar_model,
gee_model,
glm_model,
gls_model,
recursivels_model,
rolling_ols_model,
rolling_wls_model,
wls_model,
)
EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("statsmodels") else ["--no-conda"]
# The code in this file has been adapted from the test cases of the lightgbm flavor.
def _get_dates_from_df(df):
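    """Extracts the forecast start and end dates from the first row of the 'start' and 'end' columns of the inference dataframe."""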
start_date = df["start"][0]
end_date = df["end"][0]
return start_date, end_date
@pytest.fixture
def model_path(tmpdir, subdir="model"):
return os.path.join(str(tmpdir), subdir)
@pytest.fixture
def statsmodels_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["pytest", "statsmodels"])
return conda_env
def _test_models_list(tmpdir, func_to_apply):
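    """Applies func_to_apply to each model fixture, passing (start, end) dates as predict arguments for time-series models and the inference dataframe for the rest."""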
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
fixtures = [
ols_model,
arma_model,
glsar_model,
gee_model,
glm_model,
gls_model,
recursivels_model,
rolling_ols_model,
rolling_wls_model,
wls_model,
]
for algorithm in fixtures:
name = algorithm.__name__
path = os.path.join(tmpdir, name)
model = algorithm()
if isinstance(model.alg, TimeSeriesModel):
start_date, end_date = _get_dates_from_df(model.inference_dataframe)
func_to_apply(model, path, start_date, end_date)
else:
func_to_apply(model, path, model.inference_dataframe)
def _test_model_save_load(statsmodels_model, model_path, *predict_args):
mlflow.statsmodels.save_model(statsmodels_model=statsmodels_model.model, path=model_path)
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)
if hasattr(statsmodels_model.model, "predict"):
np.testing.assert_array_almost_equal(
statsmodels_model.model.predict(*predict_args),
reloaded_model.predict(*predict_args),
)
np.testing.assert_array_almost_equal(
reloaded_model.predict(*predict_args),
reloaded_pyfunc.predict(statsmodels_model.inference_dataframe),
)
def _test_model_log(statsmodels_model, model_path, *predict_args):
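    """Logs the model to an MLflow run, reloads it from the run URI, and checks its predictions and the persisted pyfunc conda environment."""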
model = statsmodels_model.model
with TempDir(chdr=True, remove_on_exit=True) as tmp:
try:
artifact_path = "model"
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
model_info = mlflow.statsmodels.log_model(
statsmodels_model=model, artifact_path=artifact_path, conda_env=conda_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
assert model_info.model_uri == model_uri
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_uri)
if hasattr(model, "predict"):
np.testing.assert_array_almost_equal(
model.predict(*predict_args), reloaded_model.predict(*predict_args)
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
@pytest.mark.large
def test_models_save_load(tmpdir):
_test_models_list(tmpdir, _test_model_save_load)
@pytest.mark.large
def test_models_log(tmpdir):
_test_models_list(tmpdir, _test_model_log)
def test_signature_and_examples_are_saved_correctly():
model, _, X = ols_model()
signature_ = infer_signature(X)
example_ = X[0:3, :]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.statsmodels.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert np.array_equal(_read_example(mlflow_model, path), example)
def test_model_load_from_remote_uri_succeeds(model_path, mock_s3_bucket):
model, _, inference_dataframe = arma_model()
mlflow.statsmodels.save_model(statsmodels_model=model, path=model_path)
artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_uri)
start_date, end_date = _get_dates_from_df(inference_dataframe)
np.testing.assert_array_almost_equal(
model.predict(start=start_date, end=end_date),
reloaded_model.predict(start=start_date, end=end_date),
)
def test_log_model_calls_register_model():
# Adapted from lightgbm tests
ols = ols_model()
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=conda_env,
registered_model_name="OLSModel1",
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
mlflow.register_model.assert_called_once_with(
model_uri, "OLSModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS
)
def test_log_model_no_registered_model_name():
ols = ols_model()
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
mlflow.statsmodels.log_model(
statsmodels_model=ols.model, artifact_path=artifact_path, conda_env=conda_env
)
mlflow.register_model.assert_not_called()
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
model_path, statsmodels_custom_env
):
ols = ols_model()
mlflow.statsmodels.save_model(
statsmodels_model=ols.model, path=model_path, conda_env=statsmodels_custom_env
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != statsmodels_custom_env
with open(statsmodels_custom_env, "r") as f:
statsmodels_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == statsmodels_custom_env_parsed
def test_model_save_persists_requirements_in_mlflow_model_directory(
model_path, statsmodels_custom_env
):
ols = ols_model()
mlflow.statsmodels.save_model(
statsmodels_model=ols.model, path=model_path, conda_env=statsmodels_custom_env
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(statsmodels_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_log_model_with_pip_requirements(tmpdir):
ols = ols_model()
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.statsmodels.log_model(ols.model, "model", pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", "a"], strict=True)
# List of requirements
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", "a", "b"], strict=True
)
# Constraints file
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
["mlflow", "b", "-c constraints.txt"],
["a"],
strict=True,
)
@pytest.mark.large
def test_log_model_with_extra_pip_requirements(tmpdir):
ols = ols_model()
default_reqs = mlflow.statsmodels.get_default_pip_requirements()
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.statsmodels.log_model(ols.model, "model", extra_pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a"])
# List of requirements
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a", "b"]
)
# Constraints file
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
["mlflow", *default_reqs, "b", "-c constraints.txt"],
["a"],
)
def test_model_save_accepts_conda_env_as_dict(model_path):
ols = ols_model()
conda_env = dict(mlflow.statsmodels.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.statsmodels.save_model(statsmodels_model=ols.model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(statsmodels_custom_env):
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=statsmodels_custom_env,
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != statsmodels_custom_env
with open(statsmodels_custom_env, "r") as f:
statsmodels_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == statsmodels_custom_env_parsed
def test_model_log_persists_requirements_in_mlflow_model_directory(statsmodels_custom_env):
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=statsmodels_custom_env,
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(statsmodels_custom_env, saved_pip_req_path)
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
model_path,
):
ols = ols_model()
mlflow.statsmodels.save_model(statsmodels_model=ols.model, path=model_path)
_assert_pip_requirements(model_path, mlflow.statsmodels.get_default_pip_requirements())
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies():
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(statsmodels_model=ols.model, artifact_path=artifact_path)
model_uri = mlflow.get_artifact_uri(artifact_path)
_assert_pip_requirements(model_uri, mlflow.statsmodels.get_default_pip_requirements())
def test_pyfunc_serve_and_score():
model, _, inference_dataframe = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(model, artifact_path)
model_uri = mlflow.get_artifact_uri(artifact_path)
resp = pyfunc_serve_and_score_model(
model_uri,
data=pd.DataFrame(inference_dataframe),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
np.testing.assert_array_almost_equal(scores, model.predict(inference_dataframe))
def test_log_model_with_code_paths():
artifact_path = "model"
ols = ols_model()
with mlflow.start_run(), mock.patch(
"mlflow.statsmodels._add_code_from_conf_to_system_path"
) as add_mock:
mlflow.statsmodels.log_model(ols.model, artifact_path, code_paths=[__file__])
model_uri = mlflow.get_artifact_uri(artifact_path)
_compare_logged_code_paths(__file__, model_uri, mlflow.statsmodels.FLAVOR_NAME)
mlflow.statsmodels.load_model(model_uri)
add_mock.assert_called()
|
mlflow/mlflow
|
tests/statsmodels/test_statsmodels_model_export.py
|
Python
|
apache-2.0
| 16,600
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to extract build metadata from bazel BUILD.
# To avoid having two sources of truth for the build metadata (build
# targets, source files, header files etc.), this script analyzes the contents
# of bazel BUILD files and generates a YAML file (currently called
# build_autogenerated.yaml). The format and semantics of the generated YAML file
# are chosen to match the format of a "build.yaml" file, which used
# to be the source of truth for the gRPC build before bazel became
# the primary build system.
# A good basic overview of the "build.yaml" format is available here:
# https://github.com/grpc/grpc/blob/master/templates/README.md. Note that
# while useful as an overview, the doc does not act as formal spec
# (formal spec does not exist in fact) and the doc can be incomplete,
# inaccurate or slightly out of date.
# TODO(jtattermusch): In the future we want to get rid of the legacy build.yaml
# format entirely or simplify it to a point where it becomes self-explanatory
# and doesn't need any detailed documentation.
import collections
import os
import re
import subprocess
import sys
from typing import Any, Dict, Iterable, List, Optional
import xml.etree.ElementTree as ET
import build_cleaner
import yaml
BuildMetadata = Dict[str, Any]
BuildDict = Dict[str, BuildMetadata]
BuildYaml = Dict[str, Any]
def _bazel_query_xml_tree(query: str) -> ET.Element:
"""Get xml output of bazel query invocation, parsed as XML tree"""
output = subprocess.check_output(
['tools/bazel', 'query', '--noimplicit_deps', '--output', 'xml', query])
return ET.fromstring(output)
def _rule_dict_from_xml_node(rule_xml_node):
"""Converts XML node representing a rule (obtained from "bazel query --output xml") to a dictionary that contains all the metadata we will need."""
result = {
'class': rule_xml_node.attrib.get('class'),
'name': rule_xml_node.attrib.get('name'),
'srcs': [],
'hdrs': [],
'deps': [],
'data': [],
'tags': [],
'args': [],
'generator_function': None,
'size': None,
'flaky': False,
}
for child in rule_xml_node:
# all the metadata we want is stored under "list" tags
if child.tag == 'list':
list_name = child.attrib['name']
if list_name in ['srcs', 'hdrs', 'deps', 'data', 'tags', 'args']:
result[list_name] += [item.attrib['value'] for item in child]
if child.tag == 'string':
string_name = child.attrib['name']
if string_name in ['generator_function', 'size']:
result[string_name] = child.attrib['value']
if child.tag == 'boolean':
bool_name = child.attrib['name']
if bool_name in ['flaky']:
result[bool_name] = child.attrib['value'] == 'true'
return result
def _extract_rules_from_bazel_xml(xml_tree):
"""Extract bazel rules from an XML tree node obtained from "bazel query --output xml" command."""
result = {}
for child in xml_tree:
if child.tag == 'rule':
rule_dict = _rule_dict_from_xml_node(child)
rule_clazz = rule_dict['class']
rule_name = rule_dict['name']
if rule_clazz in [
'cc_library',
'cc_binary',
'cc_test',
'cc_proto_library',
'proto_library',
'upb_proto_library',
'upb_proto_reflection_library',
]:
if rule_name in result:
raise Exception('Rule %s already present' % rule_name)
result[rule_name] = rule_dict
return result
def _get_bazel_label(target_name: str) -> str:
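    """Converts a short target name into a full bazel label.

    Illustrative examples (derived from the logic below):
        'gpr' -> '//:gpr'
        'src/compiler:grpc_plugin_support' -> '//src/compiler:grpc_plugin_support'
    """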
if ':' in target_name:
return '//%s' % target_name
else:
return '//:%s' % target_name
def _extract_source_file_path(label: str) -> str:
"""Gets relative path to source file from bazel deps listing"""
if label.startswith('//'):
label = label[len('//'):]
# labels in form //:src/core/lib/surface/call_test_only.h
if label.startswith(':'):
label = label[len(':'):]
# labels in form //test/core/util:port.cc
label = label.replace(':', '/')
return label
def _extract_public_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_nonpublic_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of non-public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//') and not dep.startswith(
'//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_sources(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of source files from a bazel rule"""
result = []
for dep in bazel_rule['srcs']:
if dep.startswith('//') and (dep.endswith('.cc') or dep.endswith('.c')
or dep.endswith('.proto')):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_deps(bazel_rule: BuildMetadata,
bazel_rules: BuildDict) -> List[str]:
"""Gets list of deps from from a bazel rule"""
return list(sorted(bazel_rule['deps']))
def _create_target_from_bazel_rule(target_name: str,
bazel_rules: BuildDict) -> BuildMetadata:
"""Create build.yaml-like target definition from bazel metadata"""
bazel_rule = bazel_rules[_get_bazel_label(target_name)]
# Create a template for our target from the bazel rule. Initially we only
# populate some "private" fields with the original info we got from bazel
# and only later we will populate the public fields (once we do some extra
# postprocessing).
result = {
'name': target_name,
'_PUBLIC_HEADERS_BAZEL': _extract_public_headers(bazel_rule),
'_HEADERS_BAZEL': _extract_nonpublic_headers(bazel_rule),
'_SRC_BAZEL': _extract_sources(bazel_rule),
'_DEPS_BAZEL': _extract_deps(bazel_rule, bazel_rules),
'public_headers': bazel_rule['_COLLAPSED_PUBLIC_HEADERS'],
'headers': bazel_rule['_COLLAPSED_HEADERS'],
'src': bazel_rule['_COLLAPSED_SRCS'],
'deps': bazel_rule['_COLLAPSED_DEPS'],
}
return result
def _external_dep_name_from_bazel_dependency(bazel_dep: str) -> Optional[str]:
"""Returns name of dependency if external bazel dependency is provided or None"""
if bazel_dep.startswith('@com_google_absl//'):
        # special case for adding a dependency on one of the absl libraries (there is not just one absl library)
prefixlen = len('@com_google_absl//')
return bazel_dep[prefixlen:]
elif bazel_dep == '//external:upb_lib':
return 'upb'
elif bazel_dep == '//external:benchmark':
return 'benchmark'
elif bazel_dep == '//external:libssl':
return 'libssl'
else:
# all the other external deps such as protobuf, cares, zlib
# don't need to be listed explicitly, they are handled automatically
# by the build system (make, cmake)
return None
def _compute_transitive_metadata(
rule_name: str, bazel_rules: Any,
bazel_label_to_dep_name: Dict[str, str]) -> None:
"""Computes the final build metadata for Bazel target with rule_name.
The dependencies that will appear on the deps list are:
* Public build targets including binaries and tests;
* External targets, like absl, re2.
All other intermediate dependencies will be merged, which means their
    source files, headers, etc. will be collected into one build target. This
step of processing will greatly reduce the complexity of the generated
build specifications for other build systems, like CMake, Make, setuptools.
The final build metadata are:
* _TRANSITIVE_DEPS: all the transitive dependencies including intermediate
targets;
    * _COLLAPSED_DEPS: dependencies that fit our requirement above, and it
will remove duplicated items and produce the shortest
possible dependency list in alphabetical order;
* _COLLAPSED_SRCS: the merged source files;
* _COLLAPSED_PUBLIC_HEADERS: the merged public headers;
* _COLLAPSED_HEADERS: the merged non-public headers;
* _EXCLUDE_DEPS: intermediate targets to exclude when performing collapsing
of sources and dependencies.
For the collapsed_deps, the algorithm improved cases like:
The result in the past:
end2end_tests -> [grpc_test_util, grpc, gpr, address_sorting, upb]
grpc_test_util -> [grpc, gpr, address_sorting, upb, ...]
grpc -> [gpr, address_sorting, upb, ...]
The result of the algorithm:
end2end_tests -> [grpc_test_util]
grpc_test_util -> [grpc]
grpc -> [gpr, address_sorting, upb, ...]
"""
bazel_rule = bazel_rules[rule_name]
direct_deps = _extract_deps(bazel_rule, bazel_rules)
transitive_deps = set()
collapsed_deps = set()
exclude_deps = set()
collapsed_srcs = set(_extract_sources(bazel_rule))
collapsed_public_headers = set(_extract_public_headers(bazel_rule))
collapsed_headers = set(_extract_nonpublic_headers(bazel_rule))
for dep in direct_deps:
external_dep_name_maybe = _external_dep_name_from_bazel_dependency(dep)
if dep in bazel_rules:
# Descend recursively, but no need to do that for external deps
if external_dep_name_maybe is None:
if "_PROCESSING_DONE" not in bazel_rules[dep]:
# This item is not processed before, compute now
_compute_transitive_metadata(dep, bazel_rules,
bazel_label_to_dep_name)
transitive_deps.update(bazel_rules[dep].get(
'_TRANSITIVE_DEPS', []))
collapsed_deps.update(
collapsed_deps, bazel_rules[dep].get('_COLLAPSED_DEPS', []))
exclude_deps.update(bazel_rules[dep].get('_EXCLUDE_DEPS', []))
# This dep is a public target, add it as a dependency
if dep in bazel_label_to_dep_name:
transitive_deps.update([bazel_label_to_dep_name[dep]])
collapsed_deps.update(collapsed_deps,
[bazel_label_to_dep_name[dep]])
# Add all the transitive deps of our every public dep to exclude
# list since we want to avoid building sources that are already
# built by our dependencies
exclude_deps.update(bazel_rules[dep]['_TRANSITIVE_DEPS'])
continue
# This dep is an external target, add it as a dependency
if external_dep_name_maybe is not None:
transitive_deps.update([external_dep_name_maybe])
collapsed_deps.update(collapsed_deps, [external_dep_name_maybe])
continue
# Direct dependencies are part of transitive dependencies
transitive_deps.update(direct_deps)
# Calculate transitive public deps (needed for collapsing sources)
transitive_public_deps = set(
[x for x in transitive_deps if x in bazel_label_to_dep_name])
# Remove intermediate targets that our public dependencies already depend
    # on. This is the step that further shortens the deps list.
collapsed_deps = set([x for x in collapsed_deps if x not in exclude_deps])
# Compute the final source files and headers for this build target whose
# name is `rule_name` (input argument of this function).
#
    # Imagine a public target PX that has transitive deps [IA, IB, PY, IC, PZ]. PX,
# PY and PZ are public build targets. And IA, IB, IC are intermediate
# targets. In addition, PY depends on IC.
#
# Translate the condition into dependency graph:
# PX -> [IA, IB, PY, IC, PZ]
# PY -> [IC]
# Public targets: [PX, PY, PZ]
#
# The collapsed dependencies of PX: [PY, PZ].
    # The excluded dependencies of PX: [PY, IC, PZ].
# (IC is excluded as a dependency of PX. It is already included in PY, hence
# it would be redundant to include it again.)
#
# Target PX should include source files and headers of [PX, IA, IB] as final
# build metadata.
for dep in transitive_deps:
if dep not in exclude_deps and dep not in transitive_public_deps:
if dep in bazel_rules:
collapsed_srcs.update(_extract_sources(bazel_rules[dep]))
collapsed_public_headers.update(
_extract_public_headers(bazel_rules[dep]))
collapsed_headers.update(
_extract_nonpublic_headers(bazel_rules[dep]))
# This item is a "visited" flag
bazel_rule['_PROCESSING_DONE'] = True
    # The following items are described in the docstring.
bazel_rule['_TRANSITIVE_DEPS'] = list(sorted(transitive_deps))
bazel_rule['_COLLAPSED_DEPS'] = list(sorted(collapsed_deps))
bazel_rule['_COLLAPSED_SRCS'] = list(sorted(collapsed_srcs))
bazel_rule['_COLLAPSED_PUBLIC_HEADERS'] = list(
sorted(collapsed_public_headers))
bazel_rule['_COLLAPSED_HEADERS'] = list(sorted(collapsed_headers))
bazel_rule['_EXCLUDE_DEPS'] = list(sorted(exclude_deps))
# TODO(jtattermusch): deduplicate with transitive_dependencies.py (which has a slightly different logic)
# TODO(jtattermusch): This is done to avoid introducing too many intermediate
# libraries into the build.yaml-based builds (which might cause issues
# building language-specific artifacts) and also because the libraries
# in build.yaml-based build are generally considered units of distributions
# (= public libraries that are visible to the user and are installable),
# while in bazel builds it is customary to define larger number of smaller
# "sublibraries". The need for elision (and expansion)
# of intermediate libraries can be re-evaluated in the future.
def _populate_transitive_metadata(bazel_rules: Any,
public_dep_names: Iterable[str]) -> None:
"""Add 'transitive_deps' field for each of the rules"""
# Create the map between Bazel label and public dependency name
bazel_label_to_dep_name = {}
for dep_name in public_dep_names:
bazel_label_to_dep_name[_get_bazel_label(dep_name)] = dep_name
# Make sure we reached all the Bazel rules
# TODO(lidiz) potentially we could only update a subset of rules
for rule_name in bazel_rules:
if '_PROCESSING_DONE' not in bazel_rules[rule_name]:
_compute_transitive_metadata(rule_name, bazel_rules,
bazel_label_to_dep_name)
def update_test_metadata_with_transitive_metadata(
all_extra_metadata: BuildDict, bazel_rules: BuildDict) -> None:
"""Patches test build metadata with transitive metadata."""
for lib_name, lib_dict in list(all_extra_metadata.items()):
        # Skip if it isn't a test target
if lib_dict.get('build') != 'test' or lib_dict.get('_TYPE') != 'target':
continue
bazel_rule = bazel_rules[_get_bazel_label(lib_name)]
if '//external:benchmark' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['benchmark'] = True
lib_dict['defaults'] = 'benchmark'
if '//external:gtest' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['gtest'] = True
lib_dict['language'] = 'c++'
def _get_transitive_protos(bazel_rules, t):
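    """Returns the .proto source files reachable from target t via a breadth-first walk over the 'deps' edges recorded in bazel_rules."""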
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = bazel_rules.get(name, None)
if rule:
for dep in rule['deps']:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule['srcs']:
if src.endswith('.proto'):
ret.append(src)
return list(set(ret))
def _expand_upb_proto_library_rules(bazel_rules):
# Expand the .proto files from UPB proto library rules into the pre-generated
# upb.h and upb.c files.
GEN_UPB_ROOT = '//:src/core/ext/upb-generated/'
GEN_UPBDEFS_ROOT = '//:src/core/ext/upbdefs-generated/'
EXTERNAL_LINKS = [('@com_google_protobuf//', ':src/'),
('@com_google_googleapis//', ''),
('@com_github_cncf_udpa//', ''),
('@com_envoyproxy_protoc_gen_validate//', ''),
('@envoy_api//', ''), ('@opencensus_proto//', '')]
for name, bazel_rule in bazel_rules.items():
gen_func = bazel_rule.get('generator_function', None)
if gen_func in ('grpc_upb_proto_library',
'grpc_upb_proto_reflection_library'):
# get proto dependency
deps = bazel_rule['deps']
if len(deps) != 1:
raise Exception(
'upb rule "{0}" should have 1 proto dependency but has "{1}"'
.format(name, deps))
# deps is not properly fetched from bazel query for upb_proto_library target
# so add the upb dependency manually
bazel_rule['deps'] = [
'//external:upb_lib', '//external:upb_lib_descriptor',
'//external:upb_generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
]
# populate the upb_proto_library rule with pre-generated upb headers
# and sources using proto_rule
protos = _get_transitive_protos(bazel_rules, deps[0])
if len(protos) == 0:
raise Exception(
'upb rule "{0}" should have at least one proto file.'.
format(name))
srcs = []
hdrs = []
for proto_src in protos:
for external_link in EXTERNAL_LINKS:
if proto_src.startswith(external_link[0]):
proto_src = proto_src[len(external_link[0]) +
len(external_link[1]):]
break
if proto_src.startswith('@'):
raise Exception('"{0}" is unknown workspace.'.format(name))
proto_src = _extract_source_file_path(proto_src)
ext = '.upb' if gen_func == 'grpc_upb_proto_library' else '.upbdefs'
root = GEN_UPB_ROOT if gen_func == 'grpc_upb_proto_library' else GEN_UPBDEFS_ROOT
srcs.append(root + proto_src.replace('.proto', ext + '.c'))
hdrs.append(root + proto_src.replace('.proto', ext + '.h'))
bazel_rule['srcs'] = srcs
bazel_rule['hdrs'] = hdrs
def _generate_build_metadata(build_extra_metadata: BuildDict,
bazel_rules: BuildDict) -> BuildDict:
"""Generate build metadata in build.yaml-like format bazel build metadata and build.yaml-specific "extra metadata"."""
lib_names = list(build_extra_metadata.keys())
result = {}
for lib_name in lib_names:
lib_dict = _create_target_from_bazel_rule(lib_name, bazel_rules)
# populate extra properties from the build.yaml-specific "extra metadata"
lib_dict.update(build_extra_metadata.get(lib_name, {}))
# store to results
result[lib_name] = lib_dict
# Rename targets marked with "_RENAME" extra metadata.
# This is mostly a cosmetic change to ensure that we end up with build.yaml target
# names we're used to from the past (and also to avoid too long target names).
# The rename step needs to be made after we're done with most of processing logic
# otherwise the already-renamed libraries will have different names than expected
for lib_name in lib_names:
to_name = build_extra_metadata.get(lib_name, {}).get('_RENAME', None)
if to_name:
# store lib under the new name and also change its 'name' property
if to_name in result:
raise Exception('Cannot rename target ' + str(lib_name) + ', ' +
str(to_name) + ' already exists.')
lib_dict = result.pop(lib_name)
lib_dict['name'] = to_name
result[to_name] = lib_dict
# dep names need to be updated as well
for lib_dict_to_update in list(result.values()):
lib_dict_to_update['deps'] = list([
to_name if dep == lib_name else dep
for dep in lib_dict_to_update['deps']
])
return result
def _convert_to_build_yaml_like(lib_dict: BuildMetadata) -> BuildYaml:
lib_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'library'
]
target_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'target'
]
test_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'test'
]
# list libraries and targets in predefined order
lib_list = [lib_dict[lib_name] for lib_name in lib_names]
target_list = [lib_dict[lib_name] for lib_name in target_names]
test_list = [lib_dict[lib_name] for lib_name in test_names]
# get rid of temporary private fields prefixed with "_" and some other useless fields
for lib in lib_list:
for field_to_remove in [
k for k in list(lib.keys()) if k.startswith('_')
]:
lib.pop(field_to_remove, None)
for target in target_list:
for field_to_remove in [
k for k in list(target.keys()) if k.startswith('_')
]:
target.pop(field_to_remove, None)
target.pop('public_headers',
None) # public headers make no sense for targets
for test in test_list:
for field_to_remove in [
k for k in list(test.keys()) if k.startswith('_')
]:
test.pop(field_to_remove, None)
test.pop('public_headers',
None) # public headers make no sense for tests
build_yaml_like = {
'libs': lib_list,
'filegroups': [],
'targets': target_list,
'tests': test_list,
}
return build_yaml_like
def _extract_cc_tests(bazel_rules: BuildDict) -> List[str]:
"""Gets list of cc_test tests from bazel rules"""
result = []
for bazel_rule in list(bazel_rules.values()):
if bazel_rule['class'] == 'cc_test':
test_name = bazel_rule['name']
if test_name.startswith('//'):
prefixlen = len('//')
result.append(test_name[prefixlen:])
return list(sorted(result))
def _exclude_unwanted_cc_tests(tests: List[str]) -> List[str]:
"""Filters out bazel tests that we don't want to run with other build systems or we cannot build them reasonably"""
# most qps tests are autogenerated, we are fine without them
tests = [test for test in tests if not test.startswith('test/cpp/qps:')]
# microbenchmarks aren't needed for checking correctness
tests = [
test for test in tests
if not test.startswith('test/cpp/microbenchmarks:')
]
tests = [
test for test in tests
if not test.startswith('test/core/promise/benchmark:')
]
# we have trouble with census dependency outside of bazel
tests = [
test for test in tests
if not test.startswith('test/cpp/ext/filters/census:') and
not test.startswith('test/core/xds:xds_channel_stack_modifier_test')
]
# missing opencensus/stats/stats.h
tests = [
test for test in tests if not test.startswith(
'test/cpp/end2end:server_load_reporting_end2end_test')
]
tests = [
test for test in tests if not test.startswith(
'test/cpp/server/load_reporter:lb_load_reporter_test')
]
# The test uses --running_under_bazel cmdline argument
# To avoid the trouble needing to adjust it, we just skip the test
tests = [
test for test in tests if not test.startswith(
'test/cpp/naming:resolver_component_tests_runner_invoker')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:time_change_test')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:client_crash_test')
]
# the test requires 'server_crash_test_client' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:server_crash_test')
]
# test never existed under build.yaml and it fails -> skip it
tests = [
test for test in tests
if not test.startswith('test/core/tsi:ssl_session_cache_test')
]
# the binary of this test does not get built with cmake
tests = [
test for test in tests
if not test.startswith('test/cpp/util:channelz_sampler_test')
]
# we don't need to generate fuzzers outside of bazel
tests = [test for test in tests if not test.endswith('_fuzzer')]
return tests
def _generate_build_extra_metadata_for_tests(
tests: List[str], bazel_rules: BuildDict) -> BuildDict:
"""For given tests, generate the "extra metadata" that we need for our "build.yaml"-like output. The extra metadata is generated from the bazel rule metadata by using a bunch of heuristics."""
test_metadata = {}
for test in tests:
test_dict = {'build': 'test', '_TYPE': 'target'}
bazel_rule = bazel_rules[_get_bazel_label(test)]
bazel_tags = bazel_rule['tags']
if 'manual' in bazel_tags:
# don't run the tests marked as "manual"
test_dict['run'] = False
if bazel_rule['flaky']:
# don't run tests that are marked as "flaky" under bazel
# because that would only add noise for the run_tests.py tests
# and seeing more failures for tests that we already know are flaky
# doesn't really help anything
test_dict['run'] = False
if 'no_uses_polling' in bazel_tags:
test_dict['uses_polling'] = False
if 'grpc_fuzzer' == bazel_rule['generator_function']:
# currently we hand-list fuzzers instead of generating them automatically
# because there's no way to obtain maxlen property from bazel BUILD file.
print(('skipping fuzzer ' + test))
continue
if 'bazel_only' in bazel_tags:
continue
# if any tags that restrict platform compatibility are present,
# generate the "platforms" field accordingly
# TODO(jtattermusch): there is also a "no_linux" tag, but we cannot take
# it into account as it is applied by grpc_cc_test when poller expansion
# is made (for tests where uses_polling=True). So for now, we just
# assume all tests are compatible with linux and ignore the "no_linux" tag
# completely.
known_platform_tags = set(['no_windows', 'no_mac'])
if set(bazel_tags).intersection(known_platform_tags):
platforms = []
# assume all tests are compatible with linux and posix
platforms.append('linux')
platforms.append(
'posix') # there is no posix-specific tag in bazel BUILD
if not 'no_mac' in bazel_tags:
platforms.append('mac')
if not 'no_windows' in bazel_tags:
platforms.append('windows')
test_dict['platforms'] = platforms
cmdline_args = bazel_rule['args']
if cmdline_args:
test_dict['args'] = list(cmdline_args)
if test.startswith('test/cpp'):
test_dict['language'] = 'c++'
elif test.startswith('test/core'):
test_dict['language'] = 'c'
else:
            raise Exception('wrong test: ' + test)
# short test name without the path.
# There can be name collisions, but we will resolve them later
simple_test_name = os.path.basename(_extract_source_file_path(test))
test_dict['_RENAME'] = simple_test_name
test_metadata[test] = test_dict
# detect duplicate test names
tests_by_simple_name = {}
for test_name, test_dict in list(test_metadata.items()):
simple_test_name = test_dict['_RENAME']
if not simple_test_name in tests_by_simple_name:
tests_by_simple_name[simple_test_name] = []
tests_by_simple_name[simple_test_name].append(test_name)
# choose alternative names for tests with a name collision
for collision_list in list(tests_by_simple_name.values()):
if len(collision_list) > 1:
for test_name in collision_list:
long_name = test_name.replace('/', '_').replace(':', '_')
print((
'short name of "%s" collides with another test, renaming to %s'
% (test_name, long_name)))
test_metadata[test_name]['_RENAME'] = long_name
return test_metadata
def _detect_and_print_issues(build_yaml_like: BuildYaml) -> None:
"""Try detecting some unusual situations and warn about them."""
for tgt in build_yaml_like['targets']:
if tgt['build'] == 'test':
for src in tgt['src']:
if src.startswith('src/') and not src.endswith('.proto'):
print(('source file from under "src/" tree used in test ' +
tgt['name'] + ': ' + src))
# extra metadata that will be used to construct build.yaml
# these are mostly extra properties that we weren't able to obtain from the bazel build
# _TYPE: whether this is library, target or test
# _RENAME: whether this target should be renamed to a different name (to match expectations of make and cmake builds)
_BUILD_EXTRA_METADATA = {
'third_party/address_sorting:address_sorting': {
'language': 'c',
'build': 'all',
'_RENAME': 'address_sorting'
},
'gpr': {
'language': 'c',
'build': 'all',
},
'grpc': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpc++': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
'grpc++_alts': {
'language': 'c++',
'build': 'all',
'baselib': True
},
'grpc++_error_details': {
'language': 'c++',
'build': 'all'
},
'grpc++_reflection': {
'language': 'c++',
'build': 'all'
},
'grpc++_unsecure': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
# TODO(jtattermusch): do we need to set grpc_csharp_ext's LDFLAGS for wrapping memcpy in the same way as in build.yaml?
'grpc_csharp_ext': {
'language': 'c',
'build': 'all',
},
'grpc_unsecure': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpcpp_channelz': {
'language': 'c++',
'build': 'all'
},
'grpc++_test': {
'language': 'c++',
'build': 'private',
},
'src/compiler:grpc_plugin_support': {
'language': 'c++',
'build': 'protoc',
'_RENAME': 'grpc_plugin_support'
},
'src/compiler:grpc_cpp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_cpp_plugin'
},
'src/compiler:grpc_csharp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_csharp_plugin'
},
'src/compiler:grpc_node_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_node_plugin'
},
'src/compiler:grpc_objective_c_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_objective_c_plugin'
},
'src/compiler:grpc_php_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_php_plugin'
},
'src/compiler:grpc_python_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_python_plugin'
},
'src/compiler:grpc_ruby_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_ruby_plugin'
},
# TODO(jtattermusch): consider adding grpc++_core_stats
# test support libraries
'test/core/util:grpc_test_util': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util'
},
'test/core/util:grpc_test_util_unsecure': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util_unsecure'
},
# TODO(jtattermusch): consider adding grpc++_test_util_unsecure - it doesn't seem to be used by bazel build (don't forget to set secure: False)
'test/cpp/util:test_config': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_config'
},
'test/cpp/util:test_util': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_util'
},
# end2end test support libraries
'test/core/end2end:end2end_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_tests'
},
'test/core/end2end:end2end_nosec_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_nosec_tests'
},
# benchmark support libraries
'test/cpp/microbenchmarks:helpers': {
'language': 'c++',
'build': 'test',
'defaults': 'benchmark',
'_RENAME': 'benchmark_helpers'
},
'test/cpp/interop:interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_client'
},
'test/cpp/interop:interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_server'
},
'test/cpp/interop:xds_interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_client'
},
'test/cpp/interop:xds_interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_server'
},
'test/cpp/interop:http2_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'http2_client'
},
'test/cpp/qps:qps_json_driver': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_json_driver'
},
'test/cpp/qps:qps_worker': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_worker'
},
'test/cpp/util:grpc_cli': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'grpc_cli'
},
# TODO(jtattermusch): create_jwt and verify_jwt breaks distribtests because it depends on grpc_test_utils and thus requires tests to be built
# For now it's ok to disable them as these binaries aren't very useful anyway.
#'test/core/security:create_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_create_jwt' },
#'test/core/security:verify_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_verify_jwt' },
# TODO(jtattermusch): add remaining tools such as grpc_print_google_default_creds_token (they are not used by bazel build)
# TODO(jtattermusch): these fuzzers had no build.yaml equivalent
# test/core/compression:message_compress_fuzzer
# test/core/compression:message_decompress_fuzzer
# test/core/compression:stream_compression_fuzzer
# test/core/compression:stream_decompression_fuzzer
# test/core/slice:b64_decode_fuzzer
# test/core/slice:b64_encode_fuzzer
}
# We need a complete picture of all the targets and dependencies we're interested in
# so we run multiple bazel queries and merge the results.
_BAZEL_DEPS_QUERIES = [
'deps("//test/...")',
'deps("//:all")',
'deps("//src/compiler/...")',
'deps("//src/proto/...")',
# The ^ is needed to differentiate proto_library from go_proto_library
'deps(kind("^proto_library", @envoy_api//envoy/...))',
]
# Step 1: run a bunch of "bazel query --output xml" queries to collect
# the raw build metadata from the bazel build.
# At the end of this step we will have a dictionary of bazel rules
# that are interesting to us (libraries, binaries, etc.) along
# with their most important metadata (sources, headers, dependencies)
#
# Example of a single bazel rule after being populated:
# '//:grpc' : { 'class': 'cc_library',
# 'hdrs': ['//:include/grpc/byte_buffer.h', ... ],
# 'srcs': ['//:src/core/lib/surface/init.cc', ... ],
# 'deps': ['//:grpc_common', ...],
# ... }
bazel_rules = {}
for query in _BAZEL_DEPS_QUERIES:
bazel_rules.update(
_extract_rules_from_bazel_xml(_bazel_query_xml_tree(query)))
# Step 1.5: The sources for UPB protos are pre-generated, so we want
# to expand the UPB proto library bazel rules into the generated
# .upb.h and .upb.c files.
_expand_upb_proto_library_rules(bazel_rules)
# Step 2: Extract the known bazel cc_test tests. While most tests
# will be buildable with other build systems just fine, some of these tests
# would be too difficult to build and run with other build systems,
# so we simply exclude the ones we don't want.
# Note that while making tests buildable with other build systems
# than just bazel is extra effort, we still need to do that for these
# reasons:
# - If our cmake build doesn't have any tests at all, it's hard to make
# sure that what it built actually works (we need at least some "smoke tests").
# This is quite important because the build flags between bazel / non-bazel builds might differ
# (sometimes it's for interesting reasons that are not easy to overcome)
# which makes it even more important to have at least some tests for cmake/make
# - Our portability suite actually runs cmake tests and migration of portability
# suite fully towards bazel might be intricate (e.g. it's unclear whether it's
# possible to get a good enough coverage of different compilers / distros etc.
# with bazel)
# - some things that are considered "tests" in build.yaml-based builds are actually binaries
# we'd want to be able to build anyway (qps_json_worker, interop_client, interop_server, grpc_cli)
# so it's unclear how much make/cmake simplification we would gain by removing just some (but not all) tests
# TODO(jtattermusch): Investigate feasibility of running portability suite with bazel.
tests = _exclude_unwanted_cc_tests(_extract_cc_tests(bazel_rules))
# Step 3: Generate the "extra metadata" for all our build targets.
# While the bazel rules give us most of the information we need,
# the legacy "build.yaml" format requires some additional fields that
# we cannot get just from bazel alone (we call that "extra metadata").
# In this step, we basically analyze the build metadata we have from bazel
# and use heuristics to determine (and sometimes guess) the right
# extra metadata to use for each target.
#
# - For some targets (such as the public libraries, helper libraries
# and executables) determining the right extra metadata is hard to do
# automatically. For these targets, the extra metadata is supplied "manually"
# in form of the _BUILD_EXTRA_METADATA dictionary. That allows us to match
# the semantics of the legacy "build.yaml" as closely as possible.
#
# - For test binaries, it is possible to generate the "extra metadata" mostly
# automatically using a rule-based heuristic approach because most tests
# look and behave alike from the build's perspective.
#
# TODO(jtattermusch): Of course neither "_BUILD_EXTRA_METADATA" nor
# the heuristic approach used for tests are ideal and they cannot be made
# to cover all possible situations (and are tailored to work with the way
# the grpc build currently works), but the idea was to start with something
# reasonably simple that matches the "build.yaml"-like semantics as closely
# as possible (to avoid changing too many things at once) and gradually get
# rid of the legacy "build.yaml"-specific fields one by one. Once that is done,
# only very little "extra metadata" would be needed and/or it would be trivial
# to generate it automatically.
all_extra_metadata = {}
all_extra_metadata.update(_BUILD_EXTRA_METADATA)
all_extra_metadata.update(
_generate_build_extra_metadata_for_tests(tests, bazel_rules))
# Step 4: Compute the build metadata that will be used in the final build.yaml.
# The final build metadata includes transitive dependencies, and sources/headers
# expanded without intermediate dependencies.
# Example:
# '//:grpc' : { ...,
# '_TRANSITIVE_DEPS': ['//:gpr_base', ...],
# '_COLLAPSED_DEPS': ['gpr', ...],
# '_COLLAPSED_SRCS': [...],
# '_COLLAPSED_PUBLIC_HEADERS': [...],
# '_COLLAPSED_HEADERS': [...]
# }
_populate_transitive_metadata(bazel_rules, list(all_extra_metadata.keys()))
# Step 4a: Update the existing test metadata with the updated build metadata.
# Certain build metadata of certain test targets depend on the transitive
# metadata that wasn't available earlier.
update_test_metadata_with_transitive_metadata(all_extra_metadata, bazel_rules)
# Step 5: Generate the final metadata for all the targets.
# This is done by combining the bazel build metadata and the "extra metadata"
# we obtained in the previous step.
# In this step, we also perform some interesting massaging of the target metadata
# to end up with a result that is as similar to the legacy build.yaml data
# as possible.
# - Some targets get renamed (to match the legacy build.yaml target names)
# - Some intermediate libraries get elided ("expanded") to better match the set
# of targets provided by the legacy build.yaml build
#
# Originally the target renaming was introduced to address these concerns:
# - avoid changing too many things at the same time and avoid people getting
#   confused by some well-known targets suddenly being missing
# - Makefile/cmake and also language-specific generators rely on some build
#   targets being called exactly the way they are. Some of our testing
#   scripts also invoke executables (e.g. "qps_json_driver") by their name.
# - The autogenerated test name from bazel includes the package path
# (e.g. "test_cpp_TEST_NAME"). Without renaming, the target names would
# end up pretty ugly (e.g. test_cpp_qps_qps_json_driver).
# TODO(jtattermusch): reevaluate the need for target renaming in the future.
#
# Example of a single generated target:
# 'grpc' : { 'language': 'c',
# 'public_headers': ['include/grpc/byte_buffer.h', ... ],
# 'headers': ['src/core/ext/filters/client_channel/client_channel.h', ... ],
# 'src': ['src/core/lib/surface/init.cc', ... ],
# 'deps': ['gpr', 'address_sorting', ...],
# ... }
all_targets_dict = _generate_build_metadata(all_extra_metadata, bazel_rules)
# Step 6: convert the dictionary with all the targets to a dict that has
# the desired "build.yaml"-like layout.
# TODO(jtattermusch): We use the custom "build.yaml"-like layout because
# currently all other build systems use that format as their source of truth.
# In the future, we can get rid of this custom & legacy format entirely,
# but we would need to update the generators for other build systems
# at the same time.
#
# Layout of the result:
# { 'libs': { TARGET_DICT_FOR_LIB_XYZ, ... },
# 'targets': { TARGET_DICT_FOR_BIN_XYZ, ... },
# 'tests': { TARGET_DICT_FOR_TEST_XYZ, ...} }
build_yaml_like = _convert_to_build_yaml_like(all_targets_dict)
# detect and report some suspicious situations we've seen before
_detect_and_print_issues(build_yaml_like)
# Step 7: Store the build_autogenerated.yaml in a deterministic (=sorted)
# and cleaned-up form.
# A basic overview of the resulting "build.yaml"-like format is here:
# https://github.com/grpc/grpc/blob/master/templates/README.md
# TODO(jtattermusch): The "cleanup" function is taken from the legacy
# build system (which used build.yaml) and can be eventually removed.
build_yaml_string = build_cleaner.cleaned_build_yaml_dict_as_string(
build_yaml_like)
with open('build_autogenerated.yaml', 'w') as file:
file.write(build_yaml_string)
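# For orientation only: a schematic, hypothetical sketch of the resulting
# build_autogenerated.yaml, following the layout described in the comments
# above. Field values are illustrative, not taken from a real run.
#
#   libs:
#   - name: grpc
#     language: c
#     public_headers: [include/grpc/byte_buffer.h, ...]
#     headers: [src/core/ext/filters/client_channel/client_channel.h, ...]
#     src: [src/core/lib/surface/init.cc, ...]
#     deps: [gpr, address_sorting, ...]
#   targets:
#   - name: <binary target>
#     ...
#   tests:
#   - name: <test target>
#     ...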
|
stanley-cheung/grpc
|
tools/buildgen/extract_metadata_from_bazel_xml.py
|
Python
|
apache-2.0
| 46,207
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cloudfiles
import httplib
import json
import mox
from django import http
from django.conf import settings
from django_openstack import api
from glance import client as glance_client
from mox import IsA
from novaclient import service_catalog, client as base_client
from novaclient.keystone import client as keystone_client
from novaclient.v1_1 import client as nova_client
from openstack import compute as OSCompute
from openstackx import admin as OSAdmin
from openstackx import auth as OSAuth
from openstackx import extras as OSExtras
from django_openstack import test
from django_openstack.middleware import keystone
TEST_CONSOLE_KIND = 'vnc'
TEST_EMAIL = 'test@test.com'
TEST_HOSTNAME = 'hostname'
TEST_INSTANCE_ID = '2'
TEST_PASSWORD = '12345'
TEST_PORT = 8000
TEST_RETURN = 'retValue'
TEST_TENANT_DESCRIPTION = 'tenantDescription'
TEST_TENANT_ID = '1234'
TEST_TENANT_NAME = 'foo'
TEST_TOKEN = 'aToken'
TEST_TOKEN_ID = 'userId'
TEST_URL = 'http://%s:%s/something/v1.0' % (TEST_HOSTNAME, TEST_PORT)
TEST_USERNAME = 'testUser'
class Server(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, image, attrs=None):
self.id = id
self.image = image
if attrs is not None:
self.attrs = attrs
def __eq__(self, other):
if self.id != other.id or \
self.image['id'] != other.image['id']:
return False
        # Any extra attributes must match as well.
        for k, v in self.attrs.items():
            if other.attrs.get(k) != v:
                return False
return True
def __ne__(self, other):
return not self == other
class Tenant(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, description, enabled):
self.id = id
self.description = description
self.enabled = enabled
def __eq__(self, other):
return self.id == other.id and \
self.description == other.description and \
self.enabled == other.enabled
def __ne__(self, other):
return not self == other
class Token(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, username, tenant_id, tenant_name,
serviceCatalog=None):
self.id = id
self.user = {'name': username}
self.tenant = {'id': tenant_id, 'name': tenant_name}
self.serviceCatalog = serviceCatalog
def __eq__(self, other):
return self.id == other.id and \
self.user['name'] == other.user['name'] and \
            self.tenant == other.tenant and \
self.serviceCatalog == other.serviceCatalog
def __ne__(self, other):
return not self == other
class APIResource(api.APIResourceWrapper):
""" Simple APIResource for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerObject=None):
if innerObject is None:
class InnerAPIResource(object):
pass
innerObject = InnerAPIResource()
innerObject.foo = 'foo'
innerObject.bar = 'bar'
return APIResource(innerObject)
class APIDict(api.APIDictWrapper):
""" Simple APIDict for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerDict=None):
if innerDict is None:
innerDict = {'foo': 'foo',
'bar': 'bar'}
return APIDict(innerDict)
class APITestCase(test.TestCase):
def setUp(self):
def fake_keystoneclient(request, username=None, password=None,
tenant_id=None, token_id=None, endpoint=None):
return self.stub_keystoneclient()
super(APITestCase, self).setUp()
self._original_keystoneclient = api.keystoneclient
self._original_novaclient = api.novaclient
api.keystoneclient = fake_keystoneclient
api.novaclient = lambda request: self.stub_novaclient()
def stub_novaclient(self):
if not hasattr(self, "novaclient"):
self.mox.StubOutWithMock(nova_client, 'Client')
self.novaclient = self.mox.CreateMock(nova_client.Client)
return self.novaclient
def stub_keystoneclient(self):
if not hasattr(self, "keystoneclient"):
self.mox.StubOutWithMock(keystone_client, 'Client')
self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
return self.keystoneclient
def tearDown(self):
super(APITestCase, self).tearDown()
api.novaclient = self._original_novaclient
api.keystoneclient = self._original_keystoneclient
class APIResourceWrapperTests(test.TestCase):
def test_get_attribute(self):
resource = APIResource.get_instance()
self.assertEqual(resource.foo, 'foo')
def test_get_invalid_attribute(self):
resource = APIResource.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
def test_get_inner_missing_attribute(self):
resource = APIResource.get_instance()
with self.assertRaises(AttributeError):
resource.baz
class APIDictWrapperTests(test.TestCase):
# APIDict allows for both attribute access and dictionary style [element]
# style access. Test both
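    # For example, for a wrapped dict {'foo': 'foo'} both forms return the
    # same value (illustrative only):
    #   resource.foo == 'foo'
    #   resource['foo'] == 'foo'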
def test_get_item(self):
resource = APIDict.get_instance()
self.assertEqual(resource.foo, 'foo')
self.assertEqual(resource['foo'], 'foo')
def test_get_invalid_item(self):
resource = APIDict.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
with self.assertRaises(KeyError):
resource['missing']
def test_get_inner_missing_attribute(self):
resource = APIDict.get_instance()
with self.assertRaises(AttributeError):
resource.baz
with self.assertRaises(KeyError):
resource['baz']
def test_get_with_default(self):
resource = APIDict.get_instance()
self.assertEqual(resource.get('foo'), 'foo')
self.assertIsNone(resource.get('baz'))
self.assertEqual('retValue', resource.get('baz', 'retValue'))
# Wrapper classes that only define _attrs don't need extra testing.
# Wrapper classes that have other attributes or methods need testing
class ImageWrapperTests(test.TestCase):
dict_with_properties = {
'properties':
{'image_state': 'running'},
'size': 100,
}
dict_without_properties = {
'size': 100,
}
def test_get_properties(self):
image = api.Image(self.dict_with_properties)
image_props = image.properties
self.assertIsInstance(image_props, api.ImageProperties)
self.assertEqual(image_props.image_state, 'running')
def test_get_other(self):
image = api.Image(self.dict_with_properties)
self.assertEqual(image.size, 100)
def test_get_properties_missing(self):
image = api.Image(self.dict_without_properties)
with self.assertRaises(AttributeError):
image.properties
def test_get_other_missing(self):
image = api.Image(self.dict_without_properties)
with self.assertRaises(AttributeError):
self.assertNotIn('missing', image._attrs,
msg="Test assumption broken. Find new missing attribute")
image.missing
class ServerWrapperTests(test.TestCase):
HOST = 'hostname'
ID = '1'
IMAGE_NAME = 'imageName'
IMAGE_OBJ = {'id': '3', 'links': [{'href': '3', u'rel': u'bookmark'}]}
def setUp(self):
super(ServerWrapperTests, self).setUp()
# these are all objects "fetched" from the api
self.inner_attrs = {'host': self.HOST}
self.inner_server = Server(self.ID, self.IMAGE_OBJ, self.inner_attrs)
self.inner_server_no_attrs = Server(self.ID, self.IMAGE_OBJ)
#self.request = self.mox.CreateMock(http.HttpRequest)
def test_get_attrs(self):
server = api.Server(self.inner_server, self.request)
attrs = server.attrs
# for every attribute in the "inner" object passed to the api wrapper,
# see if it can be accessed through the api.ServerAttribute instance
for k in self.inner_attrs:
self.assertEqual(attrs.__getattr__(k), self.inner_attrs[k])
def test_get_other(self):
server = api.Server(self.inner_server, self.request)
self.assertEqual(server.id, self.ID)
def test_get_attrs_missing(self):
server = api.Server(self.inner_server_no_attrs, self.request)
with self.assertRaises(AttributeError):
server.attrs
def test_get_other_missing(self):
server = api.Server(self.inner_server, self.request)
with self.assertRaises(AttributeError):
self.assertNotIn('missing', server._attrs,
msg="Test assumption broken. Find new missing attribute")
server.missing
def test_image_name(self):
self.mox.StubOutWithMock(api, 'image_get')
api.image_get(IsA(http.HttpRequest),
self.IMAGE_OBJ['id']
).AndReturn(api.Image({'name': self.IMAGE_NAME}))
server = api.Server(self.inner_server, self.request)
self.mox.ReplayAll()
image_name = server.image_name
self.assertEqual(image_name, self.IMAGE_NAME)
self.mox.VerifyAll()
class ApiHelperTests(test.TestCase):
""" Tests for functions that don't use one of the api objects """
def test_url_for(self):
GLANCE_URL = 'http://glance/glanceapi/'
NOVA_URL = 'http://nova/novapi/'
url = api.url_for(self.request, 'image')
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'image', admin=False)
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'image', admin=True)
self.assertEqual(url, GLANCE_URL + 'admin')
url = api.url_for(self.request, 'compute')
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'compute', admin=False)
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'compute', admin=True)
self.assertEqual(url, NOVA_URL + 'admin')
self.assertNotIn('notAnApi', self.request.user.service_catalog,
'Select a new nonexistent service catalog key')
with self.assertRaises(api.ServiceCatalogException):
url = api.url_for(self.request, 'notAnApi')
class TenantAPITests(APITestCase):
def test_tenant_create(self):
DESCRIPTION = 'aDescription'
ENABLED = True
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.create(TEST_TENANT_ID, DESCRIPTION,
ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_create(self.request, TEST_TENANT_ID,
DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_get(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.get(TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_get(self.request, TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_list(self):
tenants = (TEST_RETURN, TEST_RETURN + '2')
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.list().AndReturn(tenants)
self.mox.ReplayAll()
ret_val = api.tenant_list(self.request)
self.assertEqual(len(ret_val), len(tenants))
for tenant in ret_val:
self.assertIsInstance(tenant, api.Tenant)
self.assertIn(tenant._apiresource, tenants)
self.mox.VerifyAll()
def test_tenant_update(self):
DESCRIPTION = 'aDescription'
ENABLED = True
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.update(TEST_TENANT_ID, TEST_TENANT_NAME,
DESCRIPTION, ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_update(self.request, TEST_TENANT_ID,
TEST_TENANT_NAME, DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class UserAPITests(APITestCase):
def test_user_create(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.create(TEST_USERNAME, TEST_PASSWORD, TEST_EMAIL,
TEST_TENANT_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_create(self.request, TEST_USERNAME, TEST_EMAIL,
TEST_PASSWORD, TEST_TENANT_ID, True)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_delete(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.delete(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_delete(self.request, TEST_USERNAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_user_get(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.get(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_get(self.request, TEST_USERNAME)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_list(self):
users = (TEST_USERNAME, TEST_USERNAME + '2')
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.list(tenant_id=None).AndReturn(users)
self.mox.ReplayAll()
ret_val = api.user_list(self.request)
self.assertEqual(len(ret_val), len(users))
for user in ret_val:
self.assertIsInstance(user, api.User)
self.assertIn(user._apiresource, users)
self.mox.VerifyAll()
def test_user_update_email(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_email(TEST_USERNAME,
TEST_EMAIL).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_email(self.request, TEST_USERNAME,
TEST_EMAIL)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_password(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_password(TEST_USERNAME,
TEST_PASSWORD).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_password(self.request, TEST_USERNAME,
TEST_PASSWORD)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_tenant(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_tenant(TEST_USERNAME,
TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_tenant(self.request, TEST_USERNAME,
TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class RoleAPITests(APITestCase):
def test_role_add_for_tenant_user(self):
keystoneclient = self.stub_keystoneclient()
role = api.Role(APIResource.get_instance())
role.id = TEST_RETURN
role.name = TEST_RETURN
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.add_user_to_tenant(TEST_TENANT_ID,
TEST_USERNAME,
TEST_RETURN).AndReturn(role)
api._get_role = self.mox.CreateMockAnything()
api._get_role(IsA(http.HttpRequest), IsA(str)).AndReturn(role)
self.mox.ReplayAll()
ret_val = api.role_add_for_tenant_user(self.request,
TEST_TENANT_ID,
TEST_USERNAME,
TEST_RETURN)
self.assertEqual(ret_val, role)
self.mox.VerifyAll()
class AdminApiTests(APITestCase):
def stub_admin_api(self, count=1):
self.mox.StubOutWithMock(api, 'admin_api')
admin_api = self.mox.CreateMock(OSAdmin.Admin)
for i in range(count):
api.admin_api(IsA(http.HttpRequest)).AndReturn(admin_api)
return admin_api
def test_get_admin_api(self):
self.mox.StubOutClassWithMocks(OSAdmin, 'Admin')
OSAdmin.Admin(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.admin_api(self.request))
self.mox.VerifyAll()
def test_flavor_create(self):
FLAVOR_DISK = 1000
FLAVOR_ID = 6
FLAVOR_MEMORY = 1024
FLAVOR_NAME = 'newFlavor'
FLAVOR_VCPU = 2
admin_api = self.stub_admin_api()
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.create(FLAVOR_NAME, FLAVOR_MEMORY, FLAVOR_VCPU,
FLAVOR_DISK, FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_create(self.request, FLAVOR_NAME,
str(FLAVOR_MEMORY), str(FLAVOR_VCPU),
str(FLAVOR_DISK), FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_flavor_delete(self):
FLAVOR_ID = 6
admin_api = self.stub_admin_api(count=2)
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.delete(FLAVOR_ID, False).AndReturn(TEST_RETURN)
admin_api.flavors.delete(FLAVOR_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_delete(self.request, FLAVOR_ID)
self.assertIsNone(ret_val)
ret_val = api.flavor_delete(self.request, FLAVOR_ID, purge=True)
self.assertIsNone(ret_val)
def test_service_get(self):
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.get(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_get(self.request, NAME)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_service_list(self):
services = (TEST_RETURN, TEST_RETURN + '2')
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.list().AndReturn(services)
self.mox.ReplayAll()
ret_val = api.service_list(self.request)
for service in ret_val:
self.assertIsInstance(service, api.Services)
self.assertIn(service._apiresource, services)
self.mox.VerifyAll()
def test_service_update(self):
ENABLED = True
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.update(NAME, ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_update(self.request, NAME, ENABLED)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class TokenApiTests(APITestCase):
def setUp(self):
super(TokenApiTests, self).setUp()
self._prev_OPENSTACK_KEYSTONE_URL = getattr(settings,
'OPENSTACK_KEYSTONE_URL',
None)
settings.OPENSTACK_KEYSTONE_URL = TEST_URL
def tearDown(self):
super(TokenApiTests, self).tearDown()
settings.OPENSTACK_KEYSTONE_URL = self._prev_OPENSTACK_KEYSTONE_URL
def test_token_create(self):
catalog = {
'access': {
'token': {
'id': TEST_TOKEN_ID,
},
'user': {
'roles': [],
}
}
}
test_token = Token(TEST_TOKEN_ID, TEST_USERNAME,
TEST_TENANT_ID, TEST_TENANT_NAME)
keystoneclient = self.stub_keystoneclient()
keystoneclient.tokens = self.mox.CreateMockAnything()
keystoneclient.tokens.authenticate(username=TEST_USERNAME,
password=TEST_PASSWORD,
tenant=TEST_TENANT_ID
).AndReturn(test_token)
self.mox.ReplayAll()
ret_val = api.token_create(self.request, TEST_TENANT_ID,
TEST_USERNAME, TEST_PASSWORD)
self.assertEqual(test_token.tenant['id'], ret_val.tenant['id'])
self.mox.VerifyAll()
class ComputeApiTests(APITestCase):
def stub_compute_api(self, count=1):
self.mox.StubOutWithMock(api, 'compute_api')
compute_api = self.mox.CreateMock(OSCompute.Compute)
for i in range(count):
api.compute_api(IsA(http.HttpRequest)).AndReturn(compute_api)
return compute_api
def test_get_compute_api(self):
class ComputeClient(object):
__slots__ = ['auth_token', 'management_url']
self.mox.StubOutClassWithMocks(OSCompute, 'Compute')
compute_api = OSCompute.Compute(auth_token=TEST_TOKEN,
management_url=TEST_URL)
compute_api.client = ComputeClient()
self.mox.StubOutWithMock(api, 'url_for')
# called three times? Looks like a good place for optimization
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
self.mox.ReplayAll()
compute_api = api.compute_api(self.request)
self.assertIsNotNone(compute_api)
self.assertEqual(compute_api.client.auth_token, TEST_TOKEN)
self.assertEqual(compute_api.client.management_url, TEST_URL)
self.mox.VerifyAll()
def test_flavor_get(self):
FLAVOR_ID = 6
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.get(FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_get(self.request, FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_server_delete(self):
INSTANCE = 'anInstance'
compute_api = self.stub_compute_api()
compute_api.servers = self.mox.CreateMockAnything()
compute_api.servers.delete(INSTANCE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_delete(self.request, INSTANCE)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_reboot(self):
INSTANCE_ID = '2'
HARDNESS = 'diamond'
self.mox.StubOutWithMock(api, 'server_get')
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(OSCompute.servers.REBOOT_HARD).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(HARDNESS).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
self.mox.ReplayAll()
ret_val = api.server_reboot(self.request, INSTANCE_ID)
self.assertIsNone(ret_val)
ret_val = api.server_reboot(self.request, INSTANCE_ID,
hardness=HARDNESS)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_create(self):
NAME = 'server'
IMAGE = 'anImage'
FLAVOR = 'cherry'
USER_DATA = {'nuts': 'berries'}
KEY = 'user'
SECGROUP = self.mox.CreateMock(api.SecurityGroup)
server = self.mox.CreateMock(OSCompute.Server)
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create(NAME, IMAGE, FLAVOR, userdata=USER_DATA,
security_groups=[SECGROUP], key_name=KEY)\
.AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_create(self.request, NAME, IMAGE, FLAVOR,
KEY, USER_DATA, [SECGROUP])
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class ExtrasApiTests(APITestCase):
def stub_extras_api(self, count=1):
self.mox.StubOutWithMock(api, 'extras_api')
extras_api = self.mox.CreateMock(OSExtras.Extras)
for i in range(count):
api.extras_api(IsA(http.HttpRequest)).AndReturn(extras_api)
return extras_api
def test_get_extras_api(self):
self.mox.StubOutClassWithMocks(OSExtras, 'Extras')
OSExtras.Extras(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.extras_api(self.request))
self.mox.VerifyAll()
def test_console_create(self):
extras_api = self.stub_extras_api(count=2)
extras_api.consoles = self.mox.CreateMockAnything()
extras_api.consoles.create(
TEST_INSTANCE_ID, TEST_CONSOLE_KIND).AndReturn(TEST_RETURN)
extras_api.consoles.create(
TEST_INSTANCE_ID, 'text').AndReturn(TEST_RETURN + '2')
self.mox.ReplayAll()
ret_val = api.console_create(self.request,
TEST_INSTANCE_ID,
TEST_CONSOLE_KIND)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
ret_val = api.console_create(self.request, TEST_INSTANCE_ID)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN + '2')
self.mox.VerifyAll()
def test_flavor_list(self):
flavors = (TEST_RETURN, TEST_RETURN + '2')
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.list().AndReturn(flavors)
self.mox.ReplayAll()
ret_val = api.flavor_list(self.request)
self.assertEqual(len(ret_val), len(flavors))
for flavor in ret_val:
self.assertIsInstance(flavor, api.Flavor)
self.assertIn(flavor._apiresource, flavors)
self.mox.VerifyAll()
def test_server_list(self):
servers = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.list().AndReturn(servers)
self.mox.ReplayAll()
ret_val = api.server_list(self.request)
self.assertEqual(len(ret_val), len(servers))
for server in ret_val:
self.assertIsInstance(server, api.Server)
self.assertIn(server._apiresource, servers)
self.mox.VerifyAll()
def test_usage_get(self):
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.get(TEST_TENANT_ID, 'start',
'end').AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.usage_get(self.request, TEST_TENANT_ID, 'start', 'end')
self.assertIsInstance(ret_val, api.Usage)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_usage_list(self):
usages = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.list('start', 'end').AndReturn(usages)
self.mox.ReplayAll()
ret_val = api.usage_list(self.request, 'start', 'end')
self.assertEqual(len(ret_val), len(usages))
for usage in ret_val:
self.assertIsInstance(usage, api.Usage)
self.assertIn(usage._apiresource, usages)
self.mox.VerifyAll()
def test_server_get(self):
INSTANCE_ID = '2'
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.get(INSTANCE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_get(self.request, INSTANCE_ID)
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class VolumeTests(APITestCase):
def setUp(self):
super(VolumeTests, self).setUp()
volume = api.Volume(APIResource.get_instance())
volume.id = 1
volume.displayName = "displayName"
volume.attachments = [{"device": "/dev/vdb",
"serverId": 1,
"id": 1,
"volumeId": 1}]
self.volume = volume
self.volumes = [volume, ]
self.novaclient = self.stub_novaclient()
self.novaclient.volumes = self.mox.CreateMockAnything()
def test_volume_list(self):
self.novaclient.volumes.list().AndReturn(self.volumes)
self.mox.ReplayAll()
volumes = api.volume_list(self.request)
self.assertIsInstance(volumes[0], api.Volume)
self.mox.VerifyAll()
def test_volume_get(self):
self.novaclient.volumes.get(IsA(int)).AndReturn(self.volume)
self.mox.ReplayAll()
volume = api.volume_get(self.request, 1)
self.assertIsInstance(volume, api.Volume)
self.mox.VerifyAll()
def test_volume_instance_list(self):
self.novaclient.volumes.get_server_volumes(IsA(int)).AndReturn(
self.volume.attachments)
self.mox.ReplayAll()
attachments = api.volume_instance_list(self.request, 1)
self.assertEqual(attachments, self.volume.attachments)
self.mox.VerifyAll()
def test_volume_create(self):
self.novaclient.volumes.create(IsA(int), IsA(str), IsA(str)).AndReturn(
self.volume)
self.mox.ReplayAll()
new_volume = api.volume_create(self.request,
10,
"new volume",
"new description")
self.assertIsInstance(new_volume, api.Volume)
self.mox.VerifyAll()
def test_volume_delete(self):
self.novaclient.volumes.delete(IsA(int))
self.mox.ReplayAll()
ret_val = api.volume_delete(self.request, 1)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_volume_attach(self):
self.novaclient.volumes.create_server_volume(
IsA(int), IsA(int), IsA(str))
self.mox.ReplayAll()
ret_val = api.volume_attach(self.request, 1, 1, "/dev/vdb")
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_volume_detach(self):
self.novaclient.volumes.delete_server_volume(IsA(int), IsA(int))
self.mox.ReplayAll()
ret_val = api.volume_detach(self.request, 1, 1)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
class APIExtensionTests(APITestCase):
def setUp(self):
super(APIExtensionTests, self).setUp()
keypair = api.KeyPair(APIResource.get_instance())
keypair.id = 1
keypair.name = TEST_RETURN
self.keypair = keypair
self.keypairs = [keypair, ]
floating_ip = api.FloatingIp(APIResource.get_instance())
floating_ip.id = 1
floating_ip.fixed_ip = '10.0.0.4'
floating_ip.instance_id = 1
floating_ip.ip = '58.58.58.58'
self.floating_ip = floating_ip
self.floating_ips = [floating_ip, ]
server = api.Server(APIResource.get_instance(), self.request)
server.id = 1
self.server = server
self.servers = [server, ]
def test_server_snapshot_create(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create_image(IsA(int), IsA(str)).\
AndReturn(self.server)
self.mox.ReplayAll()
server = api.snapshot_create(self.request, 1, 'test-snapshot')
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_tenant_floating_ip_list(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(self.floating_ips)
self.mox.ReplayAll()
floating_ips = api.tenant_floating_ip_list(self.request)
self.assertEqual(len(floating_ips), len(self.floating_ips))
self.assertIsInstance(floating_ips[0], api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_get(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_get(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_allocate(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create().AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_allocate(self.request)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_release(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(1).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_release(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_server_remove_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.remove_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_remove_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_server_add_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.add_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_add_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_keypair_create(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.create(IsA(str)).AndReturn(self.keypair)
self.mox.ReplayAll()
ret_val = api.keypair_create(self.request, TEST_RETURN)
self.assertIsInstance(ret_val, api.KeyPair)
self.assertEqual(ret_val.name, self.keypair.name)
self.mox.VerifyAll()
def test_keypair_import(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.create(IsA(str), IsA(str)).AndReturn(self.keypair)
self.mox.ReplayAll()
ret_val = api.keypair_import(self.request, TEST_RETURN, TEST_RETURN)
self.assertIsInstance(ret_val, api.KeyPair)
self.assertEqual(ret_val.name, self.keypair.name)
self.mox.VerifyAll()
def test_keypair_delete(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.delete(IsA(int))
self.mox.ReplayAll()
ret_val = api.keypair_delete(self.request, self.keypair.id)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_keypair_list(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.list().AndReturn(self.keypairs)
self.mox.ReplayAll()
ret_val = api.keypair_list(self.request)
self.assertEqual(len(ret_val), len(self.keypairs))
for keypair in ret_val:
self.assertIsInstance(keypair, api.KeyPair)
self.mox.VerifyAll()
class GlanceApiTests(APITestCase):
def stub_glance_api(self, count=1):
self.mox.StubOutWithMock(api, 'glance_api')
glance_api = self.mox.CreateMock(glance_client.Client)
glance_api.token = TEST_TOKEN
for i in range(count):
api.glance_api(IsA(http.HttpRequest)).AndReturn(glance_api)
return glance_api
def test_get_glance_api(self):
self.mox.StubOutClassWithMocks(glance_client, 'Client')
client_instance = glance_client.Client(TEST_HOSTNAME, TEST_PORT,
auth_tok=TEST_TOKEN)
# Normally ``auth_tok`` is set in ``Client.__init__``, but mox doesn't
# duplicate that behavior so we set it manually.
client_instance.auth_tok = TEST_TOKEN
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'image').AndReturn(TEST_URL)
self.mox.ReplayAll()
ret_val = api.glance_api(self.request)
self.assertIsNotNone(ret_val)
self.assertEqual(ret_val.auth_tok, TEST_TOKEN)
self.mox.VerifyAll()
def test_image_create(self):
IMAGE_FILE = 'someData'
IMAGE_META = {'metadata': 'foo'}
glance_api = self.stub_glance_api()
glance_api.add_image(IMAGE_META, IMAGE_FILE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_create(self.request, IMAGE_META, IMAGE_FILE)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
def test_image_delete(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.delete_image(IMAGE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_delete(self.request, IMAGE_ID)
self.assertEqual(ret_val, TEST_RETURN)
self.mox.VerifyAll()
def test_image_get(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.get_image(IMAGE_ID).AndReturn([TEST_RETURN])
self.mox.ReplayAll()
ret_val = api.image_get(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
def test_image_list_detailed(self):
images = (TEST_RETURN, TEST_RETURN + '2')
glance_api = self.stub_glance_api()
glance_api.get_images_detailed().AndReturn(images)
self.mox.ReplayAll()
ret_val = api.image_list_detailed(self.request)
self.assertEqual(len(ret_val), len(images))
for image in ret_val:
self.assertIsInstance(image, api.Image)
self.assertIn(image._apidict, images)
self.mox.VerifyAll()
def test_image_update(self):
IMAGE_ID = '1'
IMAGE_META = {'metadata': 'foobar'}
glance_api = self.stub_glance_api(count=2)
glance_api.update_image(IMAGE_ID, image_meta={}).AndReturn(TEST_RETURN)
glance_api.update_image(IMAGE_ID,
image_meta=IMAGE_META).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_update(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
ret_val = api.image_update(self.request,
IMAGE_ID,
image_meta=IMAGE_META)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
class SwiftApiTests(APITestCase):
def setUp(self):
self.mox = mox.Mox()
self.request = http.HttpRequest()
self.request.session = dict()
self.request.session['token'] = TEST_TOKEN
def tearDown(self):
self.mox.UnsetStubs()
def stub_swift_api(self, count=1):
self.mox.StubOutWithMock(api, 'swift_api')
swift_api = self.mox.CreateMock(cloudfiles.connection.Connection)
for i in range(count):
api.swift_api(IsA(http.HttpRequest)).AndReturn(swift_api)
return swift_api
def test_swift_get_containers(self):
containers = (TEST_RETURN, TEST_RETURN + '2')
swift_api = self.stub_swift_api()
swift_api.get_all_containers(limit=10000,
marker=None).AndReturn(containers)
self.mox.ReplayAll()
ret_val = api.swift_get_containers(self.request)
self.assertEqual(len(ret_val), len(containers))
for container in ret_val:
self.assertIsInstance(container, api.Container)
self.assertIn(container._apiresource, containers)
self.mox.VerifyAll()
def test_swift_create_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
self.mox.StubOutWithMock(api, 'swift_container_exists')
api.swift_container_exists(self.request,
NAME).AndReturn(False)
swift_api.create_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_create_container(self.request, NAME)
self.assertIsInstance(ret_val, api.Container)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_swift_delete_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
swift_api.delete_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_container(self.request, NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_objects(self):
NAME = 'containerName'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(limit=10000,
marker=None,
prefix=None).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request, NAME)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_get_objects_with_prefix(self):
NAME = 'containerName'
PREFIX = 'prefacedWith'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(limit=10000,
marker=None,
prefix=PREFIX).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request,
NAME,
prefix=PREFIX)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_upload_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'someData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.create_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.write(OBJECT_DATA).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_upload_object(self.request,
CONTAINER_NAME,
OBJECT_NAME,
OBJECT_DATA)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_delete_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.delete_object(OBJECT_NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_object(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_object_data(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'objectData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.stream().AndReturn(OBJECT_DATA)
self.mox.ReplayAll()
ret_val = api.swift_get_object_data(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertEqual(ret_val, OBJECT_DATA)
self.mox.VerifyAll()
def test_swift_object_exists(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
self.mox.ReplayAll()
ret_val = api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertTrue(ret_val)
self.mox.VerifyAll()
def test_swift_copy_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
self.mox.StubOutWithMock(api, 'swift_object_exists')
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME).AndReturn(False)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.copy_to(CONTAINER_NAME, OBJECT_NAME)
self.mox.ReplayAll()
ret_val = api.swift_copy_object(self.request, CONTAINER_NAME,
OBJECT_NAME, CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
|
genius1611/horizon
|
django-openstack/django_openstack/tests/api_tests.py
|
Python
|
apache-2.0
| 52,607
|
# -*- coding: utf-8 -*-
"""Definition of modules for collecting data from GRR hosts."""
import datetime
import os
import re
import threading
import time
import zipfile
from grr_api_client import errors as grr_errors
from grr_response_proto import flows_pb2, timeline_pb2
from dftimewolf.lib.collectors.grr_base import GRRBaseModule
from dftimewolf.lib.containers import containers
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
# TODO: GRRFlow should be extended by classes that actually implement
# the Process() method.
class GRRFlow(GRRBaseModule): # pylint: disable=abstract-method
"""Launches and collects GRR flows.
Modules that use GRR flows or interact with hosts should extend this class.
Attributes:
keepalive (bool): True if the GRR keepalive functionality should be used.
"""
_CHECK_APPROVAL_INTERVAL_SEC = 10
_CHECK_FLOW_INTERVAL_SEC = 10
_CLIENT_ID_REGEX = re.compile(r'^c\.[0-9a-f]{16}$', re.IGNORECASE)
def __init__(self, state, critical=False):
"""Initializes a GRR flow module.
Args:
state (DFTimewolfState): recipe state.
critical (Optional[bool]): True if the module is critical, which causes
the entire recipe to fail if the module encounters an error.
"""
super(GRRFlow, self).__init__(state, critical=critical)
self.keepalive = False
# TODO: change object to more specific GRR type information.
def _GetClientByHostname(self, hostname):
"""Searches GRR by hostname and get the latest active client.
Args:
hostname (str): hostname to search for.
Returns:
object: GRR API Client object
Raises:
DFTimewolfError: if no client ID found for hostname.
"""
# Search for the hostname in GRR
print('Searching for client: {0:s}'.format(hostname))
try:
search_result = self.grr_api.SearchClients(hostname)
except grr_errors.UnknownError as exception:
self.state.AddError('Could not search for host {0:s}: {1!s}'.format(
hostname, exception
), critical=True)
return None
result = []
for client in search_result:
if hostname.lower() in client.data.os_info.fqdn.lower():
result.append((client.data.last_seen_at, client))
if not result:
self.state.AddError('Could not get client_id for {0:s}'.format(
hostname), critical=True)
return None
last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]
# Remove microseconds and create datetime object
last_seen_datetime = datetime.datetime.utcfromtimestamp(
last_seen / 1000000)
# Timedelta between now and when the client was last seen, in minutes.
# First, count total seconds. This will return a float.
last_seen_seconds = (
datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))
    print('Found active client: {0:s}'.format(client.client_id))
print('Client last seen: {0:s} ({1:d} minutes ago)'.format(
last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),
last_seen_minutes))
return client
# TODO: change object to more specific GRR type information.
def _FindClients(self, hosts):
"""Finds GRR clients given a list of hosts.
Args:
hosts (list[str]): FQDNs of hosts.
Returns:
list[object]: GRR client objects.
"""
# TODO(tomchop): Thread this
clients = []
for host in hosts:
clients.append(self._GetClientByHostname(host))
return [client for client in clients if client is not None]
# TODO: change object to more specific GRR type information.
def _LaunchFlow(self, client, name, args):
"""Creates the specified flow, setting KeepAlive if requested.
Args:
client (object): GRR Client object on which to launch the flow.
name (str): name of the GRR flow.
args (object): arguments specific for type of flow, as defined in GRR
flow proto (FlowArgs).
Returns:
str: GRR identifier for launched flow, or an empty string if flow could
not be launched.
"""
# Start the flow and get the flow ID
flow = self._WrapGRRRequestWithApproval(
client, client.CreateFlow, name=name, args=args)
if not flow:
return ''
flow_id = flow.flow_id
print('{0:s}: Scheduled'.format(flow_id))
if self.keepalive:
keepalive_flow = client.CreateFlow(
name='KeepAlive', args=flows_pb2.KeepAliveArgs())
      print('KeepAlive flow {0:s} scheduled'.format(keepalive_flow.flow_id))
return flow_id
# TODO: change object to more specific GRR type information.
def _AwaitFlow(self, client, flow_id):
"""Waits for a specific GRR flow to complete.
Args:
client (object): GRR Client object in which to await the flow.
flow_id (str): GRR identifier of the flow to await.
Raises:
DFTimewolfError: if flow error encountered.
"""
print('{0:s}: Waiting to finish'.format(flow_id))
while True:
try:
status = client.Flow(flow_id).Get().data
except grr_errors.UnknownError:
        msg = 'Unable to stat flow {0:s} for host {1:s}'.format(
            flow_id, client.data.os_info.fqdn.lower())
        self.state.AddError(msg)
        raise DFTimewolfError(msg)
if status.state == flows_pb2.FlowContext.ERROR:
# TODO(jbn): If one artifact fails, what happens? Test.
message = status.context.backtrace
if 'ArtifactNotRegisteredError' in status.context.backtrace:
message = status.context.backtrace.split('\n')[-2]
raise DFTimewolfError(
'{0:s}: FAILED! Message from GRR:\n{1:s}'.format(
flow_id, message))
if status.state == flows_pb2.FlowContext.TERMINATED:
print('{0:s}: Complete'.format(flow_id))
break
time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
# TODO: change object to more specific GRR type information.
def _DownloadFiles(self, client, flow_id):
"""Download files from the specified flow.
Args:
client (object): GRR Client object to which to download flow data from.
flow_id (str): GRR identifier of the flow.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file
class GRRArtifactCollector(GRRFlow):
"""Artifact collector for GRR flows.
Attributes:
artifacts (list[str]): artifact definition names.
extra_artifacts (list[str]): extra artifact definition names.
    hostnames (list[str]): FQDNs of the GRR client hosts.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect file
system artifacts.
"""
_DEFAULT_ARTIFACTS_LINUX = [
'LinuxAuditLogs', 'LinuxAuthLogs', 'LinuxCronLogs', 'LinuxWtmp',
'AllUsersShellHistory', 'ZeitgeistDatabase'
]
_DEFAULT_ARTIFACTS_DARWIN = [
'MacOSRecentItems', 'MacOSBashHistory', 'MacOSLaunchAgentsPlistFiles',
'MacOSAuditLogFiles', 'MacOSSystemLogFiles', 'MacOSAppleSystemLogFiles',
'MacOSMiscLogs', 'MacOSSystemInstallationTime', 'MacOSQuarantineEvents',
'MacOSLaunchDaemonsPlistFiles', 'MacOSInstallationHistory',
'MacOSUserApplicationLogs', 'MacOSInstallationLogFile'
]
_DEFAULT_ARTIFACTS_WINDOWS = [
'WindowsAppCompatCache', 'WindowsEventLogs', 'WindowsPrefetchFiles',
'WindowsScheduledTasks', 'WindowsSearchDatabase',
'WindowsSuperFetchFiles', 'WindowsSystemRegistryFiles',
'WindowsUserRegistryFiles', 'WindowsXMLEventLogTerminalServices'
]
artifact_registry = {
'Linux': _DEFAULT_ARTIFACTS_LINUX,
'Darwin': _DEFAULT_ARTIFACTS_DARWIN,
'Windows': _DEFAULT_ARTIFACTS_WINDOWS
}
def __init__(self, state):
super(GRRArtifactCollector, self).__init__(state)
self._clients = []
self.artifacts = []
self.extra_artifacts = []
self.hostnames = None
self.use_tsk = False
# pylint: disable=arguments-differ,too-many-arguments
def SetUp(self,
hosts, artifacts, extra_artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR artifact collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
artifacts (str): comma-separated artifact definition names.
extra_artifacts (str): comma-separated extra artifact definition names.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect file
system artifacts.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRArtifactCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password, approvers=approvers,
verify=verify)
if artifacts is not None:
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if extra_artifacts is not None:
self.extra_artifacts = [item.strip() for item
in extra_artifacts.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single GRR client.
This function is used as a callback for the processing thread.
Args:
client (object): a GRR client object.
"""
system_type = client.data.os_info.system
print('System type: {0:s}'.format(system_type))
# If the list is supplied by the user via a flag, honor that.
artifact_list = []
if self.artifacts:
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
artifact_list = self.artifacts
else:
default_artifacts = self.artifact_registry.get(system_type, None)
if default_artifacts:
print('Collecting default artifacts for {0:s}: {1:s}'.format(
system_type, ', '.join(default_artifacts)))
artifact_list.extend(default_artifacts)
if self.extra_artifacts:
print('Throwing in an extra {0!s}'.format(self.extra_artifacts))
artifact_list.extend(self.extra_artifacts)
artifact_list = list(set(artifact_list))
if not artifact_list:
return
flow_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False)
flow_id = self._LaunchFlow(client, 'ArtifactCollectorFlow', flow_args)
if not flow_id:
msg = 'Flow could not be launched on {0:s}.'.format(client.client_id)
msg += '\nArtifactCollectorFlow args: {0!s}'.format(flow_args)
self.state.AddError(msg, critical=True)
return
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadFiles(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects artifacts from a host with GRR.
Raises:
      DFTimewolfError: if no artifacts are specified or resolved for the platform.
"""
threads = []
for client in self._FindClients(self.hostnames):
print(client)
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
class GRRFileCollector(GRRFlow):
"""File collector for GRR flows.
Attributes:
files (list[str]): file paths.
    hostnames (list[str]): FQDNs of the GRR client hosts.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect files.
action (FileFinderAction): Enum denoting action to take.
"""
_ACTIONS = {'download': flows_pb2.FileFinderAction.DOWNLOAD,
'hash': flows_pb2.FileFinderAction.HASH,
'stat': flows_pb2.FileFinderAction.STAT,
}
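  # The ``action`` string passed to SetUp() is lower-cased and looked up in
  # _ACTIONS, so e.g. action='hash' selects flows_pb2.FileFinderAction.HASH;
  # unknown values register a critical error (see SetUp below).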
def __init__(self, state):
super(GRRFileCollector, self).__init__(state)
self._clients = []
self.files = []
self.hostnames = None
self.use_tsk = False
self.action = None
# pylint: disable=arguments-differ,too-many-arguments
def SetUp(self,
hosts, files, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True, action='download'):
"""Initializes a GRR file collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
files (str): comma-separated file paths.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect files.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
action (Optional[str]): Action (download/hash/stat) (default: download).
"""
super(GRRFileCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
if files is not None:
self.files = [item.strip() for item in files.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk
if action.lower() in self._ACTIONS:
self.action = self._ACTIONS[action.lower()]
if self.action is None:
self.state.AddError("Invalid action {0!s}".format(action),
critical=True)
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single client.
This function is used as a callback for the processing thread.
Args:
client (object): GRR client object to act on.
"""
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=self.action)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._LaunchFlow(client, 'FileFinder', flow_args)
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadFiles(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects files from a host with GRR.
Raises:
DFTimewolfError: if no files specified.
"""
threads = []
for client in self._FindClients(self.hostnames):
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
class GRRFlowCollector(GRRFlow):
"""Flow collector.
Attributes:
client_id (str): GRR identifier of the client.
flow_id (str): GRR identifier of the flow to retrieve.
host (str): Target of GRR collection.
"""
def __init__(self, state):
super(GRRFlowCollector, self).__init__(state)
self.client_id = None
self.flow_id = None
self.host = None
# pylint: disable=arguments-differ
def SetUp(self,
host, flow_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR flow collector.
Args:
host (str): hostname of machine.
flow_id (str): GRR identifier of the flow to retrieve.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRFlowCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.flow_id = flow_id
self.host = host
def Process(self):
"""Downloads the results of a GRR collection flow.
Raises:
      DFTimewolfError: if no files are specified.
"""
client = self._GetClientByHostname(self.host)
self._AwaitFlow(client, self.flow_id)
collected_flow_data = self._DownloadFiles(client, self.flow_id)
if collected_flow_data:
print('{0:s}: Downloaded: {1:s}'.format(
self.flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
class GRRTimelineCollector(GRRFlow):
"""Timeline collector for GRR flows.
Attributes:
root_path (str): root path.
    hostnames (list[str]): FQDNs of the GRR client hosts.
"""
def __init__(self, state):
super(GRRTimelineCollector, self).__init__(state)
self._clients = []
self.root_path = None
self.hostnames = None
self._timeline_format = None
# We're overriding the behavior of GRRFlow's SetUp function to include new
# parameters.
# pylint: disable=arguments-differ
def SetUp(self,
hosts, root_path,
reason, timeline_format, grr_server_url, grr_username, grr_password,
approvers=None, verify=True):
"""Initializes a GRR timeline collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
root_path (str): path to start the recursive timeline.
reason (str): justification for GRR access.
timeline_format (str): Timeline format (1 is BODY, 2 is RAW).
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRTimelineCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
    if root_path is not None:
      # Strip and encode in one step; TimelineArgs.root expects bytes, and the
      # previous unconditional encode crashed when root_path was None and
      # discarded the stripped value.
      self.root_path = root_path.strip().encode()
    self.hostnames = [item.strip() for item in hosts.strip().split(',')]
    self._timeline_format = int(timeline_format)
    if self._timeline_format not in [1, 2]:
      self.state.AddError('Timeline format must be 1 (BODY) or 2 (RAW).', True)
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single client.
This function is used as a callback for the processing thread.
Args:
client (object): GRR client object to act on.
"""
root_path = self.root_path
if not root_path:
return
print('Timeline to start from \'{0:s}\' items'.format(root_path.decode()))
timeline_args = timeline_pb2.TimelineArgs(root=root_path,)
flow_id = self._LaunchFlow(client, 'TimelineFlow', timeline_args)
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadTimeline(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects a timeline from a host with GRR.
Raises:
DFTimewolfError: if no files specified.
"""
threads = []
for client in self._FindClients(self.hostnames):
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
def _DownloadTimeline(self, client, flow_id):
"""Download a timeline in BODY format from the specified flow.
Args:
client (object): GRR Client object to which to download flow data from.
flow_id (str): GRR identifier of the flow.
Returns:
str: path of downloaded files.
"""
extension = 'body' if self._timeline_format == 1 else 'raw'
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, extension)))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
timeline = flow.GetCollectedTimeline(self._timeline_format)
timeline.WriteToFile(output_file_path)
return output_file_path
modules_manager.ModulesManager.RegisterModules([
GRRArtifactCollector,
GRRFileCollector,
GRRFlowCollector,
GRRTimelineCollector])
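# Rough wiring sketch for one of these collectors (illustrative only: a
# DFTimewolfState-style object named `state`, reachable GRR credentials and
# the hostnames/paths below are all assumptions, not part of this module):
#
#   collector = GRRFileCollector(state)
#   collector.SetUp(
#       hosts='workstation1.example.com,workstation2.example.com',
#       files='/etc/passwd,/var/log/auth.log',
#       use_tsk=False,
#       reason='Ticket 1234',
#       grr_server_url='https://grr.example.com',
#       grr_username='analyst',
#       grr_password='secret',
#       action='download')
#   collector.Process()  # downloaded files end up as containers.File in state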
|
Onager/dftimewolf
|
dftimewolf/lib/collectors/grr_hosts.py
|
Python
|
apache-2.0
| 22,227
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 17:32:10 2017
@author: giangblackk
"""
from osgeo import ogr, osr
import networkx as nx
import numpy as np
def calculateGeometryLength(pointList, sourceSRS, destSRS):
line = ogr.Geometry(ogr.wkbLineString)
transform = osr.CoordinateTransformation(sourceSRS,destSRS)
for point in pointList:
line.AddPoint(point[0],point[1])
line.Transform(transform)
return line.Length()
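# Quick illustration of the helper above (a sketch: the coordinates and the
# EPSG:4326 source SRS are placeholders; the target SRS mirrors the UTM 48N
# definition used below):
#   wgs84 = osr.SpatialReference(); wgs84.ImportFromEPSG(4326)
#   utm48 = osr.SpatialReference()
#   utm48.ImportFromProj4('+proj=utm +zone=48 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ')
#   calculateGeometryLength([(105.80, 21.02), (105.81, 21.03)], wgs84, utm48)  # length in metres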
# target srs for road length computation
target_srs = osr.SpatialReference()
target_srs.ImportFromProj4('+proj=utm +zone=48 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ')
# read source dataset
highwayFileName = './roaddata/highway_line_singlepart.shp'
dataSource = ogr.Open(highwayFileName)
layer = dataSource.GetLayer(0)
source_srs = layer.GetSpatialRef()
featureCount = layer.GetFeatureCount()
print('featureCount: ', featureCount)
# layer.SetAttributeFilter("ONEWAY NOT IN ('yes', 'no','-1')")
# layer.SetAttributeFilter("ONEWAY IN ('-1','yes','no')")
# get attribute list
attributeList = []
layerDefinition = layer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
attributeList.append(fieldName)
attributeList.remove('TOLL')
attributeList.remove('TRACKTYPE')
attributeList.remove('DISUSED')
# create graph
G = nx.DiGraph()
nodeList = []
i = 0
for feature in layer:
geometry = feature.geometry()
geometry_projected = geometry.Clone()
geometry_projected.TransformTo(target_srs)
feature_length = geometry_projected.Length()
pointCount = geometry.GetPointCount()
pointList = geometry.GetPoints()
### first point ###########################################################
firstPoint = pointList[0]
if not firstPoint in nodeList:
nodeList.append(firstPoint)
G.add_node(i, lng=firstPoint[0], lat=firstPoint[1])
firstNodeID = i
i = i + 1
else:
for nodeidx in G.nodes_iter():
if G.node[nodeidx]['lng'] == firstPoint[0] and G.node[nodeidx]['lat'] == firstPoint[1]:
firstNodeID = nodeidx
### last point ############################################################
lastPoint = pointList[-1]
if not lastPoint in nodeList:
nodeList.append(lastPoint)
G.add_node(i, lng=lastPoint[0], lat=lastPoint[1])
lastNodeID = i
i = i + 1
else:
for nodeidx in G.nodes_iter():
if G.node[nodeidx]['lng'] == lastPoint[0] and G.node[nodeidx]['lat'] == lastPoint[1]:
lastNodeID = nodeidx
### if first point is same as last point, remove due to loop ##############
if firstNodeID == lastNodeID or firstPoint == lastPoint:
G.remove_node(firstNodeID)
nodeList.remove(firstPoint)
continue
### add edges between nodes ###############################################
middlePointList = pointList[1:-1]
if firstNodeID in middlePointList or lastNodeID in middlePointList:
# G.remove_node(firstNodeID)
# nodeList.remove(firstPoint)
# G.remove_node(lastNodeID)
# nodeList.remove(lastPoint)
continue
### create link ###########################################################
if feature.GetField('ONEWAY') == '-1':
G.add_edge(lastNodeID, firstNodeID)
for attribute in attributeList:
G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]
G[lastNodeID][firstNodeID]['length'] = feature_length
elif feature.GetField('ONEWAY') == 'yes':
G.add_edge(firstNodeID, lastNodeID)
for attribute in attributeList:
G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[firstNodeID][lastNodeID]['middle'] = middlePointList
G[firstNodeID][lastNodeID]['length'] = feature_length
else:
G.add_edge(firstNodeID, lastNodeID)
G.add_edge(lastNodeID, firstNodeID)
for attribute in attributeList:
G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[firstNodeID][lastNodeID]['middle'] = middlePointList
G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]
G[firstNodeID][lastNodeID]['length'] = feature_length
G[lastNodeID][firstNodeID]['length'] = feature_length
### intersect processing ##################################################
for edge in G.edges():
headID = edge[0]
tailID = edge[1]
attributeDict = G[headID][tailID]
middle = attributeDict['middle']
if firstPoint in middle:
if headID == firstNodeID or firstNodeID == tailID:
continue
indexFirstPoint = middle.index(firstPoint)
# copy attributes
attributeDictPart1 = attributeDict.copy()
attributeDictPart2 = attributeDict.copy()
# recalculate middle
attributeDictPart1['middle'] = middle[0:indexFirstPoint]
attributeDictPart2['middle'] = middle[indexFirstPoint+1:]
            # recalculate length
roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]
roadPart1.extend(middle[0:indexFirstPoint+1])
roadPart2 = middle[indexFirstPoint:]
roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))
attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)
attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)
G.remove_edge(headID, tailID)
G.add_edge(headID, firstNodeID, attr_dict=attributeDictPart1)
G.add_edge(firstNodeID, tailID, attr_dict=attributeDictPart2)
elif lastPoint in middle:
if headID == lastNodeID or lastNodeID == tailID:
continue
indexLastPoint = middle.index(lastPoint)
# copy attributes
attributeDictPart1 = attributeDict.copy()
attributeDictPart2 = attributeDict.copy()
# recalculate middle
attributeDictPart1['middle'] = middle[0:indexLastPoint]
attributeDictPart2['middle'] = middle[indexLastPoint+1:]
# recalculate length
roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]
roadPart1.extend(middle[0:indexLastPoint+1])
roadPart2 = middle[indexLastPoint:]
roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))
attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)
attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)
G.remove_edge(headID, tailID)
G.add_edge(headID, lastNodeID, attr_dict=attributeDictPart1)
G.add_edge(lastNodeID, tailID, attr_dict=attributeDictPart2)
### remove middle properties ##################################################
for edge in G.edges_iter():
G[edge[0]][edge[1]].pop('middle')
### remove zeros neighbor nodes ###############################################
for node in G.nodes():
if G.in_degree()[node] == 0 and G.out_degree()[node] == 0:
print(node)
G.remove_node(node)
### check if 2 node same lat long #############################################
lat = G.node[0]['lat']
lng = G.node[0]['lng']
sameCount = -1
for i in G.nodes_iter():
if G.node[i]['lat'] == lat and G.node[i]['lng'] == lng:
sameCount += 1
else:
lat = G.node[i]['lat']
lng = G.node[i]['lng']
print('same location Count: ',sameCount)
### check for self loop in result graph #######################################
self_loop_count = 0
for node in G.nodes_iter():
if node in G.neighbors(node):
self_loop_count += 1
print(node, G.neighbors(node))
print('self_loop_count: ', self_loop_count)
# nx.write_gexf(G,'./highway_line_singlepart.gexf')
# nx.write_gexf(G,'./highway_line_singlepart_new_length.gexf')
# nx.write_gexf(G,'./highway_line_singlepart_new_123.gexf')
nx.write_gexf(G,'./graphdata/highway_line_singlepart_new_length.gexf')
# create links between nodes
# add metadata of links
# save graph
# release dataset
layer = None
dataSource = None
|
Giangblackk/hanoi_road_map_analysis
|
preprocess/from_road_to_graph.py
|
Python
|
apache-2.0
| 8,626
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_platform_on_local_epr_result() -> None:
rule_runner = RuleRunner(rules=[QueryRule(FallibleProcessResult, (Process,))])
this_platform = Platform.current
process = Process(
argv=("/bin/echo", "test"), description="Run some program that will exit cleanly."
)
result = rule_runner.request(FallibleProcessResult, [process])
assert result.exit_code == 0
assert result.platform == this_platform
|
pantsbuild/pants
|
src/python/pants/engine/platform_test.py
|
Python
|
apache-2.0
| 735
|
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class MapAtK(RetrievalMetric):
"""Mean Average precision - mAP@K is computed as.
$$
    mAP_i@K = \frac{\sum_{j = 1}^{K} rel_{i,j} \times P_i@j}{R}
$$
Where: K is the number of neighbors in the i_th query result set.
P is the rolling precision over the i_th query result set.
R is the cardinality of the target class.
rel is the relevance mask (indicator function) for the i_th query.
i represents the i_th query.
j represents the j_th ranked query result.
AP@K is biased towards the top ranked results and is a function of the rank
(K), the relevancy mask (rel), and the number of indexed examples for the
class (R). The denominator for the i_th query is set to the number of
indexed examples (R) for the class associated with the i_th query.
    For example, if the index has 100 embedded examples (R) of class 'a',
and our query returns 50 results (K) where the top 10 results are all TPs,
then the AP@50 will be 0.10; however, if instead the bottom 10 ranked
results are all TPs, then the AP@50 will be much lower (0.012) because we
apply a penalty for the 40 FPs that come before the relevant query results.
This metric is useful when we want to ensure that the top ranked results
are relevant to the query; however, it requires that we pass a mapping from
the class id to the number of indexed examples for that class.
Args:
r: A mapping from class id to the number of examples in the index,
e.g., r[4] = 10 represents 10 indexed examples from class 4.
name: Name associated with the metric object, e.g., avg_precision@5
canonical_name: The canonical name associated with metric, e.g.,
avg_precision@K
k: The number of nearest neighbors over which the metric is computed.
distance_threshold: The max distance below which a nearest neighbor is
considered a valid match.
average: {'micro'} Determines the type of averaging performed over the
queries.
* 'micro': Calculates metrics globally over all queries.
"""
def __init__(
self,
r: Mapping[int, int],
name: str = "map",
k: int = 5,
average: str = "micro",
**kwargs,
) -> None:
if average == "macro":
raise ValueError(
"Mean Average Precision only supports micro averaging."
)
if "canonical_name" not in kwargs:
kwargs["canonical_name"] = "map@k"
super().__init__(name=name, k=k, average=average, **kwargs)
self.r = r
def get_config(self):
config = {
"r": self.r,
}
base_config = super().get_config()
return {**base_config, **config}
def compute(
self,
*, # keyword only arguments see PEP-570
query_labels: IntTensor,
match_mask: BoolTensor,
**kwargs,
) -> FloatTensor:
"""Compute the metric
Args:
query_labels: A 1D array of the labels associated with the
embedding queries.
match_mask: A 2D mask where a 1 indicates a match between the
jth query and the kth neighbor and a 0 indicates a mismatch.
**kwargs: Additional compute args
Returns:
A rank 0 tensor containing the metric.
"""
self._check_shape(query_labels, match_mask)
k_slice = tf.cast(match_mask[:, : self.k], dtype="float")
tp = tf.math.cumsum(k_slice, axis=1)
p_at_k = tf.math.divide(tp, tf.range(1, self.k + 1, dtype="float"))
p_at_k = tf.math.multiply(k_slice, p_at_k)
if self.average == "micro":
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
list(self.r.keys()),
list(self.r.values()),
key_dtype=tf.int32,
value_dtype=tf.int32,
),
default_value=-1,
)
class_counts = table.lookup(query_labels)
avg_p_at_k = tf.math.divide(
tf.math.reduce_sum(p_at_k, axis=1),
tf.cast(class_counts, dtype="float"),
)
avg_p_at_k = tf.math.reduce_mean(avg_p_at_k)
else:
raise ValueError(
f"{self.average} is not a supported average option"
)
result: FloatTensor = avg_p_at_k
return result
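# Rough usage sketch (toy values, not taken from the library's docs): `r`
# declares two indexed examples for each of classes 0 and 1, and the mask
# marks which of the k=2 neighbors match the query label.
#
#   metric = MapAtK(r={0: 2, 1: 2}, k=2)
#   query_labels = tf.constant([0, 1], dtype=tf.int32)
#   match_mask = tf.constant([[True, False], [True, True]])
#   metric.compute(query_labels=query_labels, match_mask=match_mask)
#   # -> (0.5 + 1.0) / 2 = 0.75 under these assumptions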
|
tensorflow/similarity
|
tensorflow_similarity/retrieval_metrics/map_at_k.py
|
Python
|
apache-2.0
| 5,306
|
from src.base.test_cases import TestCases
class ReverseWordsTestCases(TestCases):
def __init__(self):
super(ReverseWordsTestCases, self).__init__()
self.__add_test_case__("Example Test 1", ("the sky is blue"), ("blue is sky the"))
self.__add_test_case__("Test 2", (" "), (""))
self.__add_test_case__("Test 3", (" a b "), ("b a"))
self.__add_test_case__("Test 4", (" a "), ("a"))
self.__add_test_case__("Test 5", (""), (""))
self.__add_test_case__("Test 6", (" 1"), ("1"))
|
hychrisli/PyAlgorithms
|
src/tests/part1/q151_test_reverse_words.py
|
Python
|
apache-2.0
| 539
|
#!/usr/bin/python
import sys
import keystoneclient.v2_0.client
import glance.context
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.registry.context
import glance.db.sqlalchemy.api as db_api
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.DEBUG)
def get_owner_map(ksclient, owner_is_tenant=True):
if owner_is_tenant:
entities = ksclient.tenants.list()
else:
entities = ksclient.users.list()
# build mapping of (user or tenant) name to id
return dict([(entity.name, entity.id) for entity in entities])
def build_image_owner_map(owner_map, db, context):
image_owner_map = {}
for image in db.image_get_all(context):
image_id = image['id']
owner_name = image['owner']
if not owner_name:
LOG.info('Image %s has no owner. Skipping.' % image_id)
continue
try:
owner_id = owner_map[owner_name]
except KeyError:
msg = 'Image %s owner %s was not found. Skipping.'
LOG.error(msg % (image_id, owner_name))
continue
image_owner_map[image_id] = owner_id
msg = 'Image %s owner %s -> %s' % (image_id, owner_name, owner_id)
LOG.info(msg)
return image_owner_map
def update_image_owners(image_owner_map, db, context):
for (image_id, image_owner) in image_owner_map.items():
db.image_update(context, image_id, {'owner': image_owner})
LOG.info('Image %s successfully updated.' % image_id)
if __name__ == "__main__":
config = cfg.CONF
extra_cli_opts = [
cfg.BoolOpt('dry-run',
help='Print output but do not make db changes.'),
cfg.StrOpt('keystone-auth-uri',
help='Authentication endpoint'),
cfg.StrOpt('keystone-admin-tenant-name',
help='Administrative user\'s tenant name'),
cfg.StrOpt('keystone-admin-user',
help='Administrative user\'s id'),
cfg.StrOpt('keystone-admin-password',
help='Administrative user\'s password'),
]
config.register_cli_opts(extra_cli_opts)
config(project='glance', prog='glance-registry')
db_api.configure_db()
    # glance.common.context is never imported above; use the imported glance.context.
    context = glance.context.RequestContext(is_admin=True)
auth_uri = config.keystone_auth_uri
admin_tenant_name = config.keystone_admin_tenant_name
admin_user = config.keystone_admin_user
admin_password = config.keystone_admin_password
if not (auth_uri and admin_tenant_name and admin_user and admin_password):
LOG.critical('Missing authentication arguments')
sys.exit(1)
ks = keystoneclient.v2_0.client.Client(username=admin_user,
password=admin_password,
tenant_name=admin_tenant_name,
auth_url=auth_uri)
owner_map = get_owner_map(ks, config.owner_is_tenant)
image_updates = build_image_owner_map(owner_map, db_api, context)
if not config.dry_run:
update_image_owners(image_updates, db_api, context)
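# Typical invocation (sketch; the flags correspond to the CLI options
# registered above, endpoint and credentials are placeholders):
#   python migrate_image_owners.py \
#       --keystone-auth-uri=http://keystone.example.com:5000/v2.0 \
#       --keystone-admin-tenant-name=admin \
#       --keystone-admin-user=admin \
#       --keystone-admin-password=secret \
#       --dry-run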
|
tylertian/Openstack
|
openstack F/glance/tools/migrate_image_owners.py
|
Python
|
apache-2.0
| 3,200
|
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
# Construct a dataset listing the variables and selected properties for a collection of data files
# 05-23-2008 Original version - JKP
# 04-29-2009 Add file handle support
# 11-16-2009 Protect against UP converting escape sequences with "\" characters
#12-16-2009 Enable translation
__version__ = "1.2.1"
__author__ = "JKP, SPSS"
#try:
#import wingdbstub
#except:
#pass
import spss, os, re, locale
import spssaux
from extension import Template, Syntax
try:
from extension import processcmd
except:
print("""This command requires a newer version of extension.py. Please download it from
SPSS Developer Central and replace the existing file""")
raise
class DataStep(object):
def __enter__(self):
"""initialization for with statement"""
try:
spss.StartDataStep()
except:
spss.Submit("EXECUTE")
spss.StartDataStep()
return self
def __exit__(self, type, value, tb):
spss.EndDataStep()
return False
# The following block of code is for using the gather function as an Extension command.
def Run(args):
"""Execute the GATHERMD command"""
###print args #debug
args = args[list(args.keys())[0]]
helptext=r"""GATHERMD
Create and activate a dataset whose cases are variable names and labels
and, optionally, selected attributes from one or more data files.
GATHERMD list-of-specifications
[/OPTIONS [FILETYPES=*spss sas stata]
[DSNAME=name]
[FILENAMEPATTERN="pattern expression"]]
[ATTRLENGTH=value]
[/ATTRIBUTES list-of-attribute-names]
[HELP].
list-of-specifications is a list of one or more filenames, optionally with paths, and/or directories.
For directories, all appropriate files in the directory and its subdirectories are searched. With version 18
or later, the file specifications can include PASW Statistics file handles.
FILETYPES defaults to SPSS files (.sav and .por).
sas files are .sas7bdat, .sd7, .sd2, .ssd01, and .xpt
stata files are .dta
Files with any of the specified types found in the directories specified are searched. Since
these files are opened in SPSS, if the same file is already open in SPSS, it will be reopened
without saving any changes that may have been made.
DSNAME optionally specifies a dataset name to be assigned to the output dataset.
FILENAMEPATTERN can be specified as a quoted literal containing a regular expression pattern
to be used as a filter on filenames. For example, FILENAMEPATTERN="car" would limit the
files searched to those whose name starts with "car". FILENAMEPATTERN=".*car" would accept
any filenames containing "car". These are not the same as filename wildcards found in many operating systems.
For example, "abc*" will match any name starting with ab: it means literally ab followed by zero or more c's.
The regular expression is not case sensitive, and it is applied to the name of the
file without the extension. For a full explanation of regular expressions, one good source is
http://www.amk.ca/python/howto/regex/
/ATTRIBUTES list-of-names
specifies a list of custom variable attributes to be included in the output dataset. The variable
names will be the attribute names except if they conflict with the built-in variables source,
VariableName, and VariableLabel. If the attribute is not present, the value will be blank.
If the attribute is an array, only the first value is included.
Attribute variables in the output dataset are truncated to the length specified in ATTRLENGTH,
which defaults to 256
/HELP displays this text and does nothing else.
Examples:
GATHERMD "c:/spss17/samples".
gathermd "c:/temp/firstlevel" "c:/spss16/samples/voter.sav" /options filetypes=spss sas
dsname=gathered.
"""
# define the command syntax and enable translation
oobj = Syntax([
Template("", subc="", var="files", ktype="literal", islist=True),
Template("FILETYPES", subc="OPTIONS", var="filetypes", ktype="str", islist=True),
Template("FILENAMEPATTERN", subc="OPTIONS", var="filenamepattern", ktype="literal"),
Template("DSNAME", subc="OPTIONS", var="dsname", ktype="varname"),
Template("ATTRLENGTH", subc="OPTIONS", var="attrlength", ktype="int", vallist=(1, 32767)),
Template("", subc="ATTRIBUTES", var="attrlist", ktype="varname", islist=True)])
global _
try:
_("---")
except:
def _(msg):
return msg
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, gather)
#oobj.parsecmd(args, vardict = spssaux.VariableDict())
#gather(**oobj.parsedparams)
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
def gather(files, filetypes=["spss"], filenamepattern=None, dsname=None,attrlist=[], attrlength=256):
"""Create SPSS dataset listing variable names, variable labels, and source files for selected files. Return the name of the new dataset.
files is a list of files and/or directories. If an item is a file, it is processed; if it is a directory, the files and subdirectories
it contains are processed.
filetypes is a list of filetypes to process. It defaults to ["spss"] which covers sav and por. It can also include
"sas" for sas7bdat, sd7, sd2, ssd01, and xpt, and "stata" for dta
filenamepattern is an optional parameter that can contain a regular expression to be applied to the filenames to filter the
datasets that are processed. It is applied to the filename itself, omitting any directory path and file extension. The expression
is anchored to the start of the name and ignores case.
dsname is an optional name to be assigned to the new dataset. If not specified, a name will be automatically generated.
If dsname is specified, it will become the active dataset; otherwise, it need not be the active dataset.
attrlist is an optional list of custom attributes to be included in the output. For array attributes, only the first item is
recorded. The value is blank if the attribute is not present for the variable. Attribute variables are
strings of size attrlength bytes, truncated appropriately.
The output is just a dataset. It must be saved, if desired, after this function has completed.
Its name is the return value of this function.
Exception is raised if any files not found.
Examples:
gathermetadata.gather(["c:/temp/firstlevel", "c:/spss16/samples/voter.sav"], ["spss", "sas"])
searches spss and sas files in or under the temp/firstlevel directory plus the voter file.
gathermetadata.gather(["c:/temp/firstlevel"], filenamepattern="car")
searches the firstlevel directory for spss files whose names start with "car".
"""
encoding = locale.getlocale()[1]
filetypes = [f.lower() for f in filetypes]
for ft in filetypes:
if not ft in ["spss", "sas", "stata"]:
raise ValueError(_("Filetypes must be one or more of spss, sas, and stata."))
dsvars = {"source":"source", "variablename":"VariableName", "variablelabel":"variableLabel"}
with DataStep():
ds = spss.Dataset(name=None)
dsn = ds.name
varlist = ds.varlist
varlist.append("source",200)
varlist["source"].label=_("File containing the variable")
varlist.append("variableName", 64)
varlist["variableName"].label = _("Variable Name")
varlist.append("variableLabel", 256)
varlist["variableLabel"].label = _("Variable Label")
attrindexes = {}
for i, aname in enumerate(attrlist):
anamemod = addunique(dsvars, aname)
varlist.append(dsvars[anamemod], attrlength)
attrindexes[aname.lower()] = i
addvarinfo = makeaddinfo(dsn, filetypes, filenamepattern, dsvars, attrindexes, attrlength) #factory function
files = [fixescapes(f) for f in files] #UP is converting escape characters :-)
# walk the list of files and directories and open
try: # will fail if spssaux is prior to version 2.3
fh = spssaux.FileHandles()
except:
pass
notfound = []
for item in files:
try:
item = fh.resolve(item)
except:
pass
if os.path.isfile(item):
addvarinfo(item)
elif os.path.isdir(item):
for dirpath, dirnames, fnames in os.walk(item):
for f in fnames:
try:
addvarinfo(os.path.join(dirpath, f))
except EnvironmentError as e:
notfound.append(e.args[0])
else:
if not isinstance(item, str):
item = str(item, encoding)
notfound.append(_("Not found: %s") % item)
spss.Submit("DATASET ACTIVATE %s." % dsn)
if not dsname is None:
spss.Submit("DATASET NAME %s." % dsname)
dsn = dsname
if notfound:
raise ValueError("\n".join(notfound))
return dsn
def makeaddinfo(dsname, filetypes, filenamepattern, dsvars, attrindexes, attrlength):
"""create a function to add variable information to a dataset.
dsname is the dataset name to append to.
filetypes is the list of file types to include.
filenamepattern is a regular expression to filter filename roots.
dsvars is a special dictionary of variables and attributes. See function addunique.
attrindexes is a dictionary with keys of lower case attribute names and values as the dataset index starting with 0.
attrlength is the size of the attribute string variables"""
ftdict = {"spss":[".sav", ".por"], "sas":[".sas7bdat",".sd7",".sd2",".ssd01",".xpt"], "stata":[".dta"]}
spsscmd = {"spss":"GET FILE='%s'.", "sas": "GET SAS DATA='%s'.", "stata": "GET STATA FILE='%s'."}
if filenamepattern:
try:
pat = re.compile(filenamepattern, re.IGNORECASE)
except:
raise ValueError(_("Invalid filenamepattern: %s") % filenamepattern)
else:
pat = None
ll = len(dsvars)
includeAttrs = ll > 3
blanks = (ll-3) * [" "]
def addinfo(filespec):
"""open the file if appropriate type, extract variable information, and add it to dataset dsname.
filespec is the file to open
dsname is the dataset name to append to
filetypes is the list of file types to include."""
fnsplit = os.path.split(filespec)[1]
fn, ext = os.path.splitext(fnsplit)
for ft in filetypes:
if ext in ftdict[ft]:
if pat is None or pat.match(fn):
try:
spss.Submit(spsscmd[ft] % filespec)
spss.Submit("DATASET NAME @__GATHERMD__.")
except:
if not isinstance(filespec, str):
                            filespec = str(filespec, locale.getlocale()[1])  # 'encoding' from gather() is not in scope here
raise EnvironmentError(_("File could not be opened, skipping: %s") % filespec)
break
else:
return addinfo
with DataStep():
ds = spss.Dataset(name=dsname) # not the active dataset
dssource = spss.Dataset(name="*") # The dataset to examine
numvars = spss.GetVariableCount() # active dataset
variables = dssource.varlist
for v in range(numvars):
lis = [filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v)]
lis.extend(blanks)
lis = [item+ 256*" " for item in lis]
ds.cases.append(lis)
#ds.cases.append([filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v), *blanks])
if includeAttrs:
attrs = variables[v].attributes.data
for a in attrs:
if a.lower() in attrindexes:
ds.cases[-1, attrindexes[a.lower()]+ 3] = attrs[a][0] + attrlength * " "# allow for standard variables
spss.Submit("DATASET CLOSE @__GATHERMD__.")
return addinfo
def addunique(dsdict, key):
"""Add modified version of key to dictionary dsdict. Return generated key.
dsdict is a dictionary whose keys will be lower case strings and whose values are unique SPSS variable names.
duplicate keys are ignored.
keys are automatically prefixed with "*" to separate them from variable names that could be identical."""
key1 = "*" + key.lower()
if key1 in dsdict:
return key1
# make a version of key that is unique in the dictionary values and a legal variable name length
i=0
keymod = spssaux.truncatestring(key, 64)
while keymod.lower() in [k.lower() for k in list(dsdict.values())]:
keymod = spssaux.truncatestring(key, 59) + "_" + str(i)
i += 1
dsdict[key1] = keymod
return key1
escapelist = [('\a', r'\a'), ('\b', r'\b'), ('\f', r'\f'), ('\n', r'\n'), ('\r', r'\r'), ('\t',r'\t'),('\v', r'\v')]
def fixescapes(item):
for esc, repl in escapelist:
item = item.replace(esc, repl)
return item
# Example.
'''dsname = gather(["c:/temp/firstlevel"], filetypes=['spss','sas'], attrlist=['importance', 'relevance', 'VariableLabel'])
spss.Submit(r"""DATASET ACTIVATE %s.
SAVE OUTFILE='c:/temp2/gathered.sav'.""" % dsname)
dsname=gather(["c:/spss16/samples/employee data.sav"])'''
|
IBMPredictiveAnalytics/GATHERMD
|
src/GATHERMD.py
|
Python
|
apache-2.0
| 14,615
|
from pexpect import pxssh
import ConfigParser
class jobChecker():
config_path = './config.lisa.ini'
s = pxssh.pxssh()
def readConfig(self):
self.config.read(self.config_path)
self.hostname = self.config.get('Credentials', 'hostname')
self.username = self.config.get('Credentials', 'username')
self.password = self.config.get('Credentials', 'password')
self.email = self.config.get('Credentials', 'email')
self.command1 = self.config.get('Commands', 'command1')
self.command2 = self.config.get('Commands', 'command2')
self.experimentDown = self.config.get('Message', 'experimentDown')
self.checkerFailed = self.config.get('Message', 'checkerFailed')
def __init__(self):
self.config = ConfigParser.RawConfigParser()
def retrieveOutput(self):
"""Connects to ssh server and inputs commands specified in the config file
"""
self.readConfig()
try:
self.s.login(self.hostname, self.username, self.password)
# self.s.sendline(self.command1) # run a command
# self.s.prompt() # match the prompt
# self.matchIndex(self.experimentDown,4)
# print self.s.before
# print self.s.before
# outputEmpty1 = 'Total Jobs: 0 Active Jobs: 0 Idle Jobs: 0 Blocked Jobs: 0'
# if outputEmpty1 in output1:
# self.errorAlert()
self.s.sendline(self.command2) # run a command
self.s.prompt() # match the prompt
self.matchIndex(self.experimentDown,8)
# outputEmpty2 = ''
# if outputEmpty2 in output2:
# self.errorAlert()
except pxssh.ExceptionPxssh, e:
print "pxssh failed on login."
print str(e)
def matchIndex(self,emailSubject,indexMinLength):
if "main-resub.sh" in self.s.before:
emailSubject = 'main script running'
self.errorAlert(emailSubject)
else:
            emailSubject = 'main script NOT running'
self.errorAlert(emailSubject)
#old:
# lines = self.s.before.split('\r\n') # \n is the linebreak character on unix, i.e. split by newline
# print lines
# if len(lines) < indexMinLength:
# self.errorAlert(emailSubject)
# else:
# pass
# except EOF:
# self.errorAlert(self.checkerFailed)
# except TIMEOUT:
# self.errorAlert(self.checkerFailed)
def errorAlert(self, emailSubject):
"""Sends an email if there are no jobs running
"""
self.s.sendline('date | mail -s "' + emailSubject + '" ' + self.email)
self.s.prompt()
def initialize(self):
self.readConfig()
self.retrieveOutput()
checker = jobChecker()
checker.initialize()
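# The script expects a config.lisa.ini shaped roughly like this (all values
# are placeholders; command2 should surface "main-resub.sh" in its output,
# since matchIndex() looks for that string):
#
#   [Credentials]
#   hostname = lisa.example.org
#   username = jdoe
#   password = secret
#   email = jdoe@example.org
#
#   [Commands]
#   command1 = showq -u jdoe
#   command2 = ps aux | grep main-resub.sh
#
#   [Message]
#   experimentDown = experiment down
#   checkerFailed = job checker failed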
|
metamarkovic/jobChecker
|
jobChecker_lisa.py
|
Python
|
apache-2.0
| 2,923
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program are as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
from tr55.tablelookup import lookup_cn, lookup_bmp_infiltration, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, get_pollutants
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, land_use):
"""
The Pitt Small Storm Hydrology method. The output is a runoff
value in inches.
"""
c1 = +3.638858398e-2
c2 = -1.243464039e-1
c3 = +1.295682223e-1
c4 = +9.375868043e-1
c5 = -2.235170859e-2
c6 = +0.170228067e+0
c7 = -3.971810782e-1
c8 = +3.887275538e-1
c9 = -2.289321859e-2
p4 = pow(precip, 4)
p3 = pow(precip, 3)
p2 = pow(precip, 2)
impervious = (c1 * p3) + (c2 * p2) + (c3 * precip) + c4
urb_grass = (c5 * p4) + (c6 * p3) + (c7 * p2) + (c8 * precip) + c9
runoff_vals = {
'open_water': impervious,
'developed_low': 0.20 * impervious + 0.80 * urb_grass,
'cluster_housing': 0.20 * impervious + 0.80 * urb_grass,
'developed_med': 0.65 * impervious + 0.35 * urb_grass,
'developed_high': impervious,
'developed_open': urb_grass
}
if land_use not in runoff_vals:
raise Exception('Land use %s not a built-type.' % land_use)
else:
return min(runoff_vals[land_use], precip)
def nrcs_cutoff(precip, curve_number):
"""
A function to find the cutoff between precipitation/curve number
pairs that have zero runoff by definition, and those that do not.
"""
if precip <= -1 * (2 * (curve_number - 100.0) / curve_number):
return True
else:
return False
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
"""
The runoff equation from the TR-55 document. The output is a
runoff value in inches.
"""
if land_use == 'cluster_housing':
land_use = 'developed_low'
curve_number = lookup_cn(soil_type, land_use)
if nrcs_cutoff(precip, curve_number):
return 0.0
potential_retention = (1000.0 / curve_number) - 10
initial_abs = 0.2 * potential_retention
precip_minus_initial_abs = precip - initial_abs
numerator = pow(precip_minus_initial_abs, 2)
denominator = (precip_minus_initial_abs + potential_retention)
runoff = numerator / denominator
return min(runoff, precip - evaptrans)
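# Worked example of the equation above (illustrative numbers): for a curve
# number of 80 and precip = 3.0 in, potential_retention = 1000/80 - 10 = 2.5,
# initial_abs = 0.2 * 2.5 = 0.5, so runoff = (3.0 - 0.5)**2 / (3.0 - 0.5 + 2.5)
# = 1.25 inches, before the precip - evaptrans cap in the return statement.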
def simulate_cell_day(precip, evaptrans, cell, cell_count):
"""
Simulate a bunch of cells of the same type during a one-day event.
`precip` is the amount of precipitation in inches.
`evaptrans` is evapotranspiration.
`cell` is a string which contains a soil type and land use
separated by a colon.
`cell_count` is the number of cells to simulate.
The return value is a dictionary of runoff, evapotranspiration, and
infiltration as volumes of water.
"""
def clamp(runoff, et, inf, precip):
"""
        This function ensures that runoff + et + inf <= precip.
NOTE: infiltration is normally independent of the
precipitation level, but this function introduces a slight
dependency (that is, at very low levels of precipitation, this
function can cause infiltration to be smaller than it
        ordinarily would be).
"""
total = runoff + et + inf
if (total > precip):
scale = precip / total
runoff *= scale
et *= scale
inf *= scale
return (runoff, et, inf)
precip = max(0.0, precip)
soil_type, land_use, bmp = cell.lower().split(':')
# If there is no precipitation, then there is no runoff or
# infiltration. There is evapotranspiration, however (it is
# understood that over a period of time, this can lead to the sum
# of the three values exceeding the total precipitation).
if precip == 0.0:
return {
'runoff-vol': 0.0,
# 'et-vol': cell_count * evaptrans,
'et-vol': 0.0,
'inf-vol': 0.0,
}
# Deal with the Best Management Practices (BMPs). For most BMPs,
# the infiltration is read from the table and the runoff is what
# is left over after infiltration and evapotranspiration. Rain
# gardens are treated differently.
if bmp and is_bmp(bmp) and bmp != 'rain_garden':
inf = lookup_bmp_infiltration(soil_type, bmp) # infiltration
runoff = max(0.0, precip - (evaptrans + inf)) # runoff
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf
}
elif bmp and bmp == 'rain_garden':
# Here, return a mixture of 20% ideal rain garden and 80%
# high-intensity residential.
inf = lookup_bmp_infiltration(soil_type, bmp)
runoff = max(0.0, precip - (evaptrans + inf))
hi_res_cell = soil_type + ':developed_med:'
hi_res = simulate_cell_day(precip, evaptrans, hi_res_cell, 1)
hir_run = hi_res['runoff-vol']
hir_et = hi_res['et-vol']
hir_inf = hi_res['inf-vol']
final_runoff = (0.2 * runoff + 0.8 * hir_run)
final_et = (0.2 * evaptrans + 0.8 * hir_et)
final_inf = (0.2 * inf + 0.8 * hir_inf)
final = clamp(final_runoff, final_et, final_inf, precip)
(final_runoff, final_et, final_inf) = final
return {
'runoff-vol': cell_count * final_runoff,
'et-vol': cell_count * final_et,
'inf-vol': cell_count * final_inf
}
# At this point, if the `bmp` string has non-zero length, it is
# equal to either 'no_till' or 'cluster_housing'.
if bmp and bmp != 'no_till' and bmp != 'cluster_housing':
raise KeyError('Unexpected BMP: %s' % bmp)
land_use = bmp or land_use
# When the land use is a built-type and the level of precipitation
# is two inches or less, use the Pitt Small Storm Hydrology Model.
# When the land use is a built-type but the level of precipitation
# is higher, the runoff is the larger of that predicted by the
# Pitt model and NRCS model. Otherwise, return the NRCS amount.
if is_built_type(land_use) and precip <= 2.0:
runoff = runoff_pitt(precip, land_use)
elif is_built_type(land_use):
pitt_runoff = runoff_pitt(2.0, land_use)
nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
runoff = max(pitt_runoff, nrcs_runoff)
else:
runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
inf = max(0.0, precip - (evaptrans + runoff))
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf,
}
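# e.g. simulate_cell_day(precip=1.0, evaptrans=0.1, cell='a:developed_med:', cell_count=10)
# returns the 'runoff-vol', 'et-vol' and 'inf-vol' totals for ten such cells
# (soil type 'a' is assumed here to be a valid key in the lookup tables).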
def create_unmodified_census(census):
"""
This creates a cell census, ignoring any modifications. The
output is suitable for use with `simulate_water_quality`.
"""
unmod = copy.deepcopy(census)
unmod.pop('modifications', None)
return unmod
def create_modified_census(census):
"""
This creates a cell census, with modifications, that is suitable
for use with `simulate_water_quality`.
For every type of cell that undergoes modification, the
modifications are indicated with a sub-distribution under that
cell type.
"""
mod = copy.deepcopy(census)
mod.pop('modifications', None)
for (cell, subcensus) in mod['distribution'].items():
n = subcensus['cell_count']
changes = {
'distribution': {
cell: {
'distribution': {
cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
for modification in (census.get('modifications') or []):
for (orig_cell, subcensus) in modification['distribution'].items():
n = subcensus['cell_count']
soil1, land1 = orig_cell.split(':')
soil2, land2, bmp = modification['change'].split(':')
changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
changes = {
'distribution': {
orig_cell: {
'distribution': {
orig_cell: {'cell_count': -n},
changed_cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
return mod
def simulate_water_quality(tree, cell_res, fn,
current_cell=None, precolumbian=False):
"""
Perform a water quality simulation by doing simulations on each of
the cell types (leaves), then adding them together by summing the
values of a node's subtrees and storing them at that node.
`tree` is the (sub)tree of cell distributions that is currently
under consideration.
`cell_res` is the size of each cell (used for turning inches of
water into volumes of water).
`fn` is a function that takes a cell type and a number of cells
and returns a dictionary containing runoff, et, and inf as
volumes.
`current_cell` is the cell type for the present node.
"""
# Internal node.
if 'cell_count' in tree and 'distribution' in tree:
n = tree['cell_count']
# simulate subtrees
if n != 0:
tally = {}
for cell, subtree in tree['distribution'].items():
simulate_water_quality(subtree, cell_res, fn,
cell, precolumbian)
subtree_ex_dist = subtree.copy()
subtree_ex_dist.pop('distribution', None)
tally = dict_plus(tally, subtree_ex_dist)
tree.update(tally) # update this node
# effectively a leaf
elif n == 0:
for pol in get_pollutants():
tree[pol] = 0.0
# Leaf node.
elif 'cell_count' in tree and 'distribution' not in tree:
# the number of cells covered by this leaf
n = tree['cell_count']
# canonicalize the current_cell string
split = current_cell.split(':')
if (len(split) == 2):
split.append('')
if precolumbian:
split[1] = make_precolumbian(split[1])
current_cell = '%s:%s:%s' % tuple(split)
# run the runoff model on this leaf
result = fn(current_cell, n) # runoff, et, inf
tree.update(result)
# perform water quality calculation
if n != 0:
soil_type, land_use, bmp = split
runoff_per_cell = result['runoff-vol'] / n
liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
for pol in get_pollutants():
tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
"""
Remove volume units and replace them with inches.
"""
if 'cell_count' in tree:
if tree['cell_count'] > 0:
n = tree['cell_count']
tree['runoff'] = tree['runoff-vol'] / n
tree['et'] = tree['et-vol'] / n
tree['inf'] = tree['inf-vol'] / n
else:
tree['runoff'] = 0
tree['et'] = 0
tree['inf'] = 0
tree.pop('runoff-vol', None)
tree.pop('et-vol', None)
tree.pop('inf-vol', None)
if 'distribution' in tree:
for subtree in tree['distribution'].values():
postpass(subtree)
def simulate_modifications(census, fn, cell_res, precolumbian=False):
"""
Simulate effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`fn` is as described in `simulate_water_quality`.
`cell_res` is as described in `simulate_water_quality`.
"""
mod = create_modified_census(census)
simulate_water_quality(mod, cell_res, fn, precolumbian=precolumbian)
postpass(mod)
unmod = create_unmodified_census(census)
simulate_water_quality(unmod, cell_res, fn, precolumbian=precolumbian)
postpass(unmod)
return {
'unmodified': unmod,
'modified': mod
}
def simulate_day(census, precip, cell_res=10, precolumbian=False):
"""
Simulate a day, including water quality effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`cell_res` is as described in `simulate_water_quality`.
`precolumbian` indicates that artificial types should be turned
into forest.
"""
et_max = 0.207
if 'modifications' in census:
verify_census(census)
def fn(cell, cell_count):
# Compute et for cell type
split = cell.split(':')
if (len(split) == 2):
(land_use, bmp) = split
else:
(_, land_use, bmp) = split
et = et_max * lookup_ki(bmp or land_use)
# Simulate the cell for one day
return simulate_cell_day(precip, et, cell, cell_count)
return simulate_modifications(census, fn, cell_res, precolumbian)
def verify_census(census):
"""
Assures that there is no soil type/land cover pair
in a modification census that isn't in the AoI census.
"""
for modification in census['modifications']:
for land_cover in modification['distribution']:
if land_cover not in census['distribution']:
raise ValueError("Invalid modification census")
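# Minimal end-to-end sketch (the census layout follows the keys read by the
# functions above, but the soil-type/land-use cell keys are assumptions and
# must exist in tr55.tablelookup's tables):
#
#   census = {
#       'cell_count': 2,
#       'distribution': {
#           'a:developed_med': {'cell_count': 1},
#           'a:developed_low': {'cell_count': 1},
#       },
#   }
#   result = simulate_day(census, precip=2.0)
#   result['unmodified']['runoff'], result['modified']['runoff']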
|
lliss/tr-55
|
tr55/model.py
|
Python
|
apache-2.0
| 14,151
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.calvinlib import base_calvinlib_object
from calvin.utilities.calvinlogger import get_logger
import operator
_log = get_logger(__name__)
class Arithmetic(base_calvinlib_object.BaseCalvinlibObject):
"""
Operations on numbers
"""
init_schema = {
"description": "Initialize module",
}
relation_schema = {
"description": "Get corresponding relation: >, <, =, !=, >=, <= (with obvious interpretation.)",
"type": "object",
"properties": {
"rel": { "type": "string" }
}
}
operator_schema = {
"description": "Get corresponding operator: +, -, /, *, div, mod (with obvious interpretation.)",
"type": "object",
"properties": {
"op": { "type": "string" }
}
}
eval_schema = {
"description": "Evaluate expression, returning result. Bindings should be a dictionary of variable mappings to use in evaluation",
"type": "object",
"properties": {
"expr": { "type": "string" },
"bindings": { "type": "object" }
}
}
def init(self):
pass
def relation(self, rel):
try:
return {
'<': operator.lt,
'<=': operator.le,
'=': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}[rel]
except KeyError:
_log.warning("Invalid operator '{}', will always return 'false'".format(rel))
return lambda x,y: False
def operator(self, op):
try:
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.div,
'div': operator.floordiv,
'mod': operator.mod,
}[op]
except KeyError:
_log.warning("Invalid operator '{}', will always produce 'null'".format(op))
return lambda x,y: None
def eval(self, expr, bindings):
try:
return eval(expr, {}, bindings)
except Exception as e:
return str(e)
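# Behaviour sketch (assuming an `arith` instance obtained through the usual
# calvinlib machinery; values are illustrative):
#   arith.operator('+')(1, 2)               # -> 3
#   arith.relation('<=')(2, 3)              # -> True
#   arith.eval('x + y', {'x': 1, 'y': 2})   # -> 3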
|
EricssonResearch/calvin-base
|
calvin/runtime/south/calvinlib/mathlib/Arithmetic.py
|
Python
|
apache-2.0
| 2,888
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetChannelPartnerLink
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-channel
# [START cloudchannel_v1_generated_CloudChannelService_GetChannelPartnerLink_sync]
from google.cloud import channel_v1
def sample_get_channel_partner_link():
# Create a client
client = channel_v1.CloudChannelServiceClient()
# Initialize request argument(s)
request = channel_v1.GetChannelPartnerLinkRequest(
name="name_value",
)
# Make the request
response = client.get_channel_partner_link(request=request)
# Handle the response
print(response)
# [END cloudchannel_v1_generated_CloudChannelService_GetChannelPartnerLink_sync]
|
googleapis/python-channel
|
samples/generated_samples/cloudchannel_v1_generated_cloud_channel_service_get_channel_partner_link_sync.py
|
Python
|
apache-2.0
| 1,520
|
"""Compat module to handle files security on Windows and Linux"""
from __future__ import absolute_import
import errno
import os # pylint: disable=os-module-forbidden
import stat
import sys
from typing import List
try:
import ntsecuritycon
import win32security
import win32con
import win32api
import win32file
import pywintypes
import winerror
except ImportError:
POSIX_MODE = True
else:
POSIX_MODE = False
# Windows umask implementation, since Windows does not have a concept of umask by default.
# We choose 022 as initial value since it is the default one on most Linux distributions, and
# it is a decent choice to not have write permissions for group owner and everybody by default.
# We use a class here to avoid needing to define a global variable, and the potential mistakes
# that could happen with this kind of pattern.
class _WindowsUmask:
"""Store the current umask to apply on Windows"""
def __init__(self):
self.mask = 0o022
_WINDOWS_UMASK = _WindowsUmask()
def chmod(file_path: str, mode: int) -> None:
"""
Apply a POSIX mode on given file_path:
- for Linux, the POSIX mode will be directly applied using chmod,
- for Windows, the POSIX mode will be translated into a Windows DACL that make sense for
Certbot context, and applied to the file using kernel calls.
The definition of the Windows DACL that correspond to a POSIX mode, in the context of Certbot,
is explained at https://github.com/certbot/certbot/issues/6356 and is implemented by the
method `_generate_windows_flags()`.
:param str file_path: Path of the file
:param int mode: POSIX mode to apply
"""
if POSIX_MODE:
os.chmod(file_path, mode)
else:
_apply_win_mode(file_path, mode)
def umask(mask: int) -> int:
"""
Set the current numeric umask and return the previous umask. On Linux, the built-in umask
method is used. On Windows, our Certbot-side implementation is used.
:param int mask: The user file-creation mode mask to apply.
:rtype: int
:return: The previous umask value.
"""
if POSIX_MODE:
return os.umask(mask)
previous_umask = _WINDOWS_UMASK.mask
_WINDOWS_UMASK.mask = mask
return previous_umask
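# Example (a sketch; the path is illustrative): restrict files created through
# the open() wrapper defined below to the owner on both platforms, then
# restore the previous mask.
#
#   previous = umask(0o077)
#   try:
#       fd = open('/etc/letsencrypt/accounts/private_key.json',
#                 os.O_CREAT | os.O_WRONLY, 0o666)   # effective mode 0o600 here
#   finally:
#       umask(previous)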
# One could ask why there is no copy_ownership() function, or even a reimplementation
# of os.chown() that would modify the ownership of file without touching the mode itself.
# This is because on Windows, it would require recalculating the existing DACL against
# the new owner, since the DACL is composed of ACEs that targets a specific user, not dynamically
# the current owner of a file. This action would be necessary to keep consistency between
# the POSIX mode applied to the file and the current owner of this file.
# Since copying and editing arbitrary DACL is very difficult, and since we actually know
# the mode to apply at the time the owner of a file should change, it is easier to just
# change the owner, then reapply the known mode, as copy_ownership_and_apply_mode() does.
def copy_ownership_and_apply_mode(src: str, dst: str, mode: int,
copy_user: bool, copy_group: bool) -> None:
"""
Copy ownership (user and optionally group on Linux) from the source to the
destination, then apply given mode in compatible way for Linux and Windows.
This replaces the os.chown command.
:param str src: Path of the source file
:param str dst: Path of the destination file
:param int mode: Permission mode to apply on the destination file
:param bool copy_user: Copy user if `True`
:param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
"""
if POSIX_MODE:
stats = os.stat(src)
user_id = stats.st_uid if copy_user else -1
group_id = stats.st_gid if copy_group else -1
# On Windows, os.chown does not exist. This is checked through POSIX_MODE value,
# but MyPy/PyLint does not know it and raises an error here on Windows.
# We disable specifically the check to fix the issue.
os.chown(dst, user_id, group_id)
elif copy_user:
# There is no group handling in Windows
_copy_win_ownership(src, dst)
chmod(dst, mode)
# Quite similar to copy_ownership_and_apply_mode, but this time the DACL is copied from
# the source file on Windows. The DACL stays consistent with the dynamic rights of the
# equivalent POSIX mode, because ownership and mode are copied altogether on the destination
# file, so no recomputing of the DACL against the new owner is needed, as it would be
# for a copy_ownership alone method.
def copy_ownership_and_mode(src: str, dst: str,
copy_user: bool = True, copy_group: bool = True) -> None:
"""
Copy ownership (user and optionally group on Linux) and mode/DACL
from the source to the destination.
:param str src: Path of the source file
:param str dst: Path of the destination file
:param bool copy_user: Copy user if `True`
:param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
"""
if POSIX_MODE:
# On Linux, we just delegate to chown and chmod.
stats = os.stat(src)
user_id = stats.st_uid if copy_user else -1
group_id = stats.st_gid if copy_group else -1
os.chown(dst, user_id, group_id)
chmod(dst, stats.st_mode)
else:
if copy_user:
# There is no group handling in Windows
_copy_win_ownership(src, dst)
_copy_win_mode(src, dst)
def check_mode(file_path: str, mode: int) -> bool:
"""
Check if the given mode matches the permissions of the given file.
    On Linux, a direct comparison will be made; on Windows, the mode will be compared against
the security model.
:param str file_path: Path of the file
:param int mode: POSIX mode to test
:rtype: bool
:return: True if the POSIX mode matches the file permissions
"""
if POSIX_MODE:
return stat.S_IMODE(os.stat(file_path).st_mode) == mode
return _check_win_mode(file_path, mode)
def check_owner(file_path: str) -> bool:
"""
Check if given file is owned by current user.
:param str file_path: File path to check
:rtype: bool
:return: True if given file is owned by current user, False otherwise.
"""
if POSIX_MODE:
return os.stat(file_path).st_uid == os.getuid()
# Get owner sid of the file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
# Compare sids
return _get_current_user() == user
def check_permissions(file_path: str, mode: int) -> bool:
"""
Check if given file has the given mode and is owned by current user.
:param str file_path: File path to check
:param int mode: POSIX mode to check
:rtype: bool
:return: True if file has correct mode and owner, False otherwise.
"""
return check_owner(file_path) and check_mode(file_path, mode)
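# Illustrative sketch, not part of the original module: check_permissions() combines the
# owner and mode checks above, which lets a caller refuse to use a secret file that is not
# locked down. The path and the helper name are hypothetical examples.
def _example_require_private_file(secret_path="/etc/example/secret.key"):
    if not check_permissions(secret_path, 0o600):
        raise ValueError("{0} must be owned by the current user with mode 0600"
                         .format(secret_path))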
def open(file_path: str, flags: int, mode: int = 0o777) -> int: # pylint: disable=redefined-builtin
"""
Wrapper of original os.open function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int flags: Flags to apply on file while opened
:param int mode: POSIX mode to apply on file when opened,
Python defaults will be applied if ``None``
:returns: the file descriptor to the opened file
:rtype: int
:raise: OSError(errno.EEXIST) if the file already exists and os.O_CREAT & os.O_EXCL are set,
OSError(errno.EACCES) on Windows if the file already exists and is a directory, and
os.O_CREAT is set.
"""
if POSIX_MODE:
# On Linux, invoke os.open directly.
return os.open(file_path, flags, mode)
# Windows: handle creation of the file atomically with proper permissions.
if flags & os.O_CREAT:
# If os.O_EXCL is set, we will use the "CREATE_NEW", that will raise an exception if
# file exists, matching the API contract of this bit flag. Otherwise, we use
# "CREATE_ALWAYS" that will always create the file whether it exists or not.
disposition = win32con.CREATE_NEW if flags & os.O_EXCL else win32con.CREATE_ALWAYS
attributes = win32security.SECURITY_ATTRIBUTES()
security = attributes.SECURITY_DESCRIPTOR
user = _get_current_user()
dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
# We set second parameter to 0 (`False`) to say that this security descriptor is
# NOT constructed from a default mechanism, but is explicitly set by the user.
# See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptorowner # pylint: disable=line-too-long
security.SetSecurityDescriptorOwner(user, 0)
# We set first parameter to 1 (`True`) to say that this security descriptor contains
# a DACL. Otherwise second and third parameters are ignored.
# We set third parameter to 0 (`False`) to say that this security descriptor is
# NOT constructed from a default mechanism, but is explicitly set by the user.
# See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptordacl # pylint: disable=line-too-long
security.SetSecurityDescriptorDacl(1, dacl, 0)
handle = None
try:
handle = win32file.CreateFile(file_path, win32file.GENERIC_READ,
win32file.FILE_SHARE_READ & win32file.FILE_SHARE_WRITE,
attributes, disposition, 0, None)
except pywintypes.error as err:
            # Translate native Windows errors into Python errors to be consistent with the API
            # of os.open when the file already exists or is locked.
if err.winerror == winerror.ERROR_FILE_EXISTS:
raise OSError(errno.EEXIST, err.strerror)
if err.winerror == winerror.ERROR_SHARING_VIOLATION:
raise OSError(errno.EACCES, err.strerror)
raise err
finally:
if handle:
handle.Close()
# At this point, the file that did not exist has been created with proper permissions,
# so os.O_CREAT and os.O_EXCL are not needed anymore. We remove them from the flags to
# avoid a FileExists exception before calling os.open.
return os.open(file_path, flags ^ os.O_CREAT ^ os.O_EXCL)
# Windows: general case, we call os.open, let exceptions be thrown, then chmod if all is fine.
handle = os.open(file_path, flags)
chmod(file_path, mode)
return handle
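# Illustrative sketch, not part of the original module: creating a private file atomically
# with the wrapper above. With os.O_CREAT | os.O_EXCL the call fails with
# OSError(errno.EEXIST) if the file already exists, on Linux and Windows alike.
# The path and the helper name are hypothetical examples.
def _example_create_private_file(path="/var/lib/example/account.json"):
    file_descriptor = open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
    try:
        os.write(file_descriptor, b'{}')
    finally:
        os.close(file_descriptor)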
def makedirs(file_path: str, mode: int = 0o777) -> None:
"""
Rewrite of original os.makedirs function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int mode: POSIX mode to apply on leaf directory when created, Python defaults
will be applied if ``None``
"""
current_umask = umask(0)
try:
# Since Python 3.7, os.makedirs does not set the given mode to the intermediate
# directories that could be created in the process. To keep things safe and consistent
# on all Python versions, we set the umask accordingly to have all directories
# (intermediate and leaf) created with the given mode.
umask(current_umask | 0o777 ^ mode)
if POSIX_MODE:
return os.makedirs(file_path, mode)
orig_mkdir_fn = os.mkdir
try:
# As we know that os.mkdir is called internally by os.makedirs, we will swap the
# function in os module for the time of makedirs execution on Windows.
os.mkdir = mkdir # type: ignore
return os.makedirs(file_path, mode)
finally:
os.mkdir = orig_mkdir_fn
finally:
umask(current_umask)
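# Illustrative sketch, not part of the original module: makedirs() above applies the
# requested mode to every directory it creates, intermediate ones included, by adjusting
# the umask for the duration of the call. The path and the helper name are hypothetical.
def _example_create_restricted_tree(path="/var/lib/example/archive/keys"):
    if not os.path.isdir(path):
        makedirs(path, 0o700)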
def mkdir(file_path: str, mode: int = 0o777) -> None:
"""
Rewrite of original os.mkdir function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int mode: POSIX mode to apply on directory when created, Python defaults
will be applied if ``None``
"""
if POSIX_MODE:
return os.mkdir(file_path, mode)
attributes = win32security.SECURITY_ATTRIBUTES()
security = attributes.SECURITY_DESCRIPTOR
user = _get_current_user()
dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
security.SetSecurityDescriptorOwner(user, False)
security.SetSecurityDescriptorDacl(1, dacl, 0)
try:
win32file.CreateDirectory(file_path, attributes)
except pywintypes.error as err:
        # Translate the native Windows error into a Python error to be consistent with the API
        # of os.mkdir when the directory already exists.
if err.winerror == winerror.ERROR_ALREADY_EXISTS:
raise OSError(errno.EEXIST, err.strerror, file_path, err.winerror)
raise err
return None
def replace(src: str, dst: str) -> None:
"""
    Rename a file to a destination path and handle situations where the destination exists.
:param str src: The current file path.
:param str dst: The new file path.
"""
if hasattr(os, 'replace'):
# Use replace if possible. Since we don't support Python 2 on Windows
# and os.replace() was added in Python 3.3, we can assume that
# os.replace() is always available on Windows.
getattr(os, 'replace')(src, dst)
else:
# Otherwise, use os.rename() that behaves like os.replace() on Linux.
os.rename(src, dst)
def realpath(file_path: str) -> str:
"""
Find the real path for the given path. This method resolves symlinks, including
    recursive symlinks, and is protected against symlinks that create an infinite loop.
:param str file_path: The path to resolve
:returns: The real path for the given path
:rtype: str
"""
original_path = file_path
# Since Python 3.8, os.path.realpath also resolves symlinks on Windows.
if POSIX_MODE or sys.version_info >= (3, 8):
path = os.path.realpath(file_path)
if os.path.islink(path):
# If path returned by realpath is still a link, it means that it failed to
# resolve the symlink because of a loop.
# See realpath code: https://github.com/python/cpython/blob/master/Lib/posixpath.py
raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
return path
inspected_paths: List[str] = []
while os.path.islink(file_path):
link_path = file_path
file_path = os.readlink(file_path)
if not os.path.isabs(file_path):
file_path = os.path.join(os.path.dirname(link_path), file_path)
if file_path in inspected_paths:
raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
inspected_paths.append(file_path)
return os.path.abspath(file_path)
def readlink(link_path: str) -> str:
"""
Return a string representing the path to which the symbolic link points.
:param str link_path: The symlink path to resolve
    :return: The path the symlink points to
    :rtype: str
    :raise: ValueError if a long path (more than 260 characters) is encountered on Windows
"""
path = os.readlink(link_path)
if POSIX_MODE or not path.startswith('\\\\?\\'):
return path
# At this point, we know we are on Windows and that the path returned uses
# the extended form which is done for all paths in Python 3.8+
    # Max length of a normal path is 260 characters on Windows, including the non-printable
    # termination character "<NUL>". The termination character is not included in Python
    # strings, giving a max length of 259 characters; with the 4 extra characters of the
    # extended form prefix, that makes an effective max length of 263 characters for a string
    # representing a normal path.
if len(path) < 264:
return path[4:]
raise ValueError("Long paths are not supported by Certbot on Windows.")
# On Windows, is_executable run from an unprivileged shell may claim that a path is
# executable when it is executable only if run from a privileged shell. This happens
# because GetEffectiveRightsFromAcl calculates effective rights without taking into
# account whether the target user currently holds elevated privileges or not. However,
# this is not a problem since Certbot always requires a privileged shell, so the user
# will always benefit from the highest (privileged) set of permissions on a given file.
def is_executable(path: str) -> bool:
"""
Is path an executable file?
:param str path: path to test
:return: True if path is an executable file
:rtype: bool
"""
if POSIX_MODE:
return os.path.isfile(path) and os.access(path, os.X_OK)
return _win_is_executable(path)
def has_world_permissions(path: str) -> bool:
"""
Check if everybody/world has any right (read/write/execute) on a file given its path.
:param str path: path to test
:return: True if everybody/world has any right to the file
:rtype: bool
"""
if POSIX_MODE:
return bool(stat.S_IMODE(os.stat(path).st_mode) & stat.S_IRWXO)
security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
return bool(dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': win32security.ConvertStringSidToSid('S-1-1-0'),
}))
def compute_private_key_mode(old_key: str, base_mode: int) -> int:
"""
Calculate the POSIX mode to apply to a private key given the previous private key.
:param str old_key: path to the previous private key
:param int base_mode: the minimum modes to apply to a private key
:return: the POSIX mode to apply
:rtype: int
"""
if POSIX_MODE:
# On Linux, we keep read/write/execute permissions
# for group and read permissions for everybody.
old_mode = (stat.S_IMODE(os.stat(old_key).st_mode) &
(stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH))
return base_mode | old_mode
# On Windows, the mode returned by os.stat is not reliable,
# so we do not keep any permission from the previous private key.
return base_mode
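# Worked example, not part of the original module: with base_mode 0o600 and a previous key
# stored with mode 0o640 on Linux, the preserved bits are 0o640 & 0o074 == 0o040, so the
# result is 0o600 | 0o040 == 0o640; on Windows the base mode 0o600 is returned unchanged.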
def has_same_ownership(path1: str, path2: str) -> bool:
"""
Return True if the ownership of two files given their respective path is the same.
On Windows, ownership is checked against owner only, since files do not have a group owner.
:param str path1: path to the first file
:param str path2: path to the second file
:return: True if both files have the same ownership, False otherwise
:rtype: bool
"""
if POSIX_MODE:
stats1 = os.stat(path1)
stats2 = os.stat(path2)
return (stats1.st_uid, stats1.st_gid) == (stats2.st_uid, stats2.st_gid)
security1 = win32security.GetFileSecurity(path1, win32security.OWNER_SECURITY_INFORMATION)
user1 = security1.GetSecurityDescriptorOwner()
security2 = win32security.GetFileSecurity(path2, win32security.OWNER_SECURITY_INFORMATION)
user2 = security2.GetSecurityDescriptorOwner()
return user1 == user2
def has_min_permissions(path: str, min_mode: int) -> bool:
"""
Check if a file given its path has at least the permissions defined by the given minimal mode.
On Windows, group permissions are ignored since files do not have a group owner.
:param str path: path to the file to check
:param int min_mode: the minimal permissions expected
:return: True if the file matches the minimal permissions expectations, False otherwise
:rtype: bool
"""
if POSIX_MODE:
st_mode = os.stat(path).st_mode
return st_mode == st_mode | min_mode
# Resolve symlinks, to get a consistent result with os.stat on Linux,
# that follows symlinks by default.
path = realpath(path)
# Get owner sid of the file
security = win32security.GetFileSecurity(
path, win32security.OWNER_SECURITY_INFORMATION | win32security.DACL_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
dacl = security.GetSecurityDescriptorDacl()
min_dacl = _generate_dacl(user, min_mode)
for index in range(min_dacl.GetAceCount()):
min_ace = min_dacl.GetAce(index)
# On a given ACE, index 0 is the ACE type, 1 is the permission mask, and 2 is the SID.
# See: http://timgolden.me.uk/pywin32-docs/PyACL__GetAce_meth.html
mask = min_ace[1]
user = min_ace[2]
effective_mask = dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': user,
})
if effective_mask != effective_mask | mask:
return False
return True
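# Worked example, not part of the original module: on Linux, has_min_permissions(path, 0o600)
# returns True for a file with mode 0o640, because 0o640 == 0o640 | 0o600, and False for a
# file with mode 0o400, because 0o400 != 0o400 | 0o600.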
def _win_is_executable(path):
if not os.path.isfile(path):
return False
security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
mode = dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': _get_current_user(),
})
return mode & ntsecuritycon.FILE_GENERIC_EXECUTE == ntsecuritycon.FILE_GENERIC_EXECUTE
def _apply_win_mode(file_path, mode):
"""
This function converts the given POSIX mode into a Windows ACL list, and applies it to the
    file given its path. If the given path is a symbolic link, it will be resolved so that the
    mode is applied to the target file.
"""
file_path = realpath(file_path)
# Get owner sid of the file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
# New DACL, that will overwrite existing one (including inherited permissions)
dacl = _generate_dacl(user, mode)
# Apply the new DACL
security.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(file_path, win32security.DACL_SECURITY_INFORMATION, security)
def _generate_dacl(user_sid, mode, mask=None):
if mask:
mode = mode & (0o777 - mask)
analysis = _analyze_mode(mode)
# Get standard accounts from "well-known" sid
# See the list here:
# https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
system = win32security.ConvertStringSidToSid('S-1-5-18')
admins = win32security.ConvertStringSidToSid('S-1-5-32-544')
everyone = win32security.ConvertStringSidToSid('S-1-1-0')
# New dacl, without inherited permissions
dacl = win32security.ACL()
# If user is already system or admins, any ACE defined here would be superseded by
# the full control ACE that will be added after.
if user_sid not in [system, admins]:
# Handle user rights
user_flags = _generate_windows_flags(analysis['user'])
if user_flags:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, user_flags, user_sid)
# Handle everybody rights
everybody_flags = _generate_windows_flags(analysis['all'])
if everybody_flags:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, everybody_flags, everyone)
# Handle administrator rights
full_permissions = _generate_windows_flags({'read': True, 'write': True, 'execute': True})
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, system)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, admins)
return dacl
def _analyze_mode(mode):
return {
'user': {
'read': mode & stat.S_IRUSR,
'write': mode & stat.S_IWUSR,
'execute': mode & stat.S_IXUSR,
},
'all': {
'read': mode & stat.S_IROTH,
'write': mode & stat.S_IWOTH,
'execute': mode & stat.S_IXOTH,
},
}
def _copy_win_ownership(src, dst):
# Resolve symbolic links
src = realpath(src)
security_src = win32security.GetFileSecurity(src, win32security.OWNER_SECURITY_INFORMATION)
user_src = security_src.GetSecurityDescriptorOwner()
security_dst = win32security.GetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION)
# Second parameter indicates, if `False`, that the owner of the file is not provided by some
# default mechanism, but is explicitly set instead. This is obviously what we are doing here.
security_dst.SetSecurityDescriptorOwner(user_src, False)
win32security.SetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION, security_dst)
def _copy_win_mode(src, dst):
# Resolve symbolic links
src = realpath(src)
# Copy the DACL from src to dst.
security_src = win32security.GetFileSecurity(src, win32security.DACL_SECURITY_INFORMATION)
dacl = security_src.GetSecurityDescriptorDacl()
security_dst = win32security.GetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION)
security_dst.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION, security_dst)
def _generate_windows_flags(rights_desc):
# Some notes about how each POSIX right is interpreted.
#
# For the rights read and execute, we have a pretty bijective relation between
# POSIX flags and their generic counterparts on Windows, so we use them directly
# (respectively ntsecuritycon.FILE_GENERIC_READ and ntsecuritycon.FILE_GENERIC_EXECUTE).
#
# But ntsecuritycon.FILE_GENERIC_WRITE does not correspond to what one could expect from a
# write access on Linux: for Windows, FILE_GENERIC_WRITE does not include delete, move or
# rename. This is something that requires ntsecuritycon.FILE_ALL_ACCESS.
    # So to reproduce the POSIX write right, we apply ntsecuritycon.FILE_ALL_ACCESS
    # minus the rights corresponding to POSIX read and POSIX execute.
#
# Finally, having read + write + execute gives a ntsecuritycon.FILE_ALL_ACCESS,
# so a "Full Control" on the file.
#
# A complete list of the rights defined on NTFS can be found here:
# https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2003/cc783530(v=ws.10)#permissions-for-files-and-folders
flag = 0
if rights_desc['read']:
flag = flag | ntsecuritycon.FILE_GENERIC_READ
if rights_desc['write']:
flag = flag | (ntsecuritycon.FILE_ALL_ACCESS
^ ntsecuritycon.FILE_GENERIC_READ
^ ntsecuritycon.FILE_GENERIC_EXECUTE)
if rights_desc['execute']:
flag = flag | ntsecuritycon.FILE_GENERIC_EXECUTE
return flag
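# Worked example, not part of the original module: for the user part of mode 0o600
# (read and write, no execute), the flag computed above is
# ntsecuritycon.FILE_GENERIC_READ | (ntsecuritycon.FILE_ALL_ACCESS
#                                    ^ ntsecuritycon.FILE_GENERIC_READ
#                                    ^ ntsecuritycon.FILE_GENERIC_EXECUTE),
# while read + write + execute together collapse to ntsecuritycon.FILE_ALL_ACCESS,
# as noted in the comments above.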
def _check_win_mode(file_path, mode):
# Resolve symbolic links
file_path = realpath(file_path)
# Get current dacl file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION
| win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
# Get current file owner sid
user = security.GetSecurityDescriptorOwner()
if not dacl:
# No DACL means full control to everyone
# This is not a deterministic permissions set.
return False
# Calculate the target dacl
ref_dacl = _generate_dacl(user, mode)
return _compare_dacls(dacl, ref_dacl)
def _compare_dacls(dacl1, dacl2):
"""
    This method compares the two given DACLs to check if they are identical.
    Identical means here that they contain the same set of ACEs in the same order.
"""
return ([dacl1.GetAce(index) for index in range(dacl1.GetAceCount())] ==
[dacl2.GetAce(index) for index in range(dacl2.GetAceCount())])
def _get_current_user():
"""
Return the pySID corresponding to the current user.
"""
# We craft the account_name ourselves instead of calling for instance win32api.GetUserNameEx,
# because this function returns nonsense values when Certbot is run under NT AUTHORITY\SYSTEM.
# To run Certbot under NT AUTHORITY\SYSTEM, you can open a shell using the instructions here:
# https://blogs.technet.microsoft.com/ben_parker/2010/10/27/how-do-i-run-powershell-execommand-prompt-as-the-localsystem-account-on-windows-7/
account_name = r"{0}\{1}".format(win32api.GetDomainName(), win32api.GetUserName())
# LookupAccountName() expects the system name as first parameter. By passing None to it,
# we instruct Windows to first search the matching account in the machine local accounts,
# then into the primary domain accounts, if the machine has joined a domain, then finally
# into the trusted domains accounts. This is the preferred lookup mechanism to use in Windows
# if there is no reason to use a specific lookup mechanism.
# See https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-lookupaccountnamea
return win32security.LookupAccountName(None, account_name)[0]
|
stweil/letsencrypt
|
certbot/certbot/compat/filesystem.py
|
Python
|
apache-2.0
| 29,674
|
from . import logger
from . import TemplateHelper as TH
from . import RegexHelper as re
from . import Actions as A
from . import ClassActions as CA
from . import SublimeHelper as SH
from .SublimeHelper import ViewHelper as VH
log = logger.get(__name__)
class PropertyAction(A.Action):
"""Stores info on property actions"""
def __init__(self, name):
super(PropertyAction, self).__init__(name)
def get_class_code(self):
return self.full_region(self.code_region)
def get_prop_name(self):
result = re.findPropName(self.to_text(self.code_region))
log.debug('property name >> ' + result)
return result
def get_prop_type(self):
result = re.findPropType(self.to_text(self.code_region))
log.debug('property type >> ' + result)
return result
def is_prop_static(self):
result = re.findPropIsStatic(self.to_text(self.code_region))
        log.debug('property static >> ' + str(result))
return result
def generate_code(self):
raise Exception("generate_code not defined")
def is_applicable(self):
return re.is_prop_def(self.to_text(), True)
class AddGetterAction(PropertyAction):
def __init__(self):
super(AddGetterAction, self).__init__(A.ADD_GETTER)
def generate_code(self, edit):
template = TH.Template('other/getter')
template.addVar('type', self.get_prop_type())
template.addVar('varName', self.get_prop_name())
template.addVar('static', self.is_prop_static())
template.addVar('indent', self.get_inner_indent())
self.view.insert(edit, self.find_end_of_class().begin(), template.compile())
def is_applicable(self):
result = re.is_prop_def(self.to_text())
return result and re.findGetter(self.to_text(self.get_class_code()), self.get_prop_name()) is None
class AddSetterAction(PropertyAction):
def __init__(self):
super(AddSetterAction, self).__init__(A.ADD_SETTER)
def generate_code(self, edit):
template = TH.Template('other/setter')
template.addVar('type', self.get_prop_type())
template.addVar('varName', self.get_prop_name())
template.addVar('static', self.is_prop_static())
template.addVar('indent', self.get_inner_indent())
self.view.insert(edit, self.find_end_of_class().begin(), template.compile())
def is_applicable(self):
result = re.is_prop_def(self.to_text())
return result and re.findSetter(self.to_text(self.get_class_code()), self.get_prop_name()) is None
class AddGetterSetterAction(PropertyAction):
def __init__(self):
super(AddGetterSetterAction, self).__init__(A.ADD_GETTER_SETTER)
self.getter = AddGetterAction()
self.setter = AddSetterAction()
def setView(self, view):
super(AddGetterSetterAction, self).setView(view)
self.getter.setView(view)
self.setter.setView(view)
def setCode(self, code_region):
super(AddGetterSetterAction, self).setCode(code_region)
self.getter.setCode(code_region)
self.setter.setCode(code_region)
def is_applicable(self):
return self.getter.is_applicable() and self.setter.is_applicable()
def generate_code(self, edit):
self.getter.generate_code(edit)
self.setter.generate_code(edit)
class AddConstructorParameterAction(PropertyAction):
def __init__(self):
super(AddConstructorParameterAction, self).__init__(A.ADD_CONSTRUCTOR_PARAMETER)
def run(self, edit, args):
if 'constr_start' in args:
self.generate_code(edit, args['constr_start'])
else:
self.choose_constructor(edit)
def choose_constructor(self, edit):
constr_regions = self.find_constructors()
if not constr_regions:
constructorAction = CA.AddConstructorAction()
constructorAction.setView(self.view)
constructorAction.setCode(self.find_class_def())
constructorAction.generate_code(edit)
constr_regions = self.find_constructors()
if 1 == len(constr_regions):
self.generate_code(edit, constr_regions[0])
else:
constrs = []
for constr_region in constr_regions:
constrs.append(self.to_text(self.view.line(constr_region.begin())).strip())
self.vh.open_menu(list(constrs), self.handle_constr_choice)
def handle_constr_choice(self, index):
if -1 == index:
return
constr_regions = self.find_constructors()
args = {
'action_name': self.name,
'subl_line_start': self.code_region.begin(),
'subl_line_end': self.code_region.end(),
'constr_start': constr_regions[index].begin()
}
self.view.run_command('run_action', args)
def generate_code(self, edit, constr_start):
start = constr_start
def_line = self.view.line(start)
def_str = self.view.substr(def_line)
log.info('def_str >> ' + def_str)
args = re.findConstructorArgs(def_str)
log.info('args >> ' + str(args))
arg_def = self.get_prop_type() + ' ' + self.get_prop_name()
if args is not None:
arg_def = ', ' + arg_def
def_str = def_str.replace(')',
arg_def + ')')
self.view.replace(edit, def_line, def_str)
def_line = self.view.line(start)
indent = self.get_inner_indent() + '\t'
insert_to = def_line.end() + 1
first_line = self.view.line(insert_to)
if re.contains_regex(self.to_text(first_line), r'super\s*\('):
insert_to = first_line.end() + 1
text = '{indent}this.{varname} = {varname};\n'.format(indent=indent, varname=self.get_prop_name())
self.view.insert(edit, insert_to, text)
def is_applicable(self):
result = re.is_prop_def(self.to_text(), allow_get_set=True, allow_static=False)
result = result and re.findConstructorWithParam(
self.to_text(self.get_class_code()),
self.find_class_name(),
self.get_prop_name(),
self.get_prop_type()) is None
return result
class AddGetSetProps(PropertyAction):
def __init__(self):
super(AddGetSetProps, self).__init__(A.ADD_GET_SET_PROPS)
def generate_code(self, edit):
use_access = SH.get_setting('generate_access_get_set')
if use_access:
to_insert = ' { ${1:public} get; ${2:public} set; }'
else:
to_insert = ' { get; set; }'
line_text = self.to_text()
index_of_end = line_text.rfind(';')
index_of_end = self.begin() + index_of_end
sublimeHelper = VH(self.view)
sublimeHelper.insert_snippet(to_insert, (index_of_end, index_of_end + 1))
def is_applicable(self):
result = super(AddGetSetProps, self).is_applicable()
getter = AddGetterAction()
getter.setView(self.view)
getter.setCode(self.code_region)
setter = AddSetterAction()
setter.setView(self.view)
setter.setCode(self.code_region)
result = result and setter.is_applicable() and getter.is_applicable()
return result
|
nchursin/ApexIntentionActions
|
helpers/PropertyActions.py
|
Python
|
apache-2.0
| 6,366
|
#
# Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
from ..query_constraints_factory import QueryConstraintsFactory
from ..query_constraints import QueryConstraints
from .spatial_temporal_constraints_builder import SpatialTemporalConstraintsBuilder
class VectorQueryConstraintsFactory(QueryConstraintsFactory):
"""
A query constraints factory with additional methods for creating spatial and/or
    temporal constraints for vector data. Do not construct this class manually; instead,
get the constraints factory by using the `constraints_factory()` method of the
query builder.
"""
def spatial_temporal_constraints(self):
"""
Creates a spatial temporal constraints builder that can be used to construct
spatial and/or temporal constraints.
Returns:
A new `pygw.query.vector.spatial_temporal_constraints_builder.SpatialTemporalConstraintsBuilder`.
"""
return SpatialTemporalConstraintsBuilder(self._java_ref.spatialTemporalConstraints())
def filter_constraints(self, filter):
"""
Constrain a query using a filter created by pygw.query.FilterFactory.
Args:
filter (filter): The filter to constrain the query by.
Returns:
A `pygw.query.query_constraints.QueryConstraints` with the given filter.
"""
return QueryConstraints(self._java_ref.filterConstraints(filter))
def cql_constraints(self, cql_expression):
"""
Constrain a query using a CQL expression.
Args:
cql_expression (str): The CQL expression to constrain the query by.
Returns:
A `pygw.query.query_constraints.QueryConstraints` with the given CQL expression.
"""
return QueryConstraints(self._java_ref.cqlConstraints(cql_expression))
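# Illustrative sketch, not part of the original module, written as comments because the
# query-builder API referenced here is assumed rather than shown in this file. Given a
# vector query builder `builder` (hypothetical variable) whose `constraints_factory()`
# returns an instance of this class, the methods above would be used along these lines:
#
# factory = builder.constraints_factory()
# constraints = factory.cql_constraints("BBOX(geometry, -120, 30, -110, 40)")
# stc = factory.spatial_temporal_constraints()  # then use the builder it returns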
|
spohnan/geowave
|
python/src/main/python/pygw/query/vector/vector_query_constraints_factory.py
|
Python
|
apache-2.0
| 2,300
|
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities
'''
# Import salt libs
import salt.utils
import socket
# Import python libs
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
    which requires dependencies outside of Python does not belong in this module.
'''
return 'dnsutil'
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
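# Worked example, not part of the original module: parse_hosts() also accepts raw hosts data,
# which makes its behavior easy to see without touching /etc/hosts:
#
# parse_hosts(hosts="127.0.0.1 localhost\n10.0.0.5 web1 web1.example.com")
# # -> {'127.0.0.1': ['localhost'], '10.0.0.5': ['web1', 'web1.example.com']}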
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.fopen(hostsfile, 'a') as fp_:
fp_.write(append_line)
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
host_list = entries.split(',')
out_file = salt.utils.fopen(hostsfile, 'w')
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write('{0}\n'.format(line))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(' '.join(comps))
out_file.write('\n')
out_file.close()
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.fopen(zonefile, 'r') as fp_:
zone = fp_.read()
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict.keys():
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.'):
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
            if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(time):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
time = time.upper()
if 'H' in time:
time = int(time.replace('H', '')) * 3600
elif 'D' in time:
time = int(time.replace('D', '')) * 86400
elif 'W' in time:
time = 604800
else:
try:
time = int(time)
except Exception:
time = 604800
    if time > 604800:
        time = 604800
return time
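# Worked examples, not part of the original module: _to_seconds('3H') returns 10800,
# _to_seconds('2D') returns 172800, _to_seconds('2W') returns 604800, an unreadable value
# such as 'soon' falls back to 604800, and anything above one week is clamped to 604800.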
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dig.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record for 'host'.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dig.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
(hostname, aliases, addresses) = socket.gethostbyname_ex(host)
return addresses
except socket.error:
            return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dig.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dig.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
|
victorywang80/Maintenance
|
saltstack/src/salt/modules/dnsutil.py
|
Python
|
apache-2.0
| 8,951
|
"""Websocket API for Z-Wave JS."""
import json
from aiohttp import hdrs, web, web_exceptions
import voluptuous as vol
from zwave_js_server import dump
from homeassistant.components import websocket_api
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CLIENT, DOMAIN, EVENT_DEVICE_ADDED_TO_REGISTRY
ID = "id"
ENTRY_ID = "entry_id"
NODE_ID = "node_id"
TYPE = "type"
@callback
def async_register_api(hass: HomeAssistant) -> None:
"""Register all of our api endpoints."""
websocket_api.async_register_command(hass, websocket_network_status)
websocket_api.async_register_command(hass, websocket_node_status)
websocket_api.async_register_command(hass, websocket_add_node)
websocket_api.async_register_command(hass, websocket_stop_inclusion)
websocket_api.async_register_command(hass, websocket_remove_node)
websocket_api.async_register_command(hass, websocket_stop_exclusion)
hass.http.register_view(DumpView) # type: ignore
@websocket_api.require_admin
@websocket_api.websocket_command(
{vol.Required(TYPE): "zwave_js/network_status", vol.Required(ENTRY_ID): str}
)
@callback
def websocket_network_status(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Get the status of the Z-Wave JS network."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
data = {
"client": {
"ws_server_url": client.ws_server_url,
"state": "connected" if client.connected else "disconnected",
"driver_version": client.version.driver_version,
"server_version": client.version.server_version,
},
"controller": {
"home_id": client.driver.controller.data["homeId"],
"nodes": list(client.driver.controller.nodes),
},
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_status",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@callback
def websocket_node_status(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Get the status of a Z-Wave JS node."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
node_id = msg[NODE_ID]
node = client.driver.controller.nodes[node_id]
data = {
"node_id": node.node_id,
"is_routing": node.is_routing,
"status": node.status,
"is_secure": node.is_secure,
"ready": node.ready,
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.require_admin # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/add_node",
vol.Required(ENTRY_ID): str,
vol.Optional("secure", default=False): bool,
}
)
async def websocket_add_node(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Add a node to the Z-Wave network."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
controller = client.driver.controller
include_non_secure = not msg["secure"]
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def node_added(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
"status": node.status,
"ready": node.ready,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node added", "node": node_details}
)
)
@callback
def device_registered(device: DeviceEntry) -> None:
device_details = {"name": device.name, "id": device.id}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "device registered", "device": device_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
unsubs = [
controller.on("inclusion started", forward_event),
controller.on("inclusion failed", forward_event),
controller.on("inclusion stopped", forward_event),
controller.on("node added", node_added),
async_dispatcher_connect(
hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
),
]
result = await controller.async_begin_inclusion(include_non_secure)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_inclusion",
vol.Required(ENTRY_ID): str,
}
)
async def websocket_stop_inclusion(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Cancel adding a node to the Z-Wave network."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
controller = client.driver.controller
result = await controller.async_stop_inclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_exclusion",
vol.Required(ENTRY_ID): str,
}
)
async def websocket_stop_exclusion(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Cancel removing a node from the Z-Wave network."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
controller = client.driver.controller
result = await controller.async_stop_exclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin # type:ignore
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/remove_node",
vol.Required(ENTRY_ID): str,
}
)
async def websocket_remove_node(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Remove a node from the Z-Wave network."""
entry_id = msg[ENTRY_ID]
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
unsubs = [
controller.on("exclusion started", forward_event),
controller.on("exclusion failed", forward_event),
controller.on("exclusion stopped", forward_event),
controller.on("node removed", node_removed),
]
result = await controller.async_begin_exclusion()
connection.send_result(
msg[ID],
result,
)
class DumpView(HomeAssistantView):
"""View to dump the state of the Z-Wave JS server."""
url = "/api/zwave_js/dump/{config_entry_id}"
name = "api:zwave_js:dump"
async def get(self, request: web.Request, config_entry_id: str) -> web.Response:
"""Dump the state of Z-Wave."""
hass = request.app["hass"]
if config_entry_id not in hass.data[DOMAIN]:
raise web_exceptions.HTTPBadRequest
entry = hass.config_entries.async_get_entry(config_entry_id)
msgs = await dump.dump_msgs(entry.data[CONF_URL], async_get_clientsession(hass))
return web.Response(
body="\n".join(json.dumps(msg) for msg in msgs) + "\n",
headers={
hdrs.CONTENT_TYPE: "application/jsonl",
hdrs.CONTENT_DISPOSITION: 'attachment; filename="zwave_js_dump.jsonl"',
},
)
|
turbokongen/home-assistant
|
homeassistant/components/zwave_js/api.py
|
Python
|
apache-2.0
| 8,923
|
__author__ = 'cmantas'
from tools import *
from json import loads
ms = take_single("select metrics from mahout_kmeans_text where k=15 and documents=90300 and dimensions=53235;")[0]
mj = loads(ms)
cols = iter(["#727272", '#f1595f', '#79c36a', '#599ad3', '#f9a65a','#9e66ab','#cd7058', '#d77fb3'])
def timeline2values(fieldname, metrics):
    times = []
    values = []
    for k, v in metrics:
        times.append(k)
        values.append(v[fieldname])
    return times, values
def sum_timeline_vals(fieldnames, metrics):
    times = []
    values = []
    for k, v in metrics:
        times.append(k)
        total = 0
        for i in fieldnames:
            if i.startswith("kbps"):
                v[i] = int(v[i])
            total += v[i]
        values.append(total)
    return times, values
# figure()
fig, ax1 = plt.subplots()
times, values = timeline2vaslues("cpu", mj)
d, = ax1.plot(times, values, color=next(cols))
ax1.set_ylabel('percentage (%)')
times, values = timeline2vaslues("mem", mj)
a, = ax1.plot(times, values, color=next(cols))
ax2 = ax1.twinx()
times, values = sum_timeline_vals(["kbps_read", "kbps_write"], mj)
ax2.set_ylabel("KB/s")
b, = ax2.plot(times, values, color=next(cols))
times, values = sum_timeline_vals(["net_in", "net_out"], mj)
c, = ax2.plot(times, values, color=next(cols))
plt.title("Mahout K-means Cluster Metrics")
plt.legend([d, a, b,c], ["CPU", "MEM", "Disk IO", "Net IO"], loc=3)
show()
|
project-asap/IReS-Platform
|
asap-tools/experiments/depricated/handler/metrics.py
|
Python
|
apache-2.0
| 1,434
|
from fingered import *
import random
import csv
import sys
def caac():
records = random.randrange(200,500)
inst3=Xf("r")
inst3.setStats(records,2,(2,records/10),[-1,0],[False,False],0,40000)
inst3.FormData()
inst4=Xf("s")
inst4.setStats(100,2,(2,10),[-1,0],[False,True],0,40000)
inst4.FormData()
print inst3
print inst4
#print "Predicted Cost of Fingered Join from Stats: "
#print "recorSize of file1=" + str(records)
pCost = inst3.getSize() + (inst4.getSize() * inst3.getRuns(1) )+ (inst3.getRuns(1) * inst4.getSize())
#print pCost
#print inst3.eJoin(inst4,1,1)
#print "\n Fingered Join:"
j=JoinReq(inst3,inst4,1,1,True)
tup=j.pull()
    while tup != "eoo":
#print str(tup)
tup=j.pull()
#print "Cost : " + str(j.getCost())
"""
print "\nNested Loop Join:\n"
inst3.reset()
inst4.reset()
k=JoinReq(inst3,inst4,1,1,False)
tup=k.pull()
while tup is not "eoo":
print str(tup)
tup=k.pull()
print "Cost : " + str(k.getCost())
"""
print "Summary:"
print "selected file1size: " + str(records)
print "selected number of runs for file1: " + str(inst3.getRuns(1))
print "Predicted Cost Finger:" + str(pCost)
print "Actual Cost Finger:" + str(j.getCost())
#print "Actual Cost NLJ:" + str(k.getCost())
print "("+ str(records) +","+ str(inst3.getRuns(1)) +","+ str(inst4.getSize()) +","+ str(pCost) +","+ str(j.getCost())+")"
tup = [ str(records), str(inst3.getRuns(1)),str(inst4.getSize()),str(pCost),str(j.getCost())]
print tup
fp = open("toexcel.csv","ab")
writer = csv.writer(fp)
data = [tup]
writer.writerows(data)
for i in range(2):
caac()
|
vishnuprathish/constrained-data-generator
|
file1.py
|
Python
|
apache-2.0
| 1,614
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.compute import exception
from jacket.i18n import _
class Controller(object):
"""The server metadata API controller for the OpenStack API."""
def __init__(self):
self.compute_api = cloud.API()
super(Controller, self).__init__()
def _get_metadata(self, context, server_id):
try:
server = common.get_instance(self.compute_api, context, server_id)
meta = self.compute_api.get_instance_metadata(context, server)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
meta_dict = {}
for key, value in six.iteritems(meta):
meta_dict[key] = value
return meta_dict
def index(self, req, server_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['compute.context']
return {'metadata': self._get_metadata(context, server_id)}
def create(self, req, server_id, body):
try:
metadata = body['metadata']
except (KeyError, TypeError):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
if not isinstance(metadata, dict):
msg = _("Malformed request body. metadata must be object")
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['compute.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=False)
return {'metadata': new_metadata}
def update(self, req, server_id, id, body):
try:
meta_item = body['meta']
except (TypeError, KeyError):
expl = _('Malformed request body')
raise exc.HTTPBadRequest(explanation=expl)
if not isinstance(meta_item, dict):
msg = _("Malformed request body. meta item must be object")
raise exc.HTTPBadRequest(explanation=msg)
if id not in meta_item:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta_item) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
context = req.environ['compute.context']
self._update_instance_metadata(context,
server_id,
meta_item,
delete=False)
return {'meta': meta_item}
def update_all(self, req, server_id, body):
try:
metadata = body['metadata']
except (TypeError, KeyError):
expl = _('Malformed request body')
raise exc.HTTPBadRequest(explanation=expl)
if not isinstance(metadata, dict):
msg = _("Malformed request body. metadata must be object")
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['compute.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=True)
return {'metadata': new_metadata}
def _update_instance_metadata(self, context, server_id, metadata,
delete=False):
try:
server = common.get_instance(self.compute_api, context, server_id)
return self.compute_api.update_instance_metadata(context,
server,
metadata,
delete)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
except (ValueError, AttributeError):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InvalidMetadata as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'update metadata', server_id)
def show(self, req, server_id, id):
"""Return a single metadata item."""
context = req.environ['compute.context']
data = self._get_metadata(context, server_id)
try:
return {'meta': {id: data[id]}}
except KeyError:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(204)
def delete(self, req, server_id, id):
"""Deletes an existing metadata."""
context = req.environ['compute.context']
metadata = self._get_metadata(context, server_id)
if id not in metadata:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
server = common.get_instance(self.compute_api, context, server_id)
try:
self.compute_api.delete_instance_metadata(context, server, id)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete metadata', server_id)
def create_resource():
return wsgi.Resource(Controller())
|
HybridF5/jacket
|
jacket/api/compute/openstack/compute/legacy_v2/server_metadata.py
|
Python
|
apache-2.0
| 7,172
|
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2011 Hendrix Demers"
__license__ = ""
# Standard library modules.
import unittest
import logging
import os.path
import tempfile
import shutil
import time
# Third party modules.
from nose.plugins.skip import SkipTest
# Local modules.
from pymcxray import get_current_module_path
# Project modules
import pymcxray.serialization._Serialization as _Serialization
# Globals and constants variables.
class Test_Serialization(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.serialization = _Serialization._Serialization()
self.tempPath = tempfile.mkdtemp(prefix="Test_Serialization_")
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tempPath)
def testSkeleton(self):
#self.fail("Test if the testcase is working.")
self.assert_(True)
def test_init(self):
serialization = _Serialization._Serialization()
self.assertEquals(None, serialization._filename)
self.assertEquals(True, serialization._verbose)
serialization = _Serialization._Serialization(verbose=True)
self.assertEquals(None, serialization._filename)
self.assertEquals(True, serialization._verbose)
serialization = _Serialization._Serialization(verbose=False)
self.assertEquals(None, serialization._filename)
self.assertEquals(False, serialization._verbose)
filenameRef = "_Serialization.ser"
serialization = _Serialization._Serialization(filename=filenameRef)
self.assertEquals(filenameRef, serialization._filename)
self.assertEquals(True, serialization._verbose)
filenameRef = "_Serialization2.ser"
serialization = _Serialization._Serialization(filenameRef)
self.assertEquals(filenameRef, serialization._filename)
self.assertEquals(True, serialization._verbose)
#self.fail("Test if the testcase is working.")
def test_getFilepath(self):
serialization = _Serialization._Serialization()
self.assertRaises(ValueError, serialization.getFilepath)
filenameRef = "_Serialization.ser"
filepathRef = filenameRef
filepathRef = os.path.normpath(filepathRef)
serialization = _Serialization._Serialization(filename=filenameRef)
filepath = serialization.getFilepath()
self.assertEquals(filepathRef, filepath)
filenameRef = "_Serialization.ser"
pathRef = "/casd/csadf/asdfsdaf/"
filepathRef = os.path.join(pathRef, filenameRef)
filepathRef = os.path.normpath(filepathRef)
serialization = _Serialization._Serialization()
serialization.setFilename(filenameRef)
serialization.setPathname(pathRef)
filepath = serialization.getFilepath()
self.assertEquals(filepathRef, filepath)
filenameRef = "_Serialization.ser"
pathRef = "/casd/csadf/asdfsdaf/"
        filepathRef = os.path.join(pathRef, filenameRef)
filepathRef = os.path.normpath(filepathRef)
serialization = _Serialization._Serialization()
serialization.setFilepath(filepathRef)
filepath = serialization.getFilepath()
self.assertEquals(filepathRef, filepath)
#self.fail("Test if the testcase is working.")
def test_setCurrentVersion(self):
version = "1.2.3"
self.serialization.setCurrentVersion(version)
self.assertEquals(version, self.serialization._currentVersion)
self.assertEquals(version, self.serialization.getCurrentVersion())
version = 1.2
self.assertRaises(TypeError, self.serialization.setCurrentVersion, version)
#self.fail("Test if the testcase is working.")
def test_isFile(self):
filepathRef = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
self.serialization.setFilepath(filepathRef)
self.assertFalse(self.serialization.isFile())
filepathRef = get_current_module_path(__file__, "../../test_data/serialization/empty.ser")
if not os.path.isfile(filepathRef):
raise SkipTest
self.serialization.setFilepath(filepathRef)
self.assertTrue(self.serialization.isFile())
#self.fail("Test if the testcase is working.")
def test_deleteFile(self):
filename = "empty.ser"
filepathRef = get_current_module_path(__file__, "../../test_data/serialization/")
filepathRef = os.path.join(filepathRef, filename)
if not os.path.isfile(filepathRef):
raise SkipTest
filepath = os.path.join(self.tempPath, filename)
shutil.copy2(filepathRef, filepath)
self.serialization.setFilepath(filepath)
self.assertTrue(os.path.isfile(filepath))
self.serialization.deleteFile()
self.assertFalse(os.path.isfile(filepath))
#self.fail("Test if the testcase is working.")
def test_isOlderThan(self):
filename = "empty"
filepathRef = get_current_module_path(__file__, "../../test_data/serialization/")
filepathRef = os.path.join(filepathRef, filename+'.ser')
if not os.path.isfile(filepathRef):
raise SkipTest
filepath1 = os.path.join(self.tempPath, filename+'_1'+'.ser')
time.sleep(1.0)
shutil.copy(filepathRef, filepath1)
filepath2 = os.path.join(self.tempPath, filename+'_2'+'.ser')
time.sleep(1.0)
shutil.copy(filepathRef, filepath2)
filepath3 = os.path.join(self.tempPath, filename+'_3'+'.ser')
time.sleep(1.0)
shutil.copy(filepathRef, filepath3)
self.serialization.setFilepath(filepath2)
self.assertFalse(self.serialization.isOlderThan(filepath1))
self.assertFalse(self.serialization.isOlderThan(filepath2))
self.assertTrue(self.serialization.isOlderThan(filepath3))
filepath = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
self.assertFalse(self.serialization.isOlderThan(filepath))
filepath = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
self.serialization.setFilepath(filepath)
self.assertTrue(self.serialization.isOlderThan(filepath3))
#self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
import nose
nose.runmodule()
|
drix00/pymcxray
|
pymcxray/serialization/test_Serialization.py
|
Python
|
apache-2.0
| 6,459
|
#!/usr/bin/python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""query switch."""
import optparse
import Queue
import threading
import time
from compass.apiclient.restful import Client
class AddSwitch(object):
"""A utility class that handles adding a switch and retrieving
corresponding machines associated with the switch.
"""
def __init__(self, server_url):
print server_url, " ...."
self._client = Client(server_url)
def add_switch(self, queue, ip, snmp_community):
"""Add a switch with SNMP credentials and retrieve attached
server machines.
:param queue: The result holder for the machine details.
:type queue: A Queue object(thread-safe).
:param ip: The IP address of the switch.
:type ip: string.
:param snmp_community: The SNMP community string.
:type snmp_community: string.
"""
status, resp = self._client.add_switch(ip,
version="2c",
community=snmp_community)
if status > 409:
queue.put((ip, (False,
"Failed to add the switch (status=%d)" % status)))
return
if status == 409:
# This is the case where the switch with the same IP already
# exists in the system. We now try to update the switch
# with the given credential.
switch_id = resp['failedSwitch']
status, resp = self._client.update_switch(switch_id,
version="2c",
community=snmp_community)
if status > 202:
queue.put((ip, (False,
"Failed to update the switch (status=%d)" %
status)))
return
switch = resp['switch']
state = switch['state']
switch_id = switch['id']
# if the switch state is not in under_monitoring,
# wait for the poll switch task
while True:
status, resp = self._client.get_switch(switch_id)
if status > 400:
queue.put((ip, (False, "Failed to get switch status")))
return
switch = resp['switch']
state = switch['state']
if state == 'initialized' or state == 'repolling':
time.sleep(5)
else:
break
if state == 'under_monitoring':
# get machines connected to the switch.
status, response = self._client.get_machines(switch_id=switch_id)
if status == 200:
for machine in response['machines']:
queue.put((ip, "mac=%s, vlan=%s, port=%s dbid=%d" % (
machine['mac'],
machine['vlan'],
machine['port'],
machine['id'])))
else:
queue.put((ip, (False,
"Failed to get machines %s" %
response['status'])))
else:
queue.put((ip, (False, "Switch state is %s" % state)))
if __name__ == "__main__":
usage = "usage: %prog [options] switch_ips"
parser = optparse.OptionParser(usage)
parser.add_option("-u", "--server-url", dest="server_url",
default="http://localhost/api",
help="The Compass Server URL")
parser.add_option("-c", "--community", dest="community",
default="public",
help="Switch SNMP community string")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Wrong number of arguments")
threads = []
queue = Queue.Queue()
add_switch = AddSwitch(options.server_url)
print "Add switch to the server. This may take a while ..."
for switch in args[0].split(','):
t = threading.Thread(target=add_switch.add_switch,
args=(queue, switch, options.community))
threads.append(t)
t.start()
for t in threads:
t.join(60)
while True:
try:
ip, result = queue.get(block=False)
print ip, " : ", result
except Queue.Empty:
break
|
kidchang/compassv2-api
|
bin/query_switch.py
|
Python
|
apache-2.0
| 4,969
|
# Copyright (c) 2012-2016, Marco Elver <me AT marcoelver.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sync command.
"""
import logging
import os
import datetime
import shutil
import string
from bibman.util import gen_hash_md5, gen_filename_from_bib
class SyncCommand:
def __init__(self, conf, bibfile, excludefiles):
self.conf = conf
self.bibfmt_main = bibfmt_module.BibFmt(bibfile)
indices = [bibfmt_module.FILE, bibfmt_module.CITEKEY]
if self.conf.args.hash:
indices.append(bibfmt_module.HASH)
self.bibfmt_main.build_index(*indices)
self.bibfmt_efs = []
for fh in excludefiles:
bi = bibfmt_module.BibFmt(fh)
bi.build_index(*indices)
self.bibfmt_efs.append(bi)
# Sanity check data and warn
for idx in indices:
main_set = frozenset(self.bibfmt_main.index[idx])
for bi in self.bibfmt_efs:
duplicate_set = set(bi.index[idx])
duplicate_set &= main_set
if len(duplicate_set) != 0:
logging.warning("Duplicates found in '{}': {} = {}".format(
bi.bibfile.name, idx, duplicate_set))
def walk_path(self):
for path in self.conf.args.paths:
if not os.path.isdir(path):
logging.error("Could not find directory: {}".format(path))
continue
for root, dirs, files in os.walk(path):
for f in files:
fullpath = os.path.abspath(os.path.join(root, f))
if fullpath.startswith(os.environ['HOME']):
fullpath = fullpath.replace(os.environ['HOME'], "~", 1)
if fullpath.split(".")[-1] in self.conf.args.extlist:
yield fullpath
def query_exists_in(self, index, value):
"""
Generates all found matches.
"""
for bi in [self.bibfmt_main] + self.bibfmt_efs:
if value in bi.index[index]:
yield bi
def query_exists(self, *args, **kwargs):
"""
Returns first found match only.
"""
for bi in self.query_exists_in(*args, **kwargs):
return bi
return None
def check_hash(self, digest, path):
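        # Look up `digest` in the main index and the exclude indices.  If an
        # entry with the same MD5 exists but its recorded file is missing,
        # optionally repair the entry in place (with --append), otherwise warn
        # with a suggested fix; if the recorded file still exists, warn about
        # the duplicate.  Returns True when a matching hash was found anywhere.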
found = False
for bi in self.query_exists_in(bibfmt_module.HASH, digest):
found = True
query_filepos = bi.query(bibfmt_module.HASH, digest)
query_result = bi.read_entry_dict(query_filepos)
duplicate = query_result["file"]
citekey = query_result["citekey"]
if not os.path.exists(duplicate) and bi.bibfile.writable():
if not self.conf.args.append or not bi.update_in_place(query_filepos,
bibfmt_module.FILE, duplicate, path):
logging.warning("File '{}' missing; suggested fix: update '{}' in '{}' with '{}'".format(
duplicate, citekey, bi.bibfile.name, path))
else:
# Could update in-place
logging.info("Updated entry for '{}' with '{}'".format(
citekey, path))
else:
logging.warning("Duplicate for '{}' found in '{}': citekey = '{}'".format(
path, bi.bibfile.name, citekey))
return found
def verify_hash(self, path):
# Only verify entries in main.
query_filepos = self.bibfmt_main.query(bibfmt_module.FILE, path)
if query_filepos is None: return # not in main
query_result = self.bibfmt_main.read_entry_dict(query_filepos)
digest = gen_hash_md5(os.path.expanduser(path)).hexdigest()
if digest != query_result["md5"]:
            logging.warning("MD5 checksum mismatch: {} ({} != {})".format(
path, digest, query_result["md5"]))
def interactive_corrections(self, new_entry_args):
logging.info("Entering interactive corrections mode. Leave blank for default.")
self.bibfmt_main.print_new_entry(**new_entry_args)
for key in new_entry_args:
if key in ["file", "date_added", "md5"]:
continue
user_data = input("'{}' correction: ".format(key))
if len(user_data) > 0:
new_entry_args[key] = user_data
print()
return new_entry_args
def __call__(self):
for path in self.walk_path():
# Check existing entries
if self.query_exists(bibfmt_module.FILE, path) is not None:
if self.conf.args.verify: self.verify_hash(path)
continue
# Generate new entry
new_entry_args = dict(
reftype="misc",
citekey="TODO:{}".format(os.path.basename(path)),
author="",
title="",
year="",
keywords="",
file=path,
annotation="",
date_added=datetime.date.today().strftime("%Y-%m-%d"))
if self.conf.args.hash:
new_entry_args["md5"] = gen_hash_md5(
os.path.expanduser(path)).hexdigest()
# Before we proceed, check if this file is a duplicate of an
# already existing file, and if so, check existing entry is
# still valid; if not valid replace file, otherwise warn user.
if self.check_hash(new_entry_args["md5"], path):
continue
if self.conf.args.remote:
logging.info("Attempting to fetch bibliography information remotely: {}".format(
path))
new_entry_args.update(self.conf.bibfetch(filename=path))
if self.conf.args.interactive:
new_entry_args = self.interactive_corrections(new_entry_args)
if self.conf.args.interactive and self.conf.args.rename:
newpath = os.path.join(os.path.dirname(path), gen_filename_from_bib(new_entry_args))
logging.info("Rename: {} to {}".format(path, newpath))
shutil.move(os.path.expanduser(path), os.path.expanduser(newpath))
new_entry_args["file"] = newpath
path = newpath
# Before we add the new entry, check for duplicate cite-keys
citekey_exists_in = self.query_exists(bibfmt_module.CITEKEY,
new_entry_args["citekey"])
if citekey_exists_in is not None:
logging.debug("Cite-key already exists in '{}': {}".format(
citekey_exists_in.bibfile.name, new_entry_args["citekey"]))
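                # Disambiguate the duplicate cite-key by appending the first
                # letter (a, b, ..., Z) that yields an unused key.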
for c in string.ascii_letters:
newcitekey = new_entry_args["citekey"] + c
if self.query_exists(bibfmt_module.CITEKEY, newcitekey) is None:
break
new_entry_args["citekey"] = newcitekey
# Finally, generate new entry
if self.conf.args.append:
logging.info("Appending new entry for: {}".format(path))
self.bibfmt_main.append_new_entry(**new_entry_args)
else:
self.bibfmt_main.print_new_entry(**new_entry_args)
def main(conf):
global bibfmt_module
bibfmt_module = conf.bibfmt_module
try:
bibfile = open(conf.args.bibfile, 'r+')
excludefiles = []
if conf.args.excludes is not None:
for filename in conf.args.excludes:
excludefiles.append(open(filename, "r"))
except Exception as e:
logging.critical("Could not open file: {}".format(e))
return 1
try:
sync_cmd = SyncCommand(conf, bibfile, excludefiles)
sync_cmd()
finally:
bibfile.close()
for fh in excludefiles:
fh.close()
def register_args(parser):
parser.add_argument("-p", "--path", metavar="PATH", type=str,
dest="paths", nargs="+", required=True,
help="Paths to scan and synchronise BIBFILE with.")
parser.add_argument("--extlist", type=str,
dest="extlist", default="pdf", nargs="+",
help="File-extensions to consider for sync. [Default:pdf]")
parser.add_argument("-a", "--append", action='store_true',
dest="append", default=False,
help="Append to BIBFILE instead of printing to stdout.")
parser.add_argument("-e", "--exclude", metavar="EXCLUDE", type=str,
dest="excludes", default=None, nargs="+",
help="Bibliography files to exclude new entries from.")
parser.add_argument("--nohash", action="store_false",
dest="hash", default=True,
help="Do not generate MD5 sums and check duplicates.")
parser.add_argument("-i", "--interactive", action="store_true",
dest="interactive", default=False,
help="Interactive synchronisation, prompting the user for entry corrections.")
parser.add_argument("--remote", action="store_true",
dest="remote", default=False,
help="Enable remote fetching of bibliography entries.")
parser.add_argument("--rename", action="store_true",
dest="rename", default=False,
help="Rename file to be more descriptive; only valid with --interactive.")
parser.add_argument("--verify", action="store_true",
dest="verify", default=False,
help="Verify checksum of all existing entries.")
parser.set_defaults(func=main)
|
melver/bibmanage
|
lib/python/bibman/commands/sync.py
|
Python
|
apache-2.0
| 10,182
|
from rest_framework import serializers as ser
from modularodm.exceptions import ValidationValueError
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import AllowMissing, JSONAPIRelationshipSerializer, HideIfDisabled, \
PrefetchRelationshipsSerializer
from website.models import User
from api.base.serializers import (
JSONAPISerializer, LinksField, RelationshipField, DevOnly, IDField, TypeField,
DateByVersion,
)
from api.base.utils import absolute_reverse, get_user_auth
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'full_name',
'given_name',
'middle_names',
'family_name',
'id'
])
non_anonymized_fields = ['type']
id = IDField(source='_id', read_only=True)
type = TypeField()
full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface')
given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
date_registered = HideIfDisabled(DateByVersion(read_only=True))
active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))
# Social Fields are broken out to get around DRF complex object bug and to make API updating more user friendly.
github = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.github',
allow_blank=True, help_text='GitHub Handle'), required=False, source='social.github')))
scholar = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.scholar',
allow_blank=True, help_text='Google Scholar Account'), required=False, source='social.scholar')))
personal_website = DevOnly(HideIfDisabled(AllowMissing(ser.URLField(required=False, source='social.personal',
allow_blank=True, help_text='Personal Website'), required=False, source='social.personal')))
twitter = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.twitter',
allow_blank=True, help_text='Twitter Handle'), required=False, source='social.twitter')))
linkedin = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.linkedIn',
allow_blank=True, help_text='LinkedIn Account'), required=False, source='social.linkedIn')))
impactstory = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.impactStory',
allow_blank=True, help_text='ImpactStory Account'), required=False, source='social.impactStory')))
orcid = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.orcid',
allow_blank=True, help_text='ORCID'), required=False, source='social.orcid')))
researcherid = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.researcherId',
allow_blank=True, help_text='ResearcherId Account'), required=False, source='social.researcherId')))
researchgate = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.researchGate',
allow_blank=True, help_text='ResearchGate Account'), required=False, source='social.researchGate')))
academia_institution = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.academiaInstitution',
allow_blank=True, help_text='AcademiaInstitution Field'), required=False, source='social.academiaInstitution')))
academia_profile_id = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.academiaProfileID',
allow_blank=True, help_text='AcademiaProfileID Field'), required=False, source='social.academiaProfileID')))
baiduscholar = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.baiduScholar',
allow_blank=True, help_text='Baidu Scholar Account'), required=False, source='social.baiduScholar')))
ssrn = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.ssrn',
allow_blank=True, help_text='SSRN Account'), required=False, source='social.ssrn')))
    timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC'"))
locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g. 'en_US'"))
links = HideIfDisabled(LinksField(
{
'html': 'absolute_url',
'profile_image': 'profile_image_url',
}
))
nodes = HideIfDisabled(RelationshipField(
related_view='users:user-nodes',
related_view_kwargs={'user_id': '<_id>'},
related_meta={'projects_in_common': 'get_projects_in_common'},
))
registrations = DevOnly(HideIfDisabled(RelationshipField(
related_view='users:user-registrations',
related_view_kwargs={'user_id': '<_id>'},
)))
institutions = HideIfDisabled(RelationshipField(
related_view='users:user-institutions',
related_view_kwargs={'user_id': '<_id>'},
self_view='users:user-institutions-relationship',
self_view_kwargs={'user_id': '<_id>'},
))
class Meta:
type_ = 'users'
def get_projects_in_common(self, obj):
user = get_user_auth(self.context['request']).user
if obj == user:
return user.contributor_to.count()
return obj.n_projects_in_common(user)
def absolute_url(self, obj):
if obj is not None:
return obj.absolute_url
return None
def get_absolute_url(self, obj):
return absolute_reverse('users:user-detail', kwargs={
'user_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def profile_image_url(self, user):
size = self.context['request'].query_params.get('profile_image_size')
return user.profile_image_url(size=size)
def update(self, instance, validated_data):
assert isinstance(instance, User), 'instance must be a User'
for attr, value in validated_data.items():
if 'social' == attr:
for key, val in value.items():
instance.social[key] = val
else:
setattr(instance, attr, value)
try:
instance.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return instance
class UserAddonSettingsSerializer(JSONAPISerializer):
"""
    Serializer for a user's add-on settings.
"""
id = ser.CharField(source='config.short_name', read_only=True)
user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
links = LinksField({
'self': 'get_absolute_url',
'accounts': 'account_links'
})
class Meta:
type_ = 'user_addons'
def get_absolute_url(self, obj):
return absolute_reverse(
'users:user-addon-detail',
kwargs={
'provider': obj.config.short_name,
'user_id': self.context['request'].parser_context['kwargs']['user_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
def account_links(self, obj):
# TODO: [OSF-4933] remove this after refactoring Figshare
if hasattr(obj, 'external_accounts'):
return {
account._id: {
'account': absolute_reverse('users:user-external_account-detail', kwargs={
'user_id': obj.owner._id,
'provider': obj.config.short_name,
'account_id': account._id,
'version': self.context['request'].parser_context['kwargs']['version']
}),
'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)]
}
for account in obj.external_accounts.all()
}
return {}
class UserDetailSerializer(UserSerializer):
"""
Overrides UserSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
id = ser.CharField(required=False, allow_null=True, source='_id')
class Meta:
type_ = 'institutions'
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(PrefetchRelationshipsSerializer):
data = ser.ListField(child=RelatedInstitution())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return absolute_reverse('users:user-institutions-relationship', kwargs={
'user_id': obj['self']._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_related_url(self, obj):
return absolute_reverse('users:user-institutions', kwargs={
'user_id': obj['self']._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class Meta:
type_ = 'institutions'
|
acshi/osf.io
|
api/users/serializers.py
|
Python
|
apache-2.0
| 10,270
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hardware reset functions for the D'Kitty."""
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.components.robot.dynamixel_robot import DynamixelRobotComponent
from robel.components.tracking import TrackerComponentBuilder
from robel.components.tracking.tracker import TrackerComponent
from robel.utils.reset_procedure import ResetProcedure
# Maximum values for each joint.
BASEMAX = .8
MIDMAX = 2.4
FOOTMAX = 2.5
# Common parameters for all `set_state` commands.
SET_PARAMS = dict(
error_tol=5 * np.pi / 180, # 5 degrees
    last_diff_tol=.1 * np.pi / 180,  # 0.1 degrees
)
# Convenience constants.
PI = np.pi
PI2 = np.pi / 2
PI4 = np.pi / 4
PI6 = np.pi / 6
OUTWARD_TUCK_POSE = np.array([0, -MIDMAX, FOOTMAX, 0, MIDMAX, -FOOTMAX])
INWARD_TUCK_POSE = np.array([0, MIDMAX, -FOOTMAX, 0, -MIDMAX, FOOTMAX])
class ScriptedDKittyResetProcedure(ResetProcedure):
"""Scripted reset procedure for D'Kitty.
This resets the D'Kitty to a standing position.
"""
def __init__(self,
upright_threshold: float = 0.9,
standing_height: float = 0.35,
height_tolerance: float = 0.05,
max_attempts: int = 5):
super().__init__()
self._upright_threshold = upright_threshold
self._standing_height = standing_height
self._height_tolerance = height_tolerance
self._max_attempts = max_attempts
self._robot = None
self._tracker = None
def configure_reset_groups(self, builder: ComponentBuilder):
"""Configures the component groups needed for reset."""
if isinstance(builder, RobotComponentBuilder):
builder.add_group('left', motor_ids=[20, 21, 22, 30, 31, 32])
builder.add_group('right', motor_ids=[10, 11, 12, 40, 41, 42])
builder.add_group('front', motor_ids=[10, 11, 12, 20, 21, 22])
builder.add_group('back', motor_ids=[30, 31, 32, 40, 41, 42])
elif isinstance(builder, TrackerComponentBuilder):
assert 'torso' in builder.group_configs
def reset(self, robot: DynamixelRobotComponent, tracker: TrackerComponent):
"""Performs the reset procedure."""
self._robot = robot
self._tracker = tracker
attempts = 0
while not self._is_standing():
attempts += 1
if attempts > self._max_attempts:
break
if self._is_upside_down():
self._perform_flip_over()
self._perform_tuck_under()
self._perform_stand_up()
def _is_standing(self) -> bool:
"""Returns True if the D'Kitty is fully standing."""
state = self._tracker.get_state('torso', raw_states=True)
height = state.pos[2]
upright = state.rot[2, 2]
        print('Upright: {:.2f}, height: {:.2f}'.format(upright, height))
if upright < self._upright_threshold:
return False
if (np.abs(height - self._standing_height) > self._height_tolerance):
return False
return True
def _get_uprightedness(self) -> float:
"""Returns the uprightedness of the D'Kitty."""
return self._tracker.get_state('torso', raw_states=True).rot[2, 2]
def _is_upside_down(self) -> bool:
"""Returns whether the D'Kitty is upside-down."""
return self._get_uprightedness() < 0
def _perform_flip_over(self):
"""Attempts to flip the D'Kitty over."""
while self._is_upside_down():
print('Is upside down {}; attempting to flip over...'.format(
self._get_uprightedness()))
# Spread flat and extended.
self._perform_flatten()
# If we somehow flipped over from that, we're done.
if not self._is_upside_down():
return
# Tuck in one side while pushing down on the other side.
self._robot.set_state(
{
'left':
RobotState(qpos=np.array([-PI4, -MIDMAX, FOOTMAX] * 2)),
'right': RobotState(qpos=np.array([-PI - PI4, 0, 0] * 2)),
},
timeout=4,
**SET_PARAMS,
)
# Straighten out the legs that were pushing down.
self._robot.set_state(
{
'left': RobotState(qpos=np.array([PI2, 0, 0] * 2)),
'right': RobotState(qpos=np.array([-PI2, 0, 0] * 2)),
},
timeout=4,
**SET_PARAMS,
)
def _perform_tuck_under(self):
"""Tucks the D'Kitty's legs so that they're under itself."""
# Bring in both sides of the D'Kitty while remaining flat.
self._perform_flatten()
# Tuck one side at a time.
for side in ('left', 'right'):
self._robot.set_state(
{side: RobotState(qpos=np.array(INWARD_TUCK_POSE))},
timeout=4,
**SET_PARAMS,
)
def _perform_flatten(self):
"""Makes the D'Kitty go into a flat pose."""
left_pose = INWARD_TUCK_POSE.copy()
left_pose[[0, 3]] = PI2
right_pose = INWARD_TUCK_POSE.copy()
right_pose[[0, 3]] = -PI2
self._robot.set_state(
{
'left': RobotState(qpos=left_pose),
'right': RobotState(qpos=right_pose),
},
timeout=4,
**SET_PARAMS,
)
def _perform_stand_up(self):
"""Makes the D'Kitty stand up."""
# Flip the back and front.
self._robot.set_state(
{
'back': RobotState(
qpos=np.array(OUTWARD_TUCK_POSE[3:].tolist() * 2)),
},
timeout=4,
**SET_PARAMS,
)
self._robot.set_state(
{
'front': RobotState(
qpos=np.array(OUTWARD_TUCK_POSE[:3].tolist() * 2)),
},
timeout=4,
**SET_PARAMS,
)
# Stand straight up.
self._robot.set_state(
{
'dkitty': RobotState(qpos=np.zeros(12)),
},
timeout=3,
**SET_PARAMS,
)
# Tuck a bit.
self._robot.set_state(
{
'dkitty': RobotState(qpos=np.array([0, PI6, -PI6] * 4)),
},
timeout=1,
**SET_PARAMS,
)
# Stand straight up.
self._robot.set_state(
{
'dkitty': RobotState(qpos=np.zeros(12)),
},
timeout=3,
**SET_PARAMS,
)
|
google-research/robel
|
robel/dkitty/utils/scripted_reset.py
|
Python
|
apache-2.0
| 7,355
|
import sys
import models
import model_utils
import math
import numpy as np
import video_level_models
import tensorflow as tf
import utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS
class BiUniLstmModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of Bi-Uni LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
lstm_size = int(FLAGS.lstm_cells)
    # Build a bidirectional LSTM layer followed by a unidirectional LSTM layer.
fw_cell = tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=False)
bw_cell = tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=False)
cell = tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=False)
loss = 0.0
with tf.variable_scope("RNN"):
l1_outputs, l1_states = tf.nn.bidirectional_dynamic_rnn(cell_fw = fw_cell, cell_bw = bw_cell,
inputs = model_input,
sequence_length=num_frames,
swap_memory=FLAGS.rnn_swap_memory,
dtype=tf.float32)
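      # bidirectional_dynamic_rnn returns a (forward, backward) pair of output
      # tensors; concatenating them on the last axis gives 2 * lstm_size
      # features per frame as input to the second, unidirectional LSTM layer.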
l1_outputs = tf.concat(l1_outputs, axis = 2)
l2_outputs, l2_states = tf.nn.dynamic_rnn(cell=cell,
inputs=l1_outputs,
sequence_length=num_frames,
swap_memory=FLAGS.rnn_swap_memory,
dtype=tf.float32)
state_fw, state_bw = l1_states
state = tf.concat([state_fw, state_bw, l2_states], axis = 1)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state,
original_input=model_input,
vocab_size=vocab_size,
**unused_params)
|
wangheda/youtube-8m
|
youtube-8m-wangheda/all_frame_models/biunilstm_model.py
|
Python
|
apache-2.0
| 2,542
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
import testscenarios
from oslo import messaging
from oslo_messaging._drivers import common as exceptions
from oslo_messaging.tests import utils as test_utils
from oslo_serialization import jsonutils
load_tests = testscenarios.load_tests_apply_scenarios
EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins'
class NovaStyleException(Exception):
format = 'I am Nova'
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
message = self.format % kwargs
super(NovaStyleException, self).__init__(message)
class KwargsStyleException(NovaStyleException):
format = 'I am %(who)s'
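# Helper that mimics what oslo.messaging does when it deserializes a remote
# exception: rebuild the exception type under a name with a "_Remote" suffix
# whose __str__ returns the original message.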
def add_remote_postfix(ex):
ex_type = type(ex)
message = str(ex)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override,
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
try:
ex.__class__ = new_ex_type
except TypeError:
ex.args = (message,) + ex.args[1:]
return ex
class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_log_failure = [
('log_failure', dict(log_failure=True)),
('do_not_log_failure', dict(log_failure=False)),
]
_add_remote = [
('add_remote', dict(add_remote=True)),
('do_not_add_remote', dict(add_remote=False)),
]
_exception_types = [
('bog_standard', dict(cls=Exception,
args=['test'],
kwargs={},
clsname='Exception',
modname=EXCEPTIONS_MODULE,
msg='test')),
('nova_style', dict(cls=NovaStyleException,
args=[],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='I am Nova')),
('nova_style_with_msg', dict(cls=NovaStyleException,
args=['testing'],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='testing')),
('kwargs_style', dict(cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
clsname='KwargsStyleException',
modname=__name__,
msg='I am Oslo')),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
cls._add_remote,
cls._exception_types)
def setUp(self):
super(SerializeRemoteExceptionTestCase, self).setUp()
def test_serialize_remote_exception(self):
errors = []
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.stubs.Set(exceptions.LOG, 'error', stub_error)
try:
try:
raise self.cls(*self.args, **self.kwargs)
except Exception as ex:
cls_error = ex
if self.add_remote:
ex = add_remote_postfix(ex)
raise ex
except Exception:
exc_info = sys.exc_info()
serialized = exceptions.serialize_remote_exception(
exc_info, log_failure=self.log_failure)
failure = jsonutils.loads(serialized)
self.assertEqual(self.clsname, failure['class'], failure)
self.assertEqual(self.modname, failure['module'])
self.assertEqual(self.msg, failure['message'])
self.assertEqual([self.msg], failure['args'])
self.assertEqual(self.kwargs, failure['kwargs'])
# Note: _Remote prefix not stripped from tracebacks
tb = cls_error.__class__.__name__ + ': ' + self.msg
self.assertIn(tb, ''.join(failure['tb']))
if self.log_failure:
self.assertTrue(len(errors) > 0, errors)
else:
self.assertEqual(0, len(errors), errors)
SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_standard_allowed = [__name__]
scenarios = [
('bog_standard',
dict(allowed=_standard_allowed,
clsname='Exception',
modname=EXCEPTIONS_MODULE,
cls=Exception,
args=['test'],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='Exception',
remote_args=('test\ntraceback\ntraceback\n', ),
remote_kwargs={})),
('nova_style',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=[],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='NovaStyleException_Remote',
remote_args=('I am Nova', ),
remote_kwargs={})),
('nova_style_with_msg',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=['testing'],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='NovaStyleException_Remote',
remote_args=('testing', ),
remote_kwargs={})),
('kwargs_style',
dict(allowed=_standard_allowed,
clsname='KwargsStyleException',
modname=__name__,
cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
str='test\ntraceback\ntraceback\n',
remote_name='KwargsStyleException_Remote',
remote_args=('I am Oslo', ),
remote_kwargs={})),
('not_allowed',
dict(allowed=[],
clsname='NovaStyleException',
modname=__name__,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: NovaStyleException test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: NovaStyleException test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'NovaStyleException',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_module',
dict(allowed=['notexist'],
clsname='Exception',
modname='notexist',
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_exception',
dict(allowed=[],
clsname='FarcicalError',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: FarcicalError test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: FarcicalError test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'FarcicalError',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_kwarg',
dict(allowed=[],
clsname='Exception',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={'foobar': 'blaa'},
str=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('system_exit',
dict(allowed=[],
clsname='SystemExit',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: SystemExit test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: SystemExit test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'SystemExit',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
]
def test_deserialize_remote_exception(self):
failure = {
'class': self.clsname,
'module': self.modname,
'message': 'test',
'tb': ['traceback\ntraceback\n'],
'args': self.args,
'kwargs': self.kwargs,
}
serialized = jsonutils.dumps(failure)
ex = exceptions.deserialize_remote_exception(serialized, self.allowed)
self.assertIsInstance(ex, self.cls)
self.assertEqual(self.remote_name, ex.__class__.__name__)
self.assertEqual(self.str, six.text_type(ex))
if hasattr(self, 'msg'):
self.assertEqual(self.msg, six.text_type(ex))
self.assertEqual((self.msg,) + self.remote_args, ex.args)
else:
self.assertEqual(self.remote_args, ex.args)
|
hkumarmk/oslo.messaging
|
tests/test_exception_serialization.py
|
Python
|
apache-2.0
| 11,234
|
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import aggregates as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
class AggregatesClient(base_compute_client.BaseComputeClient):
def list_aggregates(self):
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
body = json.loads(body)
self.validate_response(schema.list_aggregates, resp, body)
return rest_client.ResponseBody(resp, body)
def show_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % aggregate_id)
body = json.loads(body)
self.validate_response(schema.get_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def create_aggregate(self, **kwargs):
"""Create a new aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createAggregate
"""
post_body = json.dumps({'aggregate': kwargs})
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
self.validate_response(schema.create_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def update_aggregate(self, aggregate_id, **kwargs):
"""Update an aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateAggregate
"""
put_body = json.dumps({'aggregate': kwargs})
resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
body = json.loads(body)
self.validate_response(schema.update_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_aggregate(self, aggregate_id):
"""Delete the given aggregate."""
resp, body = self.delete("os-aggregates/%s" % aggregate_id)
self.validate_response(schema.delete_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_aggregate(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Return the primary type of resource this client works with."""
return 'aggregate'
def add_host(self, aggregate_id, **kwargs):
"""Add a host to the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addHost
"""
post_body = json.dumps({'add_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def remove_host(self, aggregate_id, **kwargs):
"""Remove a host from the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#removeAggregateHost
"""
post_body = json.dumps({'remove_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def set_metadata(self, aggregate_id, **kwargs):
"""Replace the aggregate's existing metadata with new metadata.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addAggregateMetadata
"""
post_body = json.dumps({'set_metadata': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_set_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
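    # Illustrative usage (a sketch, not part of tempest): given an already
    # authenticated AggregatesClient instance `client`, a typical flow is:
    #
    #     body = client.create_aggregate(name='agg1', availability_zone='nova')
    #     aggregate_id = body['aggregate']['id']
    #     client.add_host(aggregate_id, host='compute-1')
    #     client.remove_host(aggregate_id, host='compute-1')
    #     client.delete_aggregate(aggregate_id)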
|
sebrandon1/tempest
|
tempest/lib/services/compute/aggregates_client.py
|
Python
|
apache-2.0
| 4,901
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""decorator_utils tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from mxconsole.platform import test
from mxconsole.platform import tf_logging as logging
from mxconsole.util import decorator_utils
def _test_function(unused_arg=0):
pass
class GetQualifiedNameTest(test.TestCase):
def test_method(self):
self.assertEqual(
"GetQualifiedNameTest.test_method",
decorator_utils.get_qualified_name(GetQualifiedNameTest.test_method))
def test_function(self):
self.assertEqual("_test_function",
decorator_utils.get_qualified_name(_test_function))
class AddNoticeToDocstringTest(test.TestCase):
def _check(self, doc, expected):
self.assertEqual(
decorator_utils.add_notice_to_docstring(
doc=doc,
instructions="Instructions",
no_doc_str="Nothing here",
suffix_str="(suffix)",
notice=["Go away"]),
expected)
def test_regular(self):
expected = ("Brief (suffix)\n\nGo away\nInstructions\n\nDocstring\n\n"
"Args:\n arg1: desc")
# No indent for main docstring
self._check("Brief\n\nDocstring\n\nArgs:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines not indented
self._check("Brief\n\n Docstring\n\n Args:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines indented as well.
self._check("Brief\n \n Docstring\n \n Args:\n arg1: desc", expected)
# No indent for main docstring, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
# 2 space indent, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
def test_brief_only(self):
expected = "Brief (suffix)\n\nGo away\nInstructions"
self._check("Brief", expected)
self._check("Brief\n", expected)
self._check("Brief\n ", expected)
self._check("\nBrief\n ", expected)
self._check("\n Brief\n ", expected)
def test_no_docstring(self):
expected = "Nothing here\n\nGo away\nInstructions"
self._check(None, expected)
self._check("", expected)
def test_no_empty_line(self):
expected = "Brief (suffix)\n\nGo away\nInstructions\n\nDocstring"
# No second line indent
self._check("Brief\nDocstring", expected)
# 2 space second line indent
self._check("Brief\n Docstring", expected)
# No second line indent, first line blank
self._check("\nBrief\nDocstring", expected)
# 2 space second line indent, first line blank
self._check("\n Brief\n Docstring", expected)
class ValidateCallableTest(test.TestCase):
def test_function(self):
decorator_utils.validate_callable(_test_function, "test")
def test_method(self):
decorator_utils.validate_callable(self.test_method, "test")
def test_callable(self):
class TestClass(object):
def __call__(self):
pass
decorator_utils.validate_callable(TestClass(), "test")
def test_partial(self):
partial = functools.partial(_test_function, unused_arg=7)
decorator_utils.validate_callable(partial, "test")
def test_fail_non_callable(self):
x = 0
self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")
if __name__ == "__main__":
test.main()
|
bravomikekilo/mxconsole
|
mxconsole/util/decorator_utils_test.py
|
Python
|
apache-2.0
| 4,173
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from flask_admin.form import DateTimePickerWidget
from wtforms import DateTimeField, SelectField
from flask_wtf import Form
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class DateTimeWithNumRunsForm(Form):
# Date time and number of runs form for tree view, task duration
# and landing times
base_date = DateTimeField(
"Anchor date", widget=DateTimePickerWidget(), default=datetime.now())
num_runs = SelectField("Number of runs", default=25, choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
))
|
opensignal/airflow
|
airflow/www/forms.py
|
Python
|
apache-2.0
| 898
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 05/10/2012
@author: Alex Ip
'''
import os
import sys
import logging
import numpy
from osgeo import osr
import numexpr
import netCDF4
from scipy.ndimage import sobel
from geophys_utils._netcdf_grid_utils import NetCDFGridUtils
from geophys_utils._array_pieces import array_pieces
from geophys_utils._vincenty import vinc_dist
from geophys_utils._blrb import interpolate_grid
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:
logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
RADIANS_PER_DEGREE = 0.01745329251994329576923690768489
class earth(object):
# Mean radius
RADIUS = 6371009.0 # (metres)
# WGS-84
#RADIUS = 6378135.0 # equatorial (metres)
#RADIUS = 6356752.0 # polar (metres)
# Length of Earth ellipsoid semi-major axis (metres)
SEMI_MAJOR_AXIS = 6378137.0
# WGS-84
A = 6378137.0 # equatorial radius (metres)
B = 6356752.3142 # polar radius (metres)
F = (A - B) / A # flattening
ECC2 = 1.0 - B**2/A**2 # squared eccentricity
MEAN_RADIUS = (A*2 + B) / 3
# Earth ellipsoid eccentricity (dimensionless)
#ECCENTRICITY = 0.00669438
#ECC2 = math.pow(ECCENTRICITY, 2)
# Earth rotational angular velocity (radians/sec)
OMEGA = 0.000072722052
class DEMUtils(NetCDFGridUtils):
    def getFileSizekB(self, path):
        """Gets the size of a file (kilobytes).
Arguments:
path: file path
Returns:
            File size (kB)
Raises:
OSError [Errno=2] if file does not exist
"""
return os.path.getsize(path) / 1024
def getFileSizeMB(self, path):
"""Gets the size of a file (megabytes).
Arguments:
path: file path
Returns:
File size (MB)
Raises:
OSError [Errno=2] if file does not exist
"""
return self.getFileSizekB(path) / 1024
def get_pixel_size(self, index_tuple):
"""
Returns X & Y sizes in metres of specified pixel as a tuple.
N.B: Pixel ordinates are zero-based from top left
"""
x, y = index_tuple
logger.debug('(x, y) = (%f, %f)', x, y)
native_spatial_reference = osr.SpatialReference()
native_spatial_reference.ImportFromWkt(self.crs)
latlong_spatial_reference = native_spatial_reference.CloneGeogCS()
coord_transform_to_latlong = osr.CoordinateTransformation(native_spatial_reference, latlong_spatial_reference)
# Determine pixel centre and edges in georeferenced coordinates
xw = self.GeoTransform[0] + x * self.GeoTransform[1]
yn = self.GeoTransform[3] + y * self.GeoTransform[5]
xc = self.GeoTransform[0] + (x + 0.5) * self.GeoTransform[1]
yc = self.GeoTransform[3] + (y + 0.5) * self.GeoTransform[5]
xe = self.GeoTransform[0] + (x + 1.0) * self.GeoTransform[1]
ys = self.GeoTransform[3] + (y + 1.0) * self.GeoTransform[5]
logger.debug('xw = %f, yn = %f, xc = %f, yc = %f, xe = %f, ys = %f', xw, yn, xc, yc, xe, ys)
# Convert georeferenced coordinates to lat/lon for Vincenty
lon1, lat1, _z = coord_transform_to_latlong.TransformPoint(xw, yc, 0)
lon2, lat2, _z = coord_transform_to_latlong.TransformPoint(xe, yc, 0)
logger.debug('For X size: (lon1, lat1) = (%f, %f), (lon2, lat2) = (%f, %f)', lon1, lat1, lon2, lat2)
x_size, _az_to, _az_from = vinc_dist(earth.F, earth.A,
lat1 * RADIANS_PER_DEGREE, lon1 * RADIANS_PER_DEGREE,
lat2 * RADIANS_PER_DEGREE, lon2 * RADIANS_PER_DEGREE)
lon1, lat1, _z = coord_transform_to_latlong.TransformPoint(xc, yn, 0)
lon2, lat2, _z = coord_transform_to_latlong.TransformPoint(xc, ys, 0)
logger.debug('For Y size: (lon1, lat1) = (%f, %f), (lon2, lat2) = (%f, %f)', lon1, lat1, lon2, lat2)
y_size, _az_to, _az_from = vinc_dist(earth.F, earth.A,
lat1 * RADIANS_PER_DEGREE, lon1 * RADIANS_PER_DEGREE,
lat2 * RADIANS_PER_DEGREE, lon2 * RADIANS_PER_DEGREE)
logger.debug('(x_size, y_size) = (%f, %f)', x_size, y_size)
return (x_size, y_size)
def get_pixel_size_grid(self, source_array, offsets):
""" Returns grid with interpolated X and Y pixel sizes for given arrays"""
def get_pixel_x_size(x, y):
return self.get_pixel_size((offsets[0] + x, offsets[1] + y))[0]
def get_pixel_y_size(x, y):
return self.get_pixel_size((offsets[0] + x, offsets[1] + y))[1]
pixel_size_function = [get_pixel_x_size, get_pixel_y_size]
pixel_size_grid = numpy.zeros(shape=(source_array.shape[0], source_array.shape[1], 2)).astype(source_array.dtype)
for dim_index in range(2):
interpolate_grid(depth=1,
shape=pixel_size_grid[:,:,dim_index].shape,
eval_func=pixel_size_function[dim_index],
grid=pixel_size_grid[:,:,dim_index])
return pixel_size_grid
def __init__(self, dem_dataset):
"""Constructor
Arguments:
            dem_dataset: NetCDF dataset containing DEM data
"""
# Start of init function - Call inherited constructor first
NetCDFGridUtils.__init__(self, dem_dataset)
def create_dzdxy_arrays(self, elevation_array, offsets):
'''
Function to return two arrays containing dzdx and dzdy values
'''
def pixels_in_m():
'''
Function returning True if pixels are in metres
'''
result = True
for dimension_name in self.data_variable.dimensions:
try:
if self.netcdf_dataset.variables[dimension_name].units == 'm':
continue
else:
result = False
break
except:
result = False
break
return result
native_pixel_x_size = float(abs(self.GeoTransform[1]))
native_pixel_y_size = float(abs(self.GeoTransform[5]))
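        # scipy's 3x3 Sobel kernel computes a weighted difference of the
        # neighbouring columns/rows (weights 1, 2, 1), so dividing the filter
        # response by 8 * pixel size yields the elevation gradient in
        # rise-per-metre, as in the standard Horn finite-difference method.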
dzdx_array = sobel(elevation_array, axis=1)/(8. * native_pixel_x_size)
dzdy_array = sobel(elevation_array, axis=0)/(8. * native_pixel_y_size)
if pixels_in_m():
print('Pixels are a uniform size of {} x {} metres.'.format(native_pixel_x_size, native_pixel_y_size))
# Pixel sizes are in metres - use scalars
pixel_x_metres = native_pixel_x_size
pixel_y_metres = native_pixel_y_size
else:
print('Pixels are of varying sizes. Computing and applying pixel size arrays.')
# Compute variable pixel size
m_array = self.get_pixel_size_grid(elevation_array, offsets)
pixel_x_metres = m_array[:,:,0]
pixel_y_metres = m_array[:,:,1]
dzdx_array = numexpr.evaluate("dzdx_array * native_pixel_x_size / pixel_x_metres")
dzdy_array = numexpr.evaluate("dzdy_array * native_pixel_y_size / pixel_y_metres")
return dzdx_array, dzdy_array
def create_slope_array(self, dzdx_array, dzdy_array):
hypotenuse_array = numpy.hypot(dzdx_array, dzdy_array)
slope_array = numexpr.evaluate("arctan(hypotenuse_array) / RADIANS_PER_DEGREE")
#Blank out no-data cells
slope_array[numpy.isnan(slope_array)] = self.data_variable._FillValue
return slope_array
def create_aspect_array(self, dzdx_array, dzdy_array):
# Convert angles from conventional radians to compass heading 0-360
aspect_array = numexpr.evaluate("(450 - arctan2(dzdy_array, -dzdx_array) / RADIANS_PER_DEGREE) % 360")
#Blank out no-data cells
aspect_array[numpy.isnan(aspect_array)] = self.data_variable._FillValue
return aspect_array
def create_slope_and_aspect(self, slope_path=None, aspect_path=None, overlap=4):
'''
Create slope & aspect datasets from elevation
'''
# Copy dataset structure but not data
slope_path = slope_path or os.path.splitext(self.nc_path)[0] + '_slope.nc'
self.copy(slope_path, empty_var_list=[self.data_variable.name])
slope_nc_dataset = netCDF4.Dataset(slope_path, 'r+')
slope_nc_dataset.renameVariable(self.data_variable.name, 'slope')
slope_variable = slope_nc_dataset.variables['slope']
slope_variable.long_name = 'slope expressed in degrees from horizontal (0=horizontal, 90=vertical)'
slope_variable.units = 'degrees'
aspect_path = aspect_path or os.path.splitext(self.nc_path)[0] + '_aspect.nc'
self.copy(aspect_path, empty_var_list=[self.data_variable.name])
aspect_nc_dataset = netCDF4.Dataset(aspect_path, 'r+')
aspect_nc_dataset.renameVariable(self.data_variable.name, 'aspect')
aspect_variable = aspect_nc_dataset.variables['aspect']
aspect_variable.long_name = 'aspect expressed compass bearing of normal to plane (0=North, 90=East, etc.)'
aspect_variable.units = 'degrees'
# Process dataset in small pieces
for piece_array, offsets in array_pieces(self.data_variable,
max_bytes=self.max_bytes if self.opendap else self.max_bytes/2, # Need to allow for multiple arrays in memory
overlap=overlap):
print('Processing array of shape {} at {}'.format(piece_array.shape, offsets))
if type(piece_array) == numpy.ma.masked_array:
piece_array = piece_array.data # Convert from masked array to plain array
piece_array[(piece_array == self.data_variable._FillValue)] = numpy.NaN
# Calculate raw source & destination slices including overlaps
source_slices = [slice(0,
piece_array.shape[dim_index])
for dim_index in range(2)
]
dest_slices = [slice(offsets[dim_index],
offsets[dim_index] + piece_array.shape[dim_index])
for dim_index in range(2)
]
# Trim overlaps off source & destination slices
source_slices = [slice(0 if dest_slices[dim_index].start < overlap else source_slices[dim_index].start+overlap,
piece_array.shape[dim_index] if (self.data_variable.shape[dim_index] - dest_slices[dim_index].stop) < overlap else source_slices[dim_index].stop-overlap)
for dim_index in range(2)
]
dest_slices = [slice(0 if dest_slices[dim_index].start < overlap else dest_slices[dim_index].start+overlap,
self.data_variable.shape[dim_index] if (self.data_variable.shape[dim_index] - dest_slices[dim_index].stop) < overlap else dest_slices[dim_index].stop-overlap)
for dim_index in range(2)
]
print('Computing dzdx and dzdy arrays')
dzdx_array, dzdy_array = self.create_dzdxy_arrays(piece_array, offsets)
print('Computing slope array')
result_array = self.create_slope_array(dzdx_array, dzdy_array)
            print('Writing slope array of shape {} at {}'.format(tuple([dest_slices[dim_index].stop - dest_slices[dim_index].start
for dim_index in range(2)
]),
tuple([dest_slices[dim_index].start
for dim_index in range(2)
])
)
)
slope_variable[dest_slices] = result_array[source_slices]
slope_nc_dataset.sync()
print('Computing aspect array')
result_array = self.create_aspect_array(dzdx_array, dzdy_array)
print('Writing aspect array of shape {} at {}'.format(tuple([dest_slices[dim_index].stop - dest_slices[dim_index].start
for dim_index in range(2)
]),
tuple([dest_slices[dim_index].start
for dim_index in range(2)
])
)
)
aspect_variable[dest_slices] = result_array[source_slices]
aspect_nc_dataset.sync()
slope_nc_dataset.close()
print('Finished writing slope dataset {}'.format(slope_path))
aspect_nc_dataset.close()
print('Finished writing aspect dataset {}'.format(aspect_path))
if __name__ == '__main__':
# Define command line arguments
dem_path = sys.argv[1]
try:
slope_path = sys.argv[2]
except IndexError:
slope_path = None
try:
aspect_path = sys.argv[3]
except IndexError:
aspect_path = None
dem_utils = DEMUtils(dem_path)
dem_utils.create_slope_and_aspect(slope_path, aspect_path)
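# Illustrative sketch (not part of the original module): the slope/aspect maths
# used by create_slope_array and create_aspect_array, applied to a single cell.
# Assumes RADIANS_PER_DEGREE is pi / 180, matching the constant referenced in the
# numexpr expressions above; the gradient values are made up for illustration.
def _example_slope_aspect(dzdx=0.5, dzdy=0.5):
    import math
    radians_per_degree = math.pi / 180.0
    # Slope: arctan of the gradient magnitude, in degrees from horizontal
    slope = math.atan(math.hypot(dzdx, dzdy)) / radians_per_degree
    # Aspect: compass bearing of the normal to the plane (0=North, 90=East, etc.)
    aspect = (450.0 - math.atan2(dzdy, -dzdx) / radians_per_degree) % 360.0
    return slope, aspect  # approximately (35.3, 315.0) for a uniform 0.5 gradient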
|
alex-ip/geophys_utils
|
geophys_utils/_dem_utils.py
|
Python
|
apache-2.0
| 15,195
|
"""
taskmaster.cli.master
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from taskmaster.util import parse_options
from taskmaster.constants import (DEFAULT_LOG_LEVEL,
DEFAULT_ADDRESS, DEFAULT_BUFFER_SIZE)
def run(target, kwargs=None, reset=False, size=DEFAULT_BUFFER_SIZE,
address=DEFAULT_ADDRESS, log_level=DEFAULT_LOG_LEVEL):
from taskmaster.server import Server, Controller
server = Server(address, size=size, log_level=log_level)
controller = Controller(server, target, kwargs=kwargs, log_level=log_level)
if reset:
controller.reset()
controller.start()
def main():
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("--address", dest="address", default=DEFAULT_ADDRESS)
parser.add_option("--size", dest="size", default=DEFAULT_BUFFER_SIZE, type=int)
parser.add_option("--reset", dest="reset", default=False, action='store_true')
parser.add_option("--log-level", dest="log_level", default=DEFAULT_LOG_LEVEL)
(options, args) = parser.parse_args()
if len(args) < 1:
print 'Usage: tm-master <callback> [key=value, key2=value2]'
sys.exit(1)
sys.exit(run(args[0], parse_options(args[1:]), **options.__dict__))
if __name__ == '__main__':
main()
|
dcramer/taskmaster
|
src/taskmaster/cli/master.py
|
Python
|
apache-2.0
| 1,378
|
#!/usr/bin/env python
# import modules needed
from pprint import pprint as pp
import json, yaml
# initialize list and dict
my_list = []
my_dict = {'key1': 'val1','key2': 'val2', 'key3': {'subkey1': 'subval1', 'subkey2': 'subval2'}}
my_list = list(range(10))
my_list.append(my_dict)
# dump to json
with open('json.txt', 'w') as f:
json.dump(my_list, f, sort_keys=True, indent=4)
# dump to yaml
with open('yaml.txt', 'w') as f:
yaml.dump(my_list, f)
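# Illustrative sketch (not part of the original exercise): reading the two files
# back with the matching loaders should reproduce the list of ints plus the
# nested dict written above.
with open('json.txt') as f:
    json_roundtrip = json.load(f)
with open('yaml.txt') as f:
    yaml_roundtrip = yaml.safe_load(f)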
|
tomjshine/pynet
|
week1/ex6.py
|
Python
|
apache-2.0
| 454
|
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.platforms.base import BasePlatform
from platformio.util import get_systype
class Linux_x86_64Platform(BasePlatform):
"""
Linux x86_64 (64-bit) is a Unix-like and mostly POSIX-compliant
computer operating system (OS) assembled under the model of free and
open-source software development and distribution.
Using a host OS (Mac OS X or Linux 64-bit) you can build native
applications for the Linux x86_64 platform.
http://platformio.org/platforms/linux_x86_64
"""
PACKAGES = {
"toolchain-gcclinux64": {
"alias": "toolchain",
"default": True
}
}
def __init__(self):
if get_systype() == "linux_x86_64":
del self.PACKAGES['toolchain-gcclinux64']
BasePlatform.__init__(self)
|
valeros/platformio
|
platformio/platforms/linux_x86_64.py
|
Python
|
apache-2.0
| 1,419
|
from typing import List
import numpy as np
import functools
from federatedml.ensemble.boosting.hetero.hetero_secureboost_guest import HeteroSecureBoostingTreeGuest
from federatedml.param.boosting_param import HeteroFastSecureBoostParam
from federatedml.ensemble.basic_algorithms import HeteroFastDecisionTreeGuest
from federatedml.ensemble.boosting.hetero import hetero_fast_secureboost_plan as plan
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroFastSecureBoostingTreeGuest(HeteroSecureBoostingTreeGuest):
def __init__(self):
super(HeteroFastSecureBoostingTreeGuest, self).__init__()
self.tree_num_per_party = 1
self.guest_depth = 0
self.host_depth = 0
self.work_mode = consts.MIX_TREE
self.init_tree_plan = False
self.tree_plan = []
self.model_param = HeteroFastSecureBoostParam()
self.model_name = 'HeteroFastSecureBoost'
def _init_model(self, param: HeteroFastSecureBoostParam):
super(HeteroFastSecureBoostingTreeGuest, self)._init_model(param)
self.tree_num_per_party = param.tree_num_per_party
self.work_mode = param.work_mode
self.guest_depth = param.guest_depth
self.host_depth = param.host_depth
if self.work_mode == consts.MIX_TREE and self.EINI_inference:
LOGGER.info('Mix mode of fast-sbt does not support EINI predict, reset to False')
self.EINI_inference = False
def get_tree_plan(self, idx):
if not self.init_tree_plan:
tree_plan = plan.create_tree_plan(self.work_mode, k=self.tree_num_per_party, tree_num=self.boosting_round,
host_list=self.component_properties.host_party_idlist,
complete_secure=self.complete_secure)
self.tree_plan += tree_plan
self.init_tree_plan = True
LOGGER.info('tree plan is {}'.format(self.tree_plan))
return self.tree_plan[idx]
def check_host_number(self, tree_type):
host_num = len(self.component_properties.host_party_idlist)
LOGGER.info('host number is {}'.format(host_num))
if tree_type == plan.tree_type_dict['layered_tree']:
assert host_num == 1, 'only 1 host party is allowed in layered mode'
def fit_a_booster(self, epoch_idx: int, booster_dim: int):
# prepare tree plan
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
LOGGER.info('tree work mode is {}'.format(tree_type))
self.check_host_number(tree_type)
if self.cur_epoch_idx != epoch_idx:
# update g/h every epoch
self.grad_and_hess = self.compute_grad_and_hess(self.y_hat, self.y, self.data_inst)
self.cur_epoch_idx = epoch_idx
g_h = self.get_grad_and_hess(self.grad_and_hess, booster_dim)
tree = HeteroFastDecisionTreeGuest(tree_param=self.tree_param)
tree.init(flowid=self.generate_flowid(epoch_idx, booster_dim),
data_bin=self.data_bin, bin_split_points=self.bin_split_points, bin_sparse_points=self.bin_sparse_points,
grad_and_hess=g_h,
encrypter=self.encrypter, encrypted_mode_calculator=self.encrypted_calculator,
valid_features=self.sample_valid_features(),
host_party_list=self.component_properties.host_party_idlist,
runtime_idx=self.component_properties.local_partyid,
goss_subsample=self.enable_goss,
top_rate=self.top_rate, other_rate=self.other_rate,
task_type=self.task_type,
complete_secure=True if (self.cur_epoch_idx == 0 and self.complete_secure) else False,
cipher_compressing=self.cipher_compressing,
max_sample_weight=self.max_sample_weight,
new_ver=self.new_ver
)
tree.set_tree_work_mode(tree_type, target_host_id)
tree.set_layered_depth(self.guest_depth, self.host_depth)
tree.fit()
self.update_feature_importance(tree.get_feature_importance())
if self.work_mode == consts.LAYERED_TREE:
self.sync_feature_importance()
return tree
@staticmethod
def traverse_guest_local_trees(node_pos, sample, trees: List[HeteroFastDecisionTreeGuest]):
"""
in mix mode, a sample can reach a leaf directly
"""
new_node_pos = node_pos + 0  # copy to avoid in-place modification
for t_idx, tree in enumerate(trees):
cur_node_idx = new_node_pos[t_idx]
if not tree.use_guest_feat_only_predict_mode:
continue
rs, reach_leaf = HeteroSecureBoostingTreeGuest.traverse_a_tree(tree, sample, cur_node_idx)
new_node_pos[t_idx] = rs
return new_node_pos
@staticmethod
def merge_leaf_pos(pos1, pos2):
return pos1 + pos2
# this func will be called by super class's predict()
def boosting_fast_predict(self, data_inst, trees: List[HeteroFastDecisionTreeGuest], predict_cache=None,
pred_leaf=False):
LOGGER.info('fast sbt running predict')
if self.work_mode == consts.MIX_TREE:
LOGGER.info('running mix mode predict')
tree_num = len(trees)
node_pos = data_inst.mapValues(lambda x: np.zeros(tree_num, dtype=np.int64))
# traverse local trees
traverse_func = functools.partial(self.traverse_guest_local_trees, trees=trees)
guest_leaf_pos = node_pos.join(data_inst, traverse_func)
# get leaf node from other host parties
host_leaf_pos_list = self.hetero_sbt_transfer_variable.host_predict_data.get(idx=-1)
for host_leaf_pos in host_leaf_pos_list:
guest_leaf_pos = guest_leaf_pos.join(host_leaf_pos, self.merge_leaf_pos)
if pred_leaf: # predict leaf, return leaf position only
return guest_leaf_pos
else:
predict_result = self.get_predict_scores(leaf_pos=guest_leaf_pos, learning_rate=self.learning_rate,
init_score=self.init_score, trees=trees,
multi_class_num=self.booster_dim, predict_cache=predict_cache)
return predict_result
else:
LOGGER.debug('running layered mode predict')
return super(HeteroFastSecureBoostingTreeGuest, self).boosting_fast_predict(data_inst, trees, predict_cache,
pred_leaf=pred_leaf)
def load_booster(self, model_meta, model_param, epoch_idx, booster_idx):
tree = HeteroFastDecisionTreeGuest(self.tree_param)
tree.load_model(model_meta, model_param)
tree.set_flowid(self.generate_flowid(epoch_idx, booster_idx))
tree.set_runtime_idx(self.component_properties.local_partyid)
tree.set_host_party_idlist(self.component_properties.host_party_idlist)
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
tree.set_tree_work_mode(tree_type, target_host_id)
if self.tree_plan[epoch_idx][0] == plan.tree_type_dict['guest_feat_only']:
LOGGER.debug('tree of epoch {} is guest only'.format(epoch_idx))
tree.use_guest_feat_only_predict_mode()
return tree
def get_model_meta(self):
_, model_meta = super(HeteroFastSecureBoostingTreeGuest, self).get_model_meta()
meta_name = consts.HETERO_FAST_SBT_GUEST_MODEL + "Meta"
model_meta.work_mode = self.work_mode
return meta_name, model_meta
def get_model_param(self):
_, model_param = super(HeteroFastSecureBoostingTreeGuest, self).get_model_param()
param_name = consts.HETERO_FAST_SBT_GUEST_MODEL + 'Param'
model_param.tree_plan.extend(plan.encode_plan(self.tree_plan))
model_param.model_name = consts.HETERO_FAST_SBT_MIX if self.work_mode == consts.MIX_TREE else \
consts.HETERO_FAST_SBT_LAYERED
return param_name, model_param
def set_model_meta(self, model_meta):
super(HeteroFastSecureBoostingTreeGuest, self).set_model_meta(model_meta)
self.work_mode = model_meta.work_mode
def set_model_param(self, model_param):
super(HeteroFastSecureBoostingTreeGuest, self).set_model_param(model_param)
self.tree_plan = plan.decode_plan(model_param.tree_plan)
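# Illustrative sketch (not part of the original class): in mix mode each party
# traverses only the trees it owns and leaves zeros elsewhere, so the element-wise
# addition in merge_leaf_pos combines the two partial leaf-position vectors.
# The position values below are invented for illustration.
def _example_merge_leaf_pos():
    guest_pos = np.array([5, 0, 3], dtype=np.int64)  # guest-owned trees 0 and 2
    host_pos = np.array([0, 7, 0], dtype=np.int64)   # host-owned tree 1
    return HeteroFastSecureBoostingTreeGuest.merge_leaf_pos(guest_pos, host_pos)
    # -> array([5, 7, 3])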
|
FederatedAI/FATE
|
python/federatedml/ensemble/boosting/hetero/hetero_fast_secureboost_guest.py
|
Python
|
apache-2.0
| 8,618
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.db import model_query
from neutron_lib import fixture
from neutron_lib.tests import _base
from neutron_lib.utils import helpers
# TODO(boden): find a way to test other model_query functions
class TestHooks(_base.BaseTestCase):
def setUp(self):
super(TestHooks, self).setUp()
self.useFixture(fixture.DBQueryHooksFixture())
def _mock_hook(self, x):
return x
def test_register_hook(self):
mock_model = mock.Mock()
model_query.register_hook(
mock_model, 'hook1', self._mock_hook,
self._mock_hook, result_filters=self._mock_hook)
self.assertEqual(1, len(model_query._model_query_hooks.keys()))
hook_ref = helpers.make_weak_ref(self._mock_hook)
registered_hooks = model_query.get_hooks(mock_model)
self.assertEqual(1, len(registered_hooks))
for d in registered_hooks:
for k in d.keys():
self.assertEqual(hook_ref, d.get(k))
def test_register_hook_non_callables(self):
mock_model = mock.Mock()
model_query.register_hook(
mock_model, 'hook1', self._mock_hook, {}, result_filters={})
self.assertEqual(1, len(model_query._model_query_hooks.keys()))
hook_ref = helpers.make_weak_ref(self._mock_hook)
registered_hooks = model_query.get_hooks(mock_model)
self.assertEqual(1, len(registered_hooks))
for d in registered_hooks:
for k in d.keys():
if k == 'query':
self.assertEqual(hook_ref, d.get(k))
else:
self.assertEqual({}, d.get(k))
def test_get_values(self):
mock_model = mock.Mock()
mock_context = mock.Mock()
with mock.patch.object(
model_query, 'query_with_hooks') as query_with_hooks:
query_with_hooks.return_value = [['value1'], ['value2']]
values = model_query.get_values(mock_context, mock_model,
'fake_field')
self.assertEqual(['value1', 'value2'], values)
query_with_hooks.assert_called_with(
mock_context, mock_model, field='fake_field')
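# Illustrative sketch (not part of the original tests): the registration pattern
# exercised above, with plain callables instead of mocks. The hook name and the
# no-op callable are invented for illustration.
def _example_register_noop_hooks(model):
    def noop(*args, **kwargs):
        return None
    model_query.register_hook(model, 'example_hook', noop, noop,
                              result_filters=noop)
    return model_query.get_hooks(model)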
|
openstack/neutron-lib
|
neutron_lib/tests/unit/db/test_model_query.py
|
Python
|
apache-2.0
| 2,803
|
# File to calculate properties for a molecule and add these properties back to the molecule
# the property to be calculated will be passed in using a request.header string
from java import lang
from com.im.lac.types import MoleculeObject, MoleculeObjectIterable
lang.System.loadLibrary('GraphMolWrap')
from org.RDKit import *
import sys
def num_hba(mol):
"""Function for calculating number of H-bond acceptors
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumHBA(mol)
def num_hbd(mol):
"""Function for calculating number of H-bond donors
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumHBD(mol)
def num_rings(mol):
"""Function for calculating number of rings
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumRings(mol)
def mol_logp(mol):
"""Function for calculating mol log p
Takes an RDKit molecule
Returns a float"""
return RDKFuncs.calcMolLogP(mol)
def mol_mr(mol):
"""Function to find the mass of a molecule
Takes an RDKit molecule
Returns a float"""
return RDKFuncs.calcMolMR(mol)
# A dictionary mapping property-name strings to functions
funct_dict = {"num_hba": num_hba,
"num_hbd": num_hbd,
"num_rings": num_rings,
"mol_logp": mol_logp,
"mol_mr": mol_mr}
def calc_props(rdmol, function):
try:
val = funct_dict[function](rdmol)
except:
val = None
sys.stderr.write("ERROR CALCULATNG PROPERTY -> " + function)
return val
## 1) Stream of molecules
## 2) String relating to property
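# Illustrative sketch (not part of the original script): properties are requested
# by name and dispatched through funct_dict via calc_props; unknown names or
# calculation failures yield None instead of raising. The property names below
# are taken from funct_dict.
def _example_calc(rdmol):
    requested = ["num_hba", "num_hbd", "mol_logp"]
    return dict((name, calc_props(rdmol, name)) for name in requested)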
|
InformaticsMatters/squonk
|
components/rdkit-camel/src/main/python/find_props/find_props.py
|
Python
|
apache-2.0
| 1,538
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.tests.functional.sol_kubernetes.vnflcm import base as vnflcm_base
class VnfLcmKubernetesMultiNsTest(vnflcm_base.BaseVnfLcmKubernetesTest):
@classmethod
def setUpClass(cls):
super(VnfLcmKubernetesMultiNsTest, cls).setUpClass()
vnf_package_id, cls.vnfd_id = cls._create_and_upload_vnf_package(
cls, cls.tacker_client, "test_cnf_multi_ns",
{"key": "sample_multi_ns_functional"})
cls.vnf_package_ids.append(vnf_package_id)
@classmethod
def tearDownClass(cls):
super(VnfLcmKubernetesMultiNsTest, cls).tearDownClass()
def _test_cnf_scale(self, vnf_instance, aspect_id,
number_of_steps=1, error=False):
scale_level = self._get_scale_level_by_aspect_id(
vnf_instance, aspect_id)
# test scale out
scale_level = self._test_scale(
vnf_instance['id'], 'SCALE_OUT', aspect_id, scale_level,
number_of_steps, error)
if error:
return scale_level
# test scale in
scale_level = self._test_scale(
vnf_instance['id'], 'SCALE_IN', aspect_id, scale_level,
number_of_steps)
return scale_level
def test_multi_tenant_k8s_additional_params(self):
vnf_instance_name = "multi_tenant_k8s_additional_params"
vnf_instance_description = "multi tenant k8s additional params"
files = ["Files/kubernetes/deployment_has_namespace.yaml",
"Files/kubernetes/namespace01.yaml"]
additional_param = {
"lcm-kubernetes-def-files": files,
"namespace": "multi-namespace01"}
# instantiate
vnf_instance = self._create_and_instantiate_vnf_instance(
self.vnfd_id, "simple", vnf_instance_name,
vnf_instance_description, additional_param)
# scale
self._test_cnf_scale(vnf_instance, "vdu1_aspect", number_of_steps=1)
before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
deployment_target_vnfc = [vnfc_rsc for vnfc_rsc in before_vnfc_rscs if
vnfc_rsc['vduId'] == 'VDU1'][0]
vnfc_instance_id = [deployment_target_vnfc['id']]
# heal
after_vnfc_rscs = self._test_heal(vnf_instance, vnfc_instance_id)
for vnfc_rsc in after_vnfc_rscs:
after_pod_name = vnfc_rsc['computeResource']['resourceId']
if vnfc_rsc['id'] == deployment_target_vnfc['id']:
after_resource = deployment_target_vnfc
compute_resource = after_resource['computeResource']
before_pod_name = compute_resource['resourceId']
self.assertNotEqual(after_pod_name, before_pod_name)
# terminate
self._terminate_vnf_instance(vnf_instance['id'])
self._delete_vnf_instance(vnf_instance['id'])
def test_multi_tenant_k8s_manifest(self):
vnf_instance_name = "multi_tenant_k8s_manifest"
vnf_instance_description = "multi tenant k8s manifest"
files = ["Files/kubernetes/deployment_has_namespace.yaml",
"Files/kubernetes/namespace02.yaml"]
additional_param = {"lcm-kubernetes-def-files": files}
# instantiate
vnf_instance = self._create_and_instantiate_vnf_instance(
self.vnfd_id, "simple", vnf_instance_name,
vnf_instance_description, additional_param)
# scale
self._test_cnf_scale(vnf_instance, "vdu1_aspect", number_of_steps=1)
before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
deployment_target_vnfc = [vnfc_rsc for vnfc_rsc in before_vnfc_rscs if
vnfc_rsc['vduId'] == 'VDU1'][0]
vnfc_instance_id = [deployment_target_vnfc['id']]
# heal
after_vnfc_rscs = self._test_heal(vnf_instance, vnfc_instance_id)
for vnfc_rsc in after_vnfc_rscs:
after_pod_name = vnfc_rsc['computeResource']['resourceId']
if vnfc_rsc['id'] == deployment_target_vnfc['id']:
after_resource = deployment_target_vnfc
compute_resource = after_resource['computeResource']
before_pod_name = compute_resource['resourceId']
self.assertNotEqual(after_pod_name, before_pod_name)
# terminate
self._terminate_vnf_instance(vnf_instance['id'])
self._delete_vnf_instance(vnf_instance['id'])
def test_multi_tenant_k8s_default(self):
vnf_instance_name = "multi_tenant_k8s_default"
vnf_instance_description = "multi tenant k8s default"
files = ["Files/kubernetes/deployment_no_namespace.yaml"]
additional_param = {"lcm-kubernetes-def-files": files}
# instantiate
vnf_instance = self._create_and_instantiate_vnf_instance(
self.vnfd_id, "simple", vnf_instance_name,
vnf_instance_description, additional_param)
# scale
self._test_cnf_scale(vnf_instance, "vdu2_aspect", number_of_steps=1)
before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
deployment_target_vnfc = [vnfc_rsc for vnfc_rsc in before_vnfc_rscs if
vnfc_rsc['vduId'] == 'VDU2'][0]
vnfc_instance_id = [deployment_target_vnfc['id']]
# heal
after_vnfc_rscs = self._test_heal(vnf_instance, vnfc_instance_id)
for vnfc_rsc in after_vnfc_rscs:
after_pod_name = vnfc_rsc['computeResource']['resourceId']
if vnfc_rsc['id'] == deployment_target_vnfc['id']:
after_resource = deployment_target_vnfc
compute_resource = after_resource['computeResource']
before_pod_name = compute_resource['resourceId']
self.assertNotEqual(after_pod_name, before_pod_name)
# terminate
self._terminate_vnf_instance(vnf_instance['id'])
self._delete_vnf_instance(vnf_instance['id'])
|
openstack/tacker
|
tacker/tests/functional/sol_kubernetes/vnflcm/test_kubernetes_multi_ns.py
|
Python
|
apache-2.0
| 6,554
|
# Generated by Django 3.1.6 on 2021-04-06 07:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cases", "0002_auto_20210305_1248"),
]
operations = [
migrations.AlterField(
model_name="rawimageuploadsession",
name="status",
field=models.PositiveSmallIntegerField(
choices=[
(0, "Queued"),
(1, "Started"),
(2, "Re-Queued"),
(3, "Failed"),
(4, "Succeeded"),
(5, "Cancelled"),
],
db_index=True,
default=0,
),
),
]
|
comic/comic-django
|
app/grandchallenge/cases/migrations/0003_auto_20210406_0753.py
|
Python
|
apache-2.0
| 738
|
# -*- coding: utf-8 -*-
"""
ecoop
=====
ecoop_ utility functions to automate the building of the Ecosystem Status Report for the NE-LME.
.. _ecoop: https://github.com/epifanio/ecoop
.. _ecoop-project: http://tw.rpi.edu/web/project/ECOOP
"""
from distutils.core import setup
import os
import sys
import subprocess
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
AUTHOR = 'epinux'
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
#print(GIT_REVISION)
return GIT_REVISION
GIT_REVISION = git_version()
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
#if os.path.exists('.git'):
GIT_REVISION = git_version()
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
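# Illustrative note (not part of the original setup.py): for an unreleased tree
# get_version_info() returns a FULLVERSION of the form
# '%d.%d.%d.dev-<sha7>' % (MAJOR, MINOR, MICRO), e.g. '0.1.0.dev-1a2b3c4',
# while a released tree returns VERSION unchanged.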
def write_version_py(filename='lib/ecoop/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM ecoop SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'author' : AUTHOR,
'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
with open('README.md') as file:
long_description = file.read()
write_version_py()
setup(
name = 'ecoop',
version = '0.1.0',
description = 'A collection of utilities to be used from inside an IPython Notebook to automate the building of the Ecosystem Status Report for the NE-LME - Climate forcing UseCase',
long_description=long_description,
author = 'Massimo Di Stefano',
author_unixid = 'epinux',
author_email = 'epiesasha@me.com',
url = 'http://github.com/epifanio/ecoop',
packages = ['ecoop'],
package_dir = {'': 'lib'},
license = 'BSD 3-Clause license',
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
)
|
tetherless-world/ecoop
|
pyecoop/setup.py
|
Python
|
apache-2.0
| 4,856
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import random
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from resource_management.core.shell import call
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.validate import call_and_match_output
from zookeeper import zookeeper
from zookeeper_service import zookeeper_service
class ZookeeperServer(Script):
def get_component_name(self):
return "zookeeper-server"
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env, upgrade_type=None):
import params
env.set_params(params)
zookeeper(type='server', upgrade_type=upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
conf_select.select(params.stack_name, "zookeeper", params.version)
stack_select.select("zookeeper-server", params.version)
#Execute(format("iop-select set zookeeper-server {version}"))
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env, upgrade_type)
zookeeper_service(action = 'start', upgrade_type=upgrade_type)
def post_upgrade_restart(self, env, upgrade_type=None):
if upgrade_type == "nonrolling":
return
Logger.info("Executing Stack Upgrade post-restart")
import params
env.set_params(params)
zk_server_host = random.choice(params.zookeeper_hosts)
cli_shell = format("{zk_cli_shell} -server {zk_server_host}:{client_port}")
# Ensure that a quorum is still formed.
unique = get_unique_id_and_date()
create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
list_command = format("echo 'ls /' | {cli_shell}")
delete_command = format("echo 'delete /{unique} ' | {cli_shell}")
quorum_err_message = "Failed to establish zookeeper quorum"
call_and_match_output(create_command, 'Created', quorum_err_message)
call_and_match_output(list_command, r"\[.*?" + unique + ".*?\]", quorum_err_message)
call(delete_command)
if params.client_port:
check_leader_command = format("echo stat | nc localhost {client_port} | grep Mode")
code, out = call(check_leader_command, logoutput=False)
if code == 0 and out:
Logger.info(out)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
zookeeper_service(action = 'stop', upgrade_type=upgrade_type)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.zk_pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
# Expect the following files to be available in status_params.config_dir:
# zookeeper_jaas.conf
# zookeeper_client_jaas.conf
try:
props_value_check = None
props_empty_check = ['Server/keyTab', 'Server/principal']
props_read_check = ['Server/keyTab']
zk_env_expectations = build_expectations('zookeeper_jaas', props_value_check, props_empty_check,
props_read_check)
zk_expectations = {}
zk_expectations.update(zk_env_expectations)
security_params = get_params_from_filesystem(status_params.config_dir,
{'zookeeper_jaas.conf': FILE_TYPE_JAAS_CONF})
result_issues = validate_security_config_properties(security_params, zk_expectations)
if not result_issues: # If all validations passed successfully
# Double check the dict before calling execute
if ( 'zookeeper_jaas' not in security_params
or 'Server' not in security_params['zookeeper_jaas']
or 'keyTab' not in security_params['zookeeper_jaas']['Server']
or 'principal' not in security_params['zookeeper_jaas']['Server']):
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.zk_user,
security_params['zookeeper_jaas']['Server']['keyTab'],
security_params['zookeeper_jaas']['Server']['principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
self.put_structured_out({"securityState": "UNSECURED"})
if __name__ == "__main__":
ZookeeperServer().execute()
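# Illustrative note (not part of the original script): with a client port of 2181
# and a unique id such as 'id-2015-01-01', the quorum check in
# post_upgrade_restart pipes commands of the following shape through the
# ZooKeeper CLI shell (paths and hostnames are examples only):
#   echo 'create /id-2015-01-01 mydata' | zkCli.sh -server zkhost:2181
#   echo 'ls /' | zkCli.sh -server zkhost:2181
#   echo 'delete /id-2015-01-01 ' | zkCli.sh -server zkhost:2181
# and the leader check runs: echo stat | nc localhost 2181 | grep Mode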
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py
|
Python
|
apache-2.0
| 6,914
|
eight_layer = Font.glyphs["eight-qs"].layers[0]
i_layer = Font.glyphs['i-qs'].layers[0] = eight_layer.copyDecomposedLayer()
XHEIGHT = 500
WIDTH = 600
for path in i_layer.paths:
for node in path.nodes:
# mess with node.x and node.y
node.x = WIDTH - node.x
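# Illustrative sketch (not part of the original script): the assignment above
# mirrors each node horizontally within a WIDTH-wide glyph.
def _mirror_x(x, width=WIDTH):
    return width - x
# _mirror_x(0) == 600, _mirror_x(100) == 500, _mirror_x(300) == 300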
|
adiabatic/abbots-morton-experiment
|
glyph-generation scripts/eight2i.py
|
Python
|
apache-2.0
| 277
|
"""
hashdd_filemagic.py
@brad_anton
License:
Copyright 2015 hashdd.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import magic
from .feature import feature
class hashdd_filemagic(feature):
def process(self):
if self.buffer:
return magic.from_buffer(self.buffer)
elif self.filename:
return magic.from_file(self.filename)
return None
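# Illustrative sketch (not part of the original module): the same python-magic
# entry points process() relies on, used directly. The buffer contents and file
# path are examples only.
def _example_magic():
    from_bytes = magic.from_buffer(b'%PDF-1.4')
    from_path = magic.from_file('/etc/hostname')
    return from_bytes, from_path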
|
hashdd/pyhashdd
|
hashdd/features/hashdd_filemagic.py
|
Python
|
apache-2.0
| 872
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from examples.connect import EXAMPLE_IMAGE_NAME
"""
Delete resources with the Image service.
For a full guide see
http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html
"""
def delete_image(conn):
print("Delete Image:")
example_image = conn.image.find_image(EXAMPLE_IMAGE_NAME)
conn.image.delete_image(example_image, ignore_missing=False)
|
briancurtin/python-openstacksdk
|
examples/image/delete.py
|
Python
|
apache-2.0
| 928
|
from mock import MagicMock
import docker.errors
from requests.exceptions import ConnectionError
import datetime
class ClientMock():
def __init__(self):
def __logs(i, stream=False):
if stream:
return ("this is line {}".format(l) for l in xrange(1, 100))
else:
return "\n".join(["this is line {}".format(l) for l in xrange(1, 3)])
self.client_node1 = MagicMock()
self.client_node1.containers = MagicMock(return_value=self.__containers_cmd_return_node1)
self.client_node1.inspect_container = MagicMock(side_effect=lambda container_id:
self.__get_container(self.__inspect_container_cmd_return_node1,
container_id))
self.client_node1.create_container = MagicMock(return_value={'Id': 'eba8bea2600029'})
self.client_node1.start = MagicMock()
self.client_node1.logs = MagicMock(side_effect=__logs)
self.client_node2 = MagicMock()
self.client_node2.containers = MagicMock(return_value=self.__containers_cmd_return_node2)
self.client_node2.inspect_container = MagicMock(side_effect=lambda container_id:
self.__get_container(self.__inspect_container_cmd_return_node2,
container_id))
self.client_node2.logs = MagicMock(side_effect=__logs)
self.client_node3 = MagicMock()
self.client_node3.containers = MagicMock(side_effect=ConnectionError())
self.client_node3.inspect_container = MagicMock(side_effect=ConnectionError())
self.client_node3.logs = MagicMock(side_effect=__logs)
self.client_node4 = MagicMock()
self.client_node4.containers = MagicMock(side_effect=ConnectionError())
self.client_node4.inspect_container = MagicMock(side_effect=ConnectionError())
self.client_node4.logs = MagicMock(side_effect=__logs)
def mock_one_docker_node(self, docker_client):
docker_client.side_effect = self.__side_effect
return self.client_node1
def mock_two_docker_nodes(self, docker_client):
docker_client.side_effect = self.__side_effect
return self.client_node1, self.client_node2, self.client_node3
def __side_effect(self, base_url, version, timeout):
if "node-1" in base_url:
return self.client_node1
if "node-2" in base_url:
return self.client_node2
if "node-3" in base_url:
return self.client_node3
if "node-4" in base_url:
return self.client_node4
raise Exception("{} not mocked".format(base_url))
def __get_container(self, data, container_id):
try:
return data[container_id]
except KeyError as e:
raise docker.errors.APIError(e, "dummy", explanation="No such container: {}".format(container_id))
__containers_cmd_return_node1 = [
{u'Command': u'/runner/init start web',
u'Created': 1408697397,
u'Id': u'656ca7c307d178',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers-checking-frontend-27'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9225,
u'Type': u'tcp'}],
u'Status': u'Up 40 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'eba8bea2600029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/paye_216'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1406886169,
u'Id': u'381587e2978216',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers_5'],
u'Ports': [],
u'Status': u'Exited (127) 4 weeks ago'},
# Weird edge case when docker doesn't fill in the container status
{u'Command': u'/runner/init start web',
u'Created': 1406886169,
u'Id': u'3815178hgdasf6',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers_5'],
u'Ports': [],
u'Status': u''}]
__containers_cmd_return_node2 = [
{u'Command': u'/runner/init start web',
u'Created': 1408687834,
u'Id': u'80be2a9e62ba00',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/paye_216_8020e407-e40a-478e-9f31-a43bb50d8410'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 19 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'jh23899fg00029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/noport_216'],
u'Ports': [],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'oiwq569fg00029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/wrongport_216'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 1234,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1409767240,
u'Id': u'389821jsv78216',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/myservice_10'],
u'Ports': [],
u'Status': u'Exited (127) 21 hours ago'},
{u'Command': u'/runner/init start web',
u'Created': 1426151640,
u'Id': u'61c2695fd82a',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/fresh_5'],
u'Ports': [],
u'Status': u''},
{u'Command': u'/runner/init start web',
u'Created': 1426151640,
u'Id': u'61c2695fd82b',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/notfresh_5'],
u'Ports': [],
u'Status': u''}]
__inspect_container_cmd_return_node1 = {
"381587e2978216": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/381587e2978216/hostname',
u'HostsPath': u'/var/lib/docker/containers/381587e2978216/hosts',
u'Id': u'381587e2978216',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
# This date format tests the fact that datetime wants microseconds but docker returns a higher granularity.
u'FinishedAt': "{}1234Z".format((datetime.datetime.now() - datetime.timedelta(days=1, minutes=10)).strftime('%Y-%m-%dT%H:%M:%S.%f')),
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'2014-09-02T08:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"3815178hgdasf6": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/3815178hgdasf6/hostname',
u'HostsPath': u'/var/lib/docker/containers/3815178hgdasf6/hosts',
u'Id': u'3815178hgdasf6',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
# Weird date that docker sometimes returns in containers with no Status
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': {},
u'VolumesRW': {}},
"656ca7c307d178": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/656ca7c307d178/hostname',
u'HostsPath': u'/var/lib/docker/containers/656ca7c307d178/hosts',
u'Id': u'656ca7c307d178',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 35327,
u'Running': True,
u'StartedAt': u'2014-08-22T08:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"eba8bea2600029": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/paye_216.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'eba8bea26000',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:34:08.134031634Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/eba8bea2600029/hostname',
u'HostsPath': u'/var/lib/docker/containers/eba8bea2600029/hosts',
u'Id': u'eba8bea2600029',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/paye_216',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.221',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 30996,
u'Running': True,
u'StartedAt': u'2014-08-22T08:34:08.260419303Z'},
u'Volumes': {},
u'VolumesRW': {}}
}
__inspect_container_cmd_return_node2 = {
"389821jsv78216": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/389821jsv78216/hostname',
u'HostsPath': u'/var/lib/docker/containers/389821jsv78216/hosts',
u'Id': u'389821jsv78216',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
u'FinishedAt': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'2014-09-03T17:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"80be2a9e62ba00": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=http://example.com/paye/paye_216.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'80be2a9e62ba',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:33:11.343161034Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/80be2a9e62ba00/hostname',
u'HostsPath': u'/var/lib/docker/containers/80be2a9e62ba00/hosts',
u'Id': u'80be2a9e62ba00',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/paye_216_8020e407-e40a-478e-9f31-a43bb50d8410',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.221',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 30996,
u'Running': True,
u'StartedAt': u'2014-08-22T08:33:39.241960303Z'},
u'Volumes': {},
u'VolumesRW': {}},
"61c2695fd82a": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 0,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'TEST=yes',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'captain-test',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
# This is a freshly created but not yet started container
u'Created': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': None,
u'NetworkMode': u'',
u'PortBindings': None,
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'',
u'HostsPath': u'',
u'Id': u'61c2695fd82a',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/fresh_5',
u'NetworkSettings': {u'Bridge': u'',
u'Gateway': u'',
u'IPAddress': u'',
u'IPPrefixLen': 0,
u'PortMapping': None,
u'Ports': None},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 0,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': None,
u'VolumesRW': None},
"61c2695fd82b": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 0,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'TEST=yes',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'captain-test',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
# This is an epoch-dated state container but with an old, gc-able created date
u'Created': (datetime.datetime.now() - datetime.timedelta(days=1, minutes=10)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': None,
u'NetworkMode': u'',
u'PortBindings': None,
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'',
u'HostsPath': u'',
u'Id': u'61c2695fd82b',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/notfresh_5',
u'NetworkSettings': {u'Bridge': u'',
u'Gateway': u'',
u'IPAddress': u'',
u'IPPrefixLen': 0,
u'PortMapping': None,
u'Ports': None},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 0,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': None,
u'VolumesRW': None}
}
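# Illustrative sketch (not part of the original helper): typical use from a test,
# where docker_client_patch is the object produced by patching the docker client
# constructor (the exact patch target depends on where captain creates it).
def _example_usage(docker_client_patch):
    mocks = ClientMock()
    node1 = mocks.mock_one_docker_node(docker_client_patch)
    return node1.containers()  # returns the canned node-1 container list above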
|
hmrc/captain
|
captain/tests/util_mock.py
|
Python
|
apache-2.0
| 36,205
|
# -*- coding: utf-8 -*-
#
# Google Cloud Bigtable documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 24 16:48:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from email import message_from_string
import os
from pkg_resources import get_distribution
import sys
import types
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Google Cloud Bigtable'
copyright = u'2015, Google'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
distro = get_distribution('gcloud_bigtable')
release = os.getenv('SPHINX_RELEASE', distro.version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_components/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
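# Helper for the READTHEDOCS hack below: it registers a fake
# ``grpc.<subpackage>`` module plus empty ``grpc.<subpackage>.<name>``
# submodules in ``sys.modules`` so the package can be imported for autodoc
# even though gRPC itself is not installed.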
def add_grpc_mock(grpc_mod, subpackage, module_names):
full_subpackage = 'grpc.' + subpackage
subpackage_mod = types.ModuleType(full_subpackage)
sys.modules[full_subpackage] = subpackage_mod
setattr(grpc_mod, subpackage, subpackage_mod)
for module_name in module_names:
full_mod_name = full_subpackage + '.' + module_name
mod_obj = types.ModuleType(full_mod_name)
sys.modules[full_mod_name] = mod_obj
setattr(subpackage_mod, module_name, mod_obj)
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if os.environ.get('READTHEDOCS', None) == 'True':
# Really nasty hack so that readthedocs.org can successfully build these
# docs even though gRPC can't be installed.
grpc_mod = types.ModuleType('grpc')
sys.modules['grpc'] = grpc_mod
add_grpc_mock(grpc_mod, '_adapter', ['_c'])
add_grpc_mock(grpc_mod, 'early_adopter', ['implementations'])
add_grpc_mock(grpc_mod, 'framework', ['alpha'])
name = 'grpc.framework.alpha.utilities'
util_mod = types.ModuleType(name)
sys.modules[name] = util_mod
sys.modules['grpc.framework.alpha'].utilities = util_mod
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GoogleCloudBigtabledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
metadata = distro.get_metadata(distro.PKG_INFO)
author = message_from_string(metadata).get('Author')
latex_documents = [
(master_doc, 'GoogleCloudBigtable.tex', u'Google Cloud Bigtable Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'googlecloudbigtable', u'Google Cloud Bigtable Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GoogleCloudBigtable', u'Google Cloud Bigtable Documentation',
author, 'GoogleCloudBigtable', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Configuration for intersphinx:
# Refer to the Python standard library and oauth2client library.
# NOTE: We also have a custom mapping for items in the stdlib not
# included in https://docs.python.org/objects.inv
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'oauth2client': ('http://oauth2client.readthedocs.org/en/latest/', None),
}
|
joerideturck/gcloud-python-bigtable
|
docs/conf.py
|
Python
|
apache-2.0
| 11,326
|
#!/usr/bin/env python3
###
###
###
### Jesse Leigh Patsolic
### 2017 <jpatsol1@jhu.edu>
### S.D.G
#
import argparse
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from intern.utils.parallel import block_compute
import configparser
import requests
import numpy as np
from numpy import genfromtxt
import shutil
import blosc
from IPython.core.debugger import set_trace
import sys
import os
import itertools
from functools import partial
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import cpu_count
import csv
import datetime
import toolbox
def main(COLL_NAME, EXP_NAME, COORD_FRAME,
CHAN_NAMES=None, num_threads = 4, CONFIG_FILE= 'config.ini'):
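    ## Locate every annotation in the BOSS volume and, for each channel, pull
    ## data in a bf-sized window around each annotation centroid (via
    ## toolbox.getCube). Returns (cubes, loc), where loc holds the global
    ## (z, y, x) centroid of each annotation.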
bf = [5,180,180] # in z,y,x order
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
TOKEN = config['Default']['token']
boss_url = ''.join( ( config['Default']['protocol'],'://',config['Default']['host'],'/v1/' ) )
#print(boss_url)
#'https://api.boss.neurodata.io/v1/'
#intern
rem = BossRemote(CONFIG_FILE)
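    ## The coordinate frame is looked up under the '<collection>_<experiment>'
    ## naming convention; the COORD_FRAME argument is not used here.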
cf = CoordinateFrameResource(str(COLL_NAME + '_' + EXP_NAME))
cfr = rem.get_project(cf)
anno_res = ChannelResource('annotation', COLL_NAME, EXP_NAME, 'annotation', datatype='uint64')
ex = {'x': cfr.x_stop, 'y': cfr.y_stop, 'z': cfr.z_stop}
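    ## Tile the full x/y/z extent into 512 x 512 x 16 blocks and collect every
    ## annotation id present in each block.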
blocks = block_compute(0,ex['x'],0,ex['y'],0,ex['z'],
origin = (0,0,0), block_size = (512, 512, 16))
rid = []
for b in blocks:
rid = rid + rem.get_ids_in_region(anno_res, 0, b[0], b[1], b[2], [0,1])
u = np.unique(np.asarray(rid)) ## returns in sorted order ascending
## bounding box for annotation_i
bb = [rem.get_bounding_box(anno_res, 0,ui, 'tight') for ui in u]
for i in range(len(bb)):
bb[i]["id"] = u[i]
A = [(rem.get_cutout(
anno_res, 0, bb[i]["x_range"],
bb[i]["y_range"], bb[i]["z_range"],
id_list = [bb[i]['id']]) != 0).astype(int)
for i in range(len(bb))]
#Bmeans = [np.int32(np.round(np.mean(np.asarray(np.where(A[i] == True)),1))) for i in range(len(A))]
Bmeans = [np.int32(np.round(np.mean(np.asarray(np.where(A[i] == 1)),1))) for i in range(len(A))]
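    ## Shift each mask centroid from bounding-box coordinates into global
    ## (z, y, x) coordinates by adding the bounding-box origin.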
Bglobal = []
for i in range(len(bb)):
ad1 = np.asarray([bb[i]['z_range'][0], bb[i]['y_range'][0], bb[i]['x_range'][0]])
Bglobal.append(Bmeans[i] + ad1)
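    ## Identify centroids whose bf-sized buffer fits entirely inside the
    ## volume; Bcon/con feed the commented-out masked and centered retrieval
    ## paths, while the active path below clamps ranges instead.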
ColMin = np.asarray(bf)
ColMax = np.asarray([ex['z'] - (bf[0] + 1), # The z index is inclusive
ex['y'] - (bf[1] + 1),
ex['x'] - (bf[2] + 1)])
m = [Bglobal[i] >= ColMin for i in range(len(Bglobal))]
M = [Bglobal[i] <= ColMax for i in range(len(Bglobal))]
mm = [np.all(m[i]) for i in range(len(m)) ]
MM = [np.all(M[i]) for i in range(len(M)) ]
Bcon = []
con = [np.asarray(mm[j] and MM[j]) for j in range(len(mm))]
for i in range(len(Bglobal)):
if con[i]:
Bcon.append(Bglobal[i])
if CHAN_NAMES is None:
CHAN_NAMES = ['DAPI1st', 'DAPI2nd', 'DAPI3rd',
'GABA488', 'GAD647', 'gephyrin594', 'GS594', 'MBP488',
'NR1594', 'PSD95_488', 'Synapsin647', 'VGluT1_647']
#CHAN_NAMES = ['bIIItubulin', 'DAPI_2nd', 'DAPI_3rd',
# 'GABA', 'GAD2', 'gephyrin', 'NR1', 'PSDr',
# 'synapsin', 'VGAT', 'VGluT1']
ChanList = []
    ## For getting data masked by each annotation (commented out below)
#for ch in CHAN_NAMES:
# di = [{
# 'rem': rem,
# 'ch_rsc':
# ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
# 'ch' : ch,
# 'res' : 0,
# 'xrng': bb[i]['x_range'],
# 'yrng': bb[i]['y_range'],
# 'zrng': bb[i]['z_range'],
# 'id' : bb[i],
# 'mask': A[i]
# } for i in range(len(bb)) if con[i]]
# with ThreadPool(num_threads) as tp:
# out = tp.map(toolbox.getMaskData, di)
# sys.stdout.flush() #DEBUG
# print(ch) ##DEBUG
# sys.stdout.flush() #DEBUG
# ChanList.append(np.asarray(out))
#cubes = np.asarray(ChanList)
## For getting bounding box around centroid of annotation
#for ch in CHAN_NAMES:
# di = [{
# 'rem': rem,
# 'ch_rsc':
# ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
# 'ch' : ch,
# 'res' : 0,
# 'xrng': [Bcon[i][2] - bf[2], Bcon[i][2] + bf[2] + 1],
# 'yrng': [Bcon[i][1] - bf[1], Bcon[i][1] + bf[1] + 1],
# 'zrng': [Bcon[i][0] - bf[0], Bcon[i][0] + bf[0] + 1],
# } for i in range(len(Bcon))]
# with ThreadPool(num_threads) as tp:
# out = tp.map(toolbox.getCube, di)
# print(ch) ##DEBUG
# sys.stdout.flush() #DEBUG
# ChanList.append(np.asarray(out))
#cubes = np.asarray(ChanList)
    ## For getting all annotations regardless of proximity to the boundary;
    ## the ranges below are clamped to the volume extent.
for ch in CHAN_NAMES:
di = [{
'rem': rem,
'ch_rsc':
ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
'ch' : ch,
'res' : 0,
'xrng': [max([Bglobal[i][2] - bf[2], 0]), min([Bglobal[i][2] + bf[2] + 1, ex['x']])],
'yrng': [max([Bglobal[i][1] - bf[1], 0]), min([Bglobal[i][1] + bf[1] + 1, ex['y']])],
'zrng': [max([Bglobal[i][0] - bf[0], 0]), min([Bglobal[i][0] + bf[0] + 1, ex['z']])]
} for i in range(len(Bglobal))]
with ThreadPool(num_threads) as tp:
out = tp.map(toolbox.getCube, di)
print(ch) ##DEBUG
sys.stdout.flush() #DEBUG
ChanList.append(np.asarray(out))
cubes = ChanList
loc = np.asarray(Bglobal)
return(cubes, loc)
## END main
def testMain():
COLL_NAME = 'collman'
EXP_NAME = 'collman15v2'
COORD_FRAME = 'collman_collman15v2'
CONFIG_FILE = 'config.ini'
OUTPUT = 'fmaxTest20171214.csv'
CHAN_NAMES = ['Synapsin647', 'VGluT1_647']
#CHAN_NAMES = ['DAPI1st', 'DAPI2nd', 'DAPI3rd', 'GABA488', 'GAD647',
# 'gephyrin594', 'GS594', 'MBP488', 'NR1594', 'PSD95_488',
# 'Synapsin647', 'VGluT1_647']
#CHAN_NAMES = ['synapsin', 'PSDr']
cubes, loc = main(COLL_NAME, EXP_NAME, COORD_FRAME,
CHAN_NAMES=CHAN_NAMES, num_threads = 6, CONFIG_FILE= 'config.ini')
Fmaxb = toolbox.Fmaxb(cubes)
#F0 = toolbox.F0(cubes)
#Fmax = toolbox.Fmax(cubes)
#toolbox.mainOUT(Fmax, CHAN_NAMES, OUTPUT)
#toolbox.toh5(EXP_NAME, OUTPUT + '.h5', CHAN_NAMES, cubes, loc, Fmax)
return(cubes, loc, Fmaxb)
## End testMain
if __name__ == '__main__':
parser = argparse.ArgumentParser(description =
        'Get volume-normalized F0 values from annotation id regions in the BOSS')
parser.add_argument('-C', help='Valid collection id',
type = str, metavar='C', default='collman')
parser.add_argument('-E', help='Valid experiment id',
type = str, metavar='E', default='collman15v2')
parser.add_argument('-F', help='valid coordinate frame',
type = str, metavar='F', default='collman_collman15v2')
#toolbox.toh5(EXP_NAME, OUTPUT + '.h5', CHAN_NAMES, cubes, loc, F0)
parser.add_argument('-O', help='output filename',
type = str, metavar='O', required=True,
default = 'output')
    parser.add_argument('--con', help='user config file for BOSS '
'authentication', type = str, metavar='con', required=True)
args = parser.parse_args()
COLL_NAME = args.C
EXP_NAME = args.E
COORD_FRAME = args.F
OUTPUT = args.O
CONFIG_FILE = args.con
#rem = BossRemote(CONFIG_FILE)
#CHAN_NAMES = rem.list_channels(COLL_NAME, EXP_NAME)
##collman15v2 channels
CHAN_NAMES = ['DAPI1st', 'DAPI2nd', 'DAPI3rd', 'GABA488', 'GAD647',
'gephyrin594', 'GS594', 'MBP488', 'NR1594', 'PSD95_488',
'Synapsin647', 'VGluT1_647']
##collman14v2 channels
#CHAN_NAMES = ['bIIItubulin', 'DAPI_2nd', 'DAPI_3rd',
# 'GABA', 'GAD2', 'VGAT', 'gephyrin',
# 'NR1', 'VGluT1', 'synapsin', 'PSDr']
cubes, loc = main(COLL_NAME, EXP_NAME, COORD_FRAME,
CHAN_NAMES=CHAN_NAMES,
num_threads = 6, CONFIG_FILE= CONFIG_FILE)
#F0 = toolbox.F0(cubes)
Fmax = toolbox.Fmaxb(cubes)
toolbox.mainOUT(Fmax, CHAN_NAMES, OUTPUT)
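    # argsort([3, 2, 1]) == [2, 1, 0]: reverse the stored (z, y, x) centroid
    # columns so the locations file is written in (x, y, z) order.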
idx = np.argsort([3,2,1])
toolbox.mainOUT(np.transpose(loc[:,idx]), ['x','y','z'], "locations_"+OUTPUT)
#toolbox.toh5(EXP_NAME, OUTPUT + '.h5', CHAN_NAMES, cubes, loc, F0)
print('Done!')
|
neurodata/synaptome-stats
|
collman15v2/201710/annoStats.py
|
Python
|
apache-2.0
| 8,903
|
#!/usr/bin/env python2.7
'''
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import print_function
import logging
from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from PIL import Image # pylint: disable=wrong-import-order
def get_prediction(image, server_host='127.0.0.1', server_port=9000,
server_name="server", timeout=10.0):
"""
Retrieve a prediction from a TensorFlow model server
:param image: a MNIST image represented as a 1x784 array
:param server_host: the address of the TensorFlow server
:param server_port: the port used by the server
:param server_name: the name of the server
:param timeout: the amount of time to wait for a prediction to complete
:return 0: the integer predicted in the MNIST image
:return 1: the confidence scores for all classes
:return 2: the version number of the model handling the request
"""
print("connecting to:%s:%i" % (server_host, server_port))
# initialize to server connection
channel = implementations.insecure_channel(server_host, server_port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# build request
request = predict_pb2.PredictRequest()
request.model_spec.name = server_name
request.model_spec.signature_name = 'serving_default'
request.inputs['x'].CopyFrom(
tf.contrib.util.make_tensor_proto(image, shape=image.shape))
# retrieve results
result = stub.Predict(request, timeout)
resultVal = result.outputs["classes"].int_val[0]
scores = result.outputs['predictions'].float_val
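  # No dedicated model-version output is read from the response; the
  # "classes" value is reused as the returned version below.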
version = result.outputs["classes"].int_val[0]
return resultVal, scores, version
def random_mnist(save_path=None):
"""
Pull a random image out of the MNIST test dataset
Optionally save the selected image as a file to disk
  :param save_path: the path to save the file to. If None, the file is not saved
:return 0: a 1x784 representation of the MNIST image
:return 1: the ground truth label associated with the image
:return 2: a bool representing whether the image file was saved to disk
"""
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
batch_size = 1
batch_x, batch_y = mnist.test.next_batch(batch_size)
saved = False
if save_path is not None:
# save image file to disk
try:
data = (batch_x * 255).astype(np.uint8).reshape(28, 28)
img = Image.fromarray(data, 'L')
img.save(save_path)
saved = True
except Exception as e: # pylint: disable=broad-except
logging.error("There was a problem saving the image; %s", e)
return batch_x, np.argmax(batch_y), saved
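# Minimal usage sketch: assumes a TensorFlow Serving instance is reachable at
# the get_prediction defaults (127.0.0.1:9000, model name "server") and that
# the MNIST test data can be downloaded locally by random_mnist.
if __name__ == '__main__':
  image, label, _ = random_mnist()
  predicted, scores, version = get_prediction(image)
  print("truth=%d predicted=%d version=%d (%d class scores)"
        % (label, predicted, version, len(scores)))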
|
kubeflow/examples
|
mnist/web-ui/mnist_client.py
|
Python
|
apache-2.0
| 3,348
|