| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Misc helper functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging as log
import tensorflow as tf
def optimistic_restorer(save_file, vars_all=None):
"""Restores the variables which are present in save_file.
Args:
save_file: checkpoint file
vars_all: optional list of the variable superset
Returns:
restorer: object on which we should call restore(sess, save_file)
"""
if vars_all is None:
vars_all = tf.global_variables()
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0])
for var in vars_all
if var.name.split(':')[0] in saved_shapes])
var_names_new = sorted([
var.name for var in vars_all if var.name.split(':')[0] not in saved_shapes
])
log.info('Number of new variables: %d', len(var_names_new))
log.info(var_names_new)
restore_vars = []
name2var = dict(
zip([x.name.split(':')[0] for x in tf.global_variables()],
tf.global_variables()))
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = name2var[saved_var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
else:
log.info('Different shape than saved: %s', var_name)
restorer = tf.train.Saver(restore_vars)
return restorer
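# A minimal usage sketch (hypothetical checkpoint path, assuming the usual
# TF1 session workflow):
#
#   restorer = optimistic_restorer('/tmp/model.ckpt')
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     restorer.restore(sess, '/tmp/model.ckpt')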
def transpose(rot):
"""Transposes last two dimensions.
Args:
rot: [...] X M X N matrices (e.g. relative rotations)
Returns:
rot_t: [...] X N X M matrices
"""
with tf.name_scope('transpose'):
n_inp_dim = len(rot.get_shape())
perm = list(range(n_inp_dim))
perm[-1] = n_inp_dim - 2
perm[-2] = n_inp_dim - 1
rot_t = tf.transpose(rot, perm=perm)
return rot_t
def divide_safe(num, den, name=None):
eps = 1e-8
den += eps * tf.cast(tf.equal(den, 0), 'float32')
return tf.divide(num, den, name=name)
def pixel_coords(bs, h, w):
"""Creates a bs X h X w X 3 tensor with (x,y,1) coord at each pixel.
Args:
bs: batch_size (number of meshgrid repetitions)
h: number of rows
w: number of columns
Returns:
bs X h X w X 3 tensor with (x,y,1) coord at each pixel.
Note : these coordinates are 0.5 indexed
"""
with tf.name_scope('pixel_coords'):
ones_w = tf.ones((1, 1, w))
ones_h = tf.ones((1, h, 1))
ones_b = tf.ones((bs, 1, 1))
range_h = tf.cast(tf.reshape(tf.range(h) + 1, (1, h, 1)), 'float32')
range_w = tf.cast(tf.reshape(tf.range(w) + 1, (1, 1, w)), 'float32')
# subtracting 0.5 so that pixel centres correspond to 0.5
# for example, the top left pixel centre is at (0.5, 0.5)
ys = ones_b * range_h * ones_w - 0.5
xs = ones_b * ones_h * range_w - 0.5
ones = ones_b * ones_h * ones_w
return tf.stack([xs, ys, ones], axis=3)
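# Worked example (values shown as comments; evaluating them needs a TF1
# session): pixel_coords(1, 2, 2) has shape (1, 2, 2, 3), with
#   coords[0, 0, 0] == (0.5, 0.5, 1.0)  # centre of the top-left pixel
#   coords[0, 1, 1] == (1.5, 1.5, 1.0)  # centre of the bottom-right pixel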
def transform_pts(pts_coords_init, tform_mat):
"""Transforms input points according to the transformation.
Args:
pts_coords_init : [...] X H X W X D; pixelwise coordinates.
tform_mat : [...] X D X D; desired matrix transformation
Returns:
pts_coords : [...] X H X W X D; transformed coordinates.
"""
with tf.name_scope('transform_pts'):
tform_mat_size = tform_mat.get_shape().as_list()
n_dims_t = len(tform_mat_size)
pts_init_size = pts_coords_init.get_shape().as_list()
pts_transform_size = [tform_mat_size[ix] for ix in range(n_dims_t)]
pts_transform_size[-2] = -1
pts_coords_init_reshape = tf.reshape(pts_coords_init, pts_transform_size)
tform_mat_transpose = transpose(tform_mat)
pts_mul = tf.matmul(pts_coords_init_reshape, tform_mat_transpose)
pts_coords_transformed = tf.reshape(pts_mul, pts_init_size)
return pts_coords_transformed
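# Note: for each pixel the block above computes pts' = tform_mat . pts, i.e.
# coordinates are treated as column vectors; reshaping to [...] X (H*W) X D and
# right-multiplying by tform_mat^T is just a batched way of applying the same
# D X D transform to every pixel.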
def soft_z_buffering(layer_masks, layer_disps, depth_softmax_temp=1):
"""Computes pixelwise probability for belonging to each layer.
Args:
layer_masks: L X [...] X 1, indicating which layer pixels are valid
layer_disps: L X [...] X 1, layerwise per-pixel disparity
depth_softmax_temp: Denominator for exponentiation of negative depths
Returns:
layer_probs: L X [...] X 1, indicating prob. of layer assignment
"""
eps = 1e-8
layer_disps = tf.nn.relu(layer_disps)
layer_depths = divide_safe(1, layer_disps)
log_depth_probs = -layer_depths / depth_softmax_temp
log_layer_probs = tf.log(layer_masks + eps) + log_depth_probs
log_layer_probs -= tf.reduce_max(log_layer_probs, axis=0, keep_dims=True)
layer_probs = tf.exp(log_layer_probs)
probs_sum = tf.reduce_sum(layer_probs, axis=0, keep_dims=True)
layer_probs = tf.divide(layer_probs, probs_sum)
return layer_probs
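# In closed form, the block above assigns each pixel to layer l with probability
#   p_l = m_l * exp(-d_l / T) / sum_k m_k * exp(-d_k / T),
# where m_l is the layer mask, d_l = 1 / disparity_l the layer depth and T the
# softmax temperature; working in log space and subtracting the per-pixel max
# keeps the exponentials numerically stable.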
def enforce_bg_occupied(ldi_masks):
"""Enforce that the last layer's mask has all ones.
Args:
ldi_masks: L X [...] masks
Returns:
ldi_masks: L X [...], masks with the last layer set to 1
"""
n_layers = ldi_masks.get_shape().as_list()[0]
if n_layers == 1:
return ldi_masks * 0 + 1
else:
masks_fg, masks_bg = tf.split(ldi_masks, [n_layers - 1, 1], axis=0)
masks_bg = masks_bg * 0 + 1
return tf.concat([masks_fg, masks_bg], axis=0)
def zbuffer_weights(disps, scale=50):
"""Compute decreasing, non-negative inverse depths based on disparity deltas.
Args:
disps: [...] inverse depths, between 0 (far) and 1 (closest possible depth)
scale: multiplicative factor before exponentiation
Returns:
z_buf_wts: [...], higher weights for closer things
"""
pos_disps = tf.cast(tf.greater(disps, 0), tf.float32)
disps = tf.clip_by_value(disps, 0, 1)
disps -= 0.5 # subtracting a constant rescales all weights by exp(-0.5 * scale), keeping the exponent in a safer range
wts = tf.exp(disps * scale) * pos_disps
return wts
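# Scale intuition (hypothetical numbers): with scale=50, two surfaces at
# disparities 1.0 and 0.9 get weights differing by exp(0.1 * 50) ~= 148, so the
# closer surface dominates any soft z-buffer built from these weights, while a
# non-positive disparity is masked out entirely by pos_disps.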
| google/layered-scene-inference | lsi/nnutils/helpers.py | Python | apache-2.0 | 6,450 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import math
import pytest
import tvm
from tvm import relay
import numpy as np
from tvm.runtime.vm import VirtualMachine
from tvm.relay.op.contrib.cutlass import partition_for_cutlass
from tvm.contrib.cutlass import (
tune_cutlass_kernels,
build_cutlass_kernels,
build_cutlass_kernels_vm,
)
logging.basicConfig(level=logging.INFO)
def has_cublas():
return tvm.get_global_func("tvm.contrib.cublas.matmul", True) is not None
def has_cutlass():
return tvm.get_global_func("relay.ext.cutlass", True) is not None
def get_ref_rt_mod(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
dev = tvm.device(target, 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev
def get_ref_vm(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target=target, params=params)
code, lib = vm_exec.save()
dev = tvm.device(target, 0)
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
return VirtualMachine(vm_exec, dev), dev
def get_output(rt_mod, names, inputs):
for name, inp in zip(names, inputs):
rt_mod.set_input(name, inp)
rt_mod.run()
return rt_mod.get_output(0).asnumpy()
def get_output_vm(vm, names, inputs):
params = dict(zip(names, inputs))
return vm.invoke("main", **params).numpy()
def get_dense_with_shape(data_shape, weight_shape, out_dtype="float16"):
data = relay.var("data", shape=data_shape, dtype="float16")
weight = relay.var("weight", shape=weight_shape, dtype="float16")
return relay.nn.dense(data, weight, out_dtype=out_dtype)
def get_dense(M, N, K, out_dtype="float16"):
return get_dense_with_shape((M, K), (N, K), out_dtype)
def get_dense_bias(M, N, K, out_dtype="float16"):
dense = get_dense(M, N, K, out_dtype=out_dtype)
bias = relay.var("bias", shape=(N,), dtype=out_dtype)
return relay.nn.bias_add(dense, bias)
def get_dense_bias_relu(M, N, K, out_dtype="float16"):
return relay.nn.relu(get_dense_bias(M, N, K, out_dtype=out_dtype))
def get_dense_bias_gelu(M, N, K, out_dtype="float16"):
bias_add = get_dense_bias(M, N, K, out_dtype)
mul = bias_add * relay.const((1.0 / math.sqrt(2.0)), dtype=out_dtype)
if out_dtype == "float16":
erf = relay.cast(relay.op.erf(relay.cast(mul, "float32")), "float16")
else:
erf = relay.op.erf(mul)
mul_half = erf * relay.const(0.5, dtype=out_dtype)
add = mul_half + relay.const(0.5, dtype=out_dtype)
return add * bias_add
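# The helper above builds the exact (erf-based) GELU on top of dense + bias_add:
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# with the erf evaluated in fp32 on the fp16 path to avoid losing accuracy.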
def get_batch_matmul_with_shape(x_shape, y_shape, out_dtype="float16"):
x = relay.var("x", shape=x_shape, dtype="float16")
y = relay.var("y", shape=y_shape, dtype="float16")
return relay.nn.batch_matmul(x, y, out_dtype=out_dtype)
def get_batch_matmul(batch, M, N, K, out_dtype="float16"):
return get_batch_matmul_with_shape((batch, M, K), (batch, N, K), out_dtype=out_dtype)
def get_conv2d_nchw(d_shape, w_shape, padding, out_dtype="float16"):
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
out_channel = w_shape[0]
return relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
out_dtype=out_dtype,
)
def get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype="float16"):
conv2d = get_conv2d_nchw(d_shape, w_shape, padding, out_dtype=out_dtype)
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
return relay.nn.bias_add(conv2d, bias)
def silu(x):
return x * relay.sigmoid(x)
def hardswish(x, out_dtype="float16"):
return x * (
relay.clip(x + relay.const(3, dtype=out_dtype), a_min=0, a_max=6)
/ relay.const(6, dtype=out_dtype)
)
def get_conv2d_nchw_bias_relu(d_shape, w_shape, padding, out_dtype="float16"):
return relay.nn.relu(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16"):
return relay.sigmoid(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return silu(conv_out)
def get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return hardswish(conv_out, out_dtype)
def get_conv2d_nchw_bias_residual(d_shape, w_shape, padding, out_dtype="float16"):
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
out_channel = w_shape[0]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
out_dtype=out_dtype,
)
bias_add = relay.nn.bias_add(conv2d, bias)
return bias_add, data
def profile_and_build(mod, params, sm, tmp_dir="./tmp", lib_path="compile.so", use_fast_math=False):
mod = partition_for_cutlass(mod)
mod, num_cutlass_partition = tune_cutlass_kernels(
mod, sm, profile_all=False, use_multiprocessing=False, tmp_dir=tmp_dir
)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target="cuda", params=params)
lib = build_cutlass_kernels(lib, sm, tmp_dir, lib_path, use_fast_math=use_fast_math)
dev = tvm.device("cuda", 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev, num_cutlass_partition
def profile_and_build_vm(
mod,
params,
sm,
tmp_dir="./tmp",
lib_path="compile.so",
vmcode_path="vmcode.ro",
use_fast_math=False,
):
mod = partition_for_cutlass(mod)
mod, num_cutlass_partition = tune_cutlass_kernels(mod, sm, tmp_dir=tmp_dir)
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target="cuda", params=params)
vm_exec = build_cutlass_kernels_vm(
vm_exec, sm, tmp_dir, lib_path, vmcode_path, use_fast_math=use_fast_math
)
dev = tvm.device("cuda", 0)
return VirtualMachine(vm_exec, dev), dev, num_cutlass_partition
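# The two builders above follow the same CUTLASS offload pipeline:
# partition_for_cutlass() carves the supported subgraphs out of the Relay
# module, tune_cutlass_kernels() selects a CUTLASS kernel for each partition,
# relay.build() / relay.vm.compile() lowers the remainder for the "cuda"
# target, and build_cutlass_kernels() / build_cutlass_kernels_vm() compiles and
# links the generated CUTLASS sources into the final module.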
def verify_dense(
func, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
if not has_cutlass():
return
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
out_dtype = typ.dtype
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
np_data = np.random.uniform(-1, 1, (M, K)).astype("float16")
np_weight = np.random.uniform(-1, 1, (N, K)).astype("float16")
np_bias = np.random.uniform(-1, 1, (N,)).astype(out_dtype)
params = {"weight": np_weight, "bias": np_bias}
if use_vm:
if ref_target == "cuda" and out_dtype == "float16":
# Comment out the "return" below to see the accuracy difference of static vs dynamic TVM native fp16 dense
# The static one can use a tensorcore schedule, but the dynamic one cannot
rt_mod, dev = get_ref_vm(tvm.IRModule.from_expr(get_dense(M, N, K)), params)
num_partition = 1
logging.warning(
"The reference fp16 dense with dynamic shape using fp16 accumulation has accuracy issues."
)
return
else:
rt_mod, dev, num_partition = profile_and_build_vm(mod, params, sm)
rt_mod_ref, dev = get_ref_vm(mod, params, target=ref_target)
x = tvm.nd.array(np_data, device=dev)
out = get_output_vm(rt_mod, ["data"], [x])
ref_out = get_output_vm(rt_mod_ref, ["data"], [x])
else:
rt_mod_ref, dev = get_ref_rt_mod(mod, params, target=ref_target)
rt_mod, dev, num_partition = profile_and_build(mod, params, sm)
x = tvm.nd.array(np_data, device=dev)
out = get_output(rt_mod, ["data"], [x])
ref_out = get_output(rt_mod_ref, ["data"], [x])
assert num_partition > 0
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM with target %s:" % ref_target, rt_mod_ref.benchmark(dev, number=1, repeat=600))
def verify_batch_matmul(
func, batch, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
if not has_cutlass():
return
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
x_np = np.random.uniform(-1, 1, (batch, M, K)).astype("float16")
y_np = np.random.uniform(-1, 1, (batch, N, K)).astype("float16")
if use_vm:
rt_mod, dev, num_partition = profile_and_build_vm(mod, {}, sm)
rt_mod_ref, dev = get_ref_vm(mod, {}, target=ref_target)
assert num_partition > 0
x = tvm.nd.array(x_np, device=dev)
y = tvm.nd.array(y_np, device=dev)
out = get_output_vm(rt_mod, ["x", "y"], [x, y])
ref_out = get_output_vm(rt_mod_ref, ["x", "y"], [x, y])
else:
rt_mod, dev, num_partition = profile_and_build(mod, {}, sm)
rt_mod_ref, dev = get_ref_rt_mod(mod, {})
assert num_partition > 0
x = tvm.nd.array(x_np, device=dev)
y = tvm.nd.array(y_np, device=dev)
out = get_output(rt_mod, ["x", "y"], [x, y])
ref_out = get_output(rt_mod_ref, ["x", "y"], [x, y])
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM Tensorcore (no tuning):", rt_mod_ref.benchmark(dev, number=1, repeat=600))
M = 1820
N = 768
K = 768
def test_dense():
verify_dense(get_dense(M, N, K), M, N, K)
verify_dense(get_dense(M, N, K, out_dtype="float32"), M, N, K)
# Test align1 case
verify_dense(get_dense_bias(M, N + 1, K), M, N + 1, K)
def test_dense_bias():
verify_dense(get_dense_bias(M, N, K), M, N, K)
verify_dense(get_dense_bias(M, N, K, out_dtype="float32"), M, N, K)
def test_dense_bias_relu():
verify_dense(get_dense_bias_relu(M, N, K), M, N, K)
verify_dense(get_dense_bias_relu(M, N, K, out_dtype="float32"), M, N, K)
def test_dense_bias_gelu():
verify_dense(get_dense_bias_gelu(M, N, K), M, N, K, atol=1e-3, rtol=1e-3)
verify_dense(get_dense_bias_gelu(M, N, K, out_dtype="float32"), M, N, K, atol=1e-3, rtol=1e-3)
def test_dense_dynamic():
data_shape = (relay.Any(), K)
weight_shape = (relay.Any(), K)
if has_cublas():
# TVM native fp16 dense (without tensorcore), using fp16 accum, seems to have accuracy issues
# Use cublas as a reference
verify_dense(
get_dense_with_shape(data_shape, weight_shape),
M,
N,
K,
ref_target="cuda -libs=cublas",
)
verify_dense(
get_dense_with_shape(data_shape, weight_shape, out_dtype="float32"),
M,
N,
K,
atol=1e-4,
rtol=1e-4,
)
def test_batch_matmul():
batch = 8
verify_batch_matmul(get_batch_matmul(batch, M, N, K), batch, M, N, K)
verify_batch_matmul(get_batch_matmul(batch, M, N, K, out_dtype="float32"), batch, M, N, K)
if has_cublas():
# Test dynamic shape batch_matmul
# AutoTVM does not seem to support it
x_shape = (relay.Any(), relay.Any(), K)
y_shape = (relay.Any(), relay.Any(), K)
verify_batch_matmul(
get_batch_matmul_with_shape(x_shape, y_shape),
batch,
M,
N,
K,
ref_target="cuda -libs=cublas",
)
def convert_conv2d_layout(mod, desired_layouts):
with tvm.transform.PassContext(opt_level=3):
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
return seq(mod)
def verify_conv2d(
expr_nchw, # can be dynamic batch
expr_ref, # always static batch
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=False,
run_benchmark=False,
use_fast_math=False,
):
if not has_cutlass():
return
mod_nchw = tvm.IRModule.from_expr(expr_nchw)
mod_ref = tvm.IRModule.from_expr(expr_ref)
typ = relay.transform.InferType()(mod_nchw)["main"].body.checked_type
out_dtype = typ.dtype
np_data = np.random.uniform(-1, 1, d_shape).astype("float16")
np_weight = np.random.uniform(-1, 1, w_shape).astype("float16")
np_bias = np.random.uniform(-1, 1, (w_shape[0],)).astype(out_dtype)
params = {"weight": np_weight, "bias": np_bias}
typ = relay.transform.InferType()(mod_nchw)["main"].body.checked_type
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
mod_weight_ohwi = convert_conv2d_layout(mod_nchw, {"nn.conv2d": ["NHWC", "OHWI"]})
if use_vm:
rt_mod, _, num_cutlass_partition = profile_and_build_vm(
mod_weight_ohwi, params, sm, use_fast_math=use_fast_math
)
out = get_output_vm(rt_mod, ["data"], [np_data])
else:
rt_mod, _, num_cutlass_partition = profile_and_build(
mod_weight_ohwi, params, sm, use_fast_math=use_fast_math
)
out = get_output(rt_mod, ["data"], [np_data])
assert num_cutlass_partition > 0
if use_cudnn_ref:
rt_mod_ref, dev = get_ref_rt_mod(
convert_conv2d_layout(mod_ref, {"nn.conv2d": ["NHWC", "OHWI"]}),
params,
target="cuda -libs=cudnn",
)
else:
rt_mod_ref, dev = get_ref_rt_mod(
convert_conv2d_layout(mod_ref, {"nn.conv2d": ["NHWC", "HWIO"]}),
params,
target="cuda",
)
ref_out = get_output(rt_mod_ref, ["data"], [np_data])
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM Tensorcore (no tuning):", rt_mod_ref.benchmark(dev, number=1, repeat=600))
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
def test_conv2d():
padding = (1, 1)
for IC in [3, 16]:
d_shape = (16, IC, 32, 32)
w_shape = (32, IC, 3, 3)
mod_nchw = get_conv2d_nchw(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw,
mod_nchw,
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=(IC == 3), # The autotvm kernel has an accuracy issue with IC == 3 case
run_benchmark=False,
)
d_shape = (16, 16, 32, 32)
w_shape = (32, 16, 3, 3)
padding = (1, 1)
dyn_batch_shape = (relay.Any(),) + d_shape[1:]
mod_nchw = get_conv2d_nchw(d_shape, w_shape, padding)
mod_dyn = get_conv2d_nchw(dyn_batch_shape, w_shape, padding)
verify_conv2d(
mod_dyn, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
def test_conv2d_fusion():
d_shape = (16, 16, 32, 32)
w_shape = (32, 16, 3, 3)
padding = (1, 1)
mod_nchw = get_conv2d_nchw_bias(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_relu(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
verify_conv2d(
mod_nchw,
mod_nchw,
d_shape,
w_shape,
sm=80,
atol=1e-3,
rtol=1e-3,
run_benchmark=False,
use_fast_math=True,
)
mod_nchw = get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float32")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float32")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
def test_conv2d_residual_block():
d_shape = (16, 16, 32, 32)
w_shape = (16, 16, 3, 3)
padding = (1, 1)
bias_add, residual_input = get_conv2d_nchw_bias_residual(d_shape, w_shape, padding)
for func, tol in [
(relay.nn.relu(bias_add + residual_input), 1e-5),
(relay.nn.relu(bias_add) + residual_input, 1e-5),
(relay.sigmoid(bias_add) * residual_input, 1e-5),
(relay.nn.relu(silu(bias_add) * residual_input), 1e-5),
# HardSwish requires a higher tolerance since the residual block epilogue
# is vectorized in cutlass.
# TODO(masahi): Investigate this issue
(relay.nn.relu(hardswish(bias_add) + residual_input), 1e-3),
]:
verify_conv2d(func, func, d_shape, w_shape, sm=80, atol=tol, rtol=tol, run_benchmark=False)
if __name__ == "__main__":
pytest.main([__file__])
| Laurawly/tvm-1 | tests/python/contrib/test_cutlass.py | Python | apache-2.0 | 18,515 |
# Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import bisect
import token
import tokenize
import io
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from .line_numbers import LineNumbers
from .util import Token, match_token, is_non_coding_token
from .mark_tokens import MarkTokens
class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using the ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token`` attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`).
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
| gristlabs/asttokens | asttokens/asttokens.py | Python | apache-2.0 | 8,288 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
import copy
import datetime
import iso8601
import types
import uuid as stdlib_uuid
import mox
import netaddr
from oslo.config import cfg
from sqlalchemy.dialects import sqlite
from sqlalchemy import exc
from sqlalchemy.exc import IntegrityError
from sqlalchemy import MetaData
from sqlalchemy.orm import exc as sqlalchemy_orm_exc
from sqlalchemy.orm import query
from sqlalchemy.sql.expression import select
from nova import block_device
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = db_session.get_engine
get_session = db_session.get_session
def _quota_reserve(context, project_id, user_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservation uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, user_id, session):
return {resource: usage}
return sync
quotas = {}
user_quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = db.quota_create(context, project_id,
resource, i,
user_id=user_id)
sync_name = '_sync_%s' % resource
resources[resource] = quota.ReservableResource(
resource, sync_name, 'quota_res_%d' % i)
deltas[resource] = i
setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
sqlalchemy_api, sync_name)
return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
timeutils.utcnow(), CONF.until_refresh,
datetime.timedelta(days=1), project_id, user_id)
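# Hypothetical usage sketch (assumed nova.db helpers): a quota test would
# typically create the reservations and then resolve them, e.g.
#
#   reservations = _quota_reserve(ctxt, 'project1', 'user1')
#   db.reservation_commit(ctxt, reservations,
#                         project_id='project1', user_id='user1')
#   # ...or db.reservation_rollback(...) to release them instead.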
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEquals(test_func.func_name, decorated_func.func_name)
self.assertEquals(test_func.__doc__, decorated_func.__doc__)
self.assertEquals(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
class NotDbApiTestCase(DbTestCase):
def setUp(self):
super(NotDbApiTestCase, self).setUp()
self.flags(connection='notdb://', group='database')
def test_instance_get_all_by_filters_regex_unsupported_db(self):
# Ensure that the 'LIKE' operator is used for unsupported dbs.
self.create_instance_with_args(display_name='test1')
self.create_instance_with_args(display_name='test.*')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.context,
{'display_name': 'test.*'})
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'})
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_paginate(self):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test1['uuid'])
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test2['uuid'])
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test3['uuid'])
self.assertEqual(0, len(result))
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
marker=str(stdlib_uuid.uuid4()))
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
self.assertEquals(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1['id'])
values = {'name': r1['name']}
metadata = {'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values, metadata=metadata)
self.assertEqual(r2['name'], values['name'])
self.assertEqual(r2['availability_zone'],
metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
# test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
result = _create_aggregate(metadata={'availability_zone':
'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertEqual(expected_metadata, {'availability_zone':
'fake_avail_zone'})
def test_aggregate_create_low_privi_context(self):
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
values5 = {'name': 'fake_aggregate5'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
# a3 has no hosts and should not be in the results.
a3 = _create_aggregate(context=ctxt, values=values3)
# a4 has no matching hosts.
a4 = _create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'])
# a5 has no matching hosts after deleting the only matching host.
a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
hosts=['foo5.openstack.org', 'foo.openstack.org'])
db.aggregate_host_delete(ctxt, a5['id'],
'foo.openstack.org')
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
_create_aggregate_with_hosts(context=ctxt, values=values2)
_create_aggregate(context=ctxt, values=values3)
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertFalse('badkey' in r1)
def test_aggregate_metadata_get_by_metadata_key(self):
ctxt = context.get_admin_context()
values = {'aggregate_id': 'fake_id',
'name': 'fake_aggregate'}
aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
hosts=['bar.openstack.org'],
metadata={'availability_zone':
'az1'})
r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
'availability_zone')
self.assertEqual(r1['availability_zone'], set(['az1']))
self.assertTrue('availability_zone' in r1)
self.assertFalse('name' in r1)
def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
key='good')
self.assertEqual(r1['good'], set(['value12', 'value23']))
self.assertFalse('fake_key1' in r1)
self.assertFalse('bad' in r1)
# Delete metadata
db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
key='good')
self.assertFalse('good' in r2)
def test_aggregate_host_get_by_metadata_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
self.assertEqual({
'foo1.openstack.org': set(['value12']),
'foo2.openstack.org': set(['value12', 'value23']),
'foo3.openstack.org': set(['value23']),
}, r1)
self.assertFalse('fake_key1' in r1)
def test_aggregate_get_by_host_not_found(self):
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt)
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
self.assertEqual(aggregate['deleted'], result['id'])
def test_aggregate_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
self.assertEqual(result['availability_zone'], 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, result['id'], new_values)
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['availability_zone'] = 'different_avail_zone'
db.aggregate_update(ctxt, result['id'], values)
expected = db.aggregate_metadata_get(ctxt, result['id'])
updated = db.aggregate_get(ctxt, result['id'])
self.assertThat(values['metadata'],
matchers.DictMatches(expected))
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, result['id'], values)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(values['metadata'], matchers.DictMatches(expected))
def test_aggregate_update_zone_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = _get_fake_aggr_metadata()
metadata.update(new_zone)
db.aggregate_update(ctxt, result['id'], new_zone)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
ctxt = context.get_admin_context()
counter = 3
for c in range(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in range(1, add_counter):
values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in range(1, remove_counter):
db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
def counted():
def get_query(context, id, session, read_deleted):
get_query.counter += 1
raise db_exc.DBDuplicateEntry
get_query.counter = 0
return get_query
get_query = counted()
self.stubs.Set(sqlalchemy_api,
'_aggregate_metadata_get_query', get_query)
self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
aggregate_metadata_add, ctxt, result['id'], {},
max_retries=5)
self.assertEqual(get_query.counter, 5)
def test_aggregate_metadata_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
db.aggregate_metadata_delete(ctxt, result['id'], key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result['id'])
del metadata[metadata.keys()[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
expected = db.aggregate_metadata_get(ctxt, result['id'])
aggregate = db.aggregate_get(ctxt, result['id'])
self.assertEquals(aggregate['availability_zone'], None)
self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result['id'], host)
db.aggregate_host_add(ctxt, result['id'], host)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
h1 = db.aggregate_host_get_all(ctxt, r1['id'])
h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
host = _get_fake_aggr_hosts()[0]
self.assertRaises(exception.AggregateNotFound,
db.aggregate_host_add,
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result['id'],
_get_fake_aggr_hosts()[0])
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
class SqlAlchemyDbApiTestCase(DbTestCase):
def test_instance_get_all_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
self.assertEqual(types.UnicodeType, type(result[0]))
class MigrationTestCase(test.TestCase):
def setUp(self):
super(MigrationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self._create()
self._create()
self._create(status='reverted')
self._create(status='confirmed')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
self._create(source_compute='host2', dest_compute='host3')
self._create(source_compute='host3', dest_compute='host4')
def _create(self, status='migrating', source_compute='host1',
source_node='a', dest_compute='host2', dest_node='b',
system_metadata=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
if system_metadata:
db.instance_system_metadata_update(self.ctxt, instance['uuid'],
system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
'dest_node': dest_node, 'instance_uuid': instance['uuid']}
db.migration_create(self.ctxt, values)
def _assert_in_progress(self, migrations):
for migration in migrations:
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
def test_migration_get_in_progress_joins(self):
self._create(source_compute='foo', system_metadata={'foo': 'bar'})
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'foo', 'a')
system_metadata = migrations[0]['instance']['system_metadata'][0]
self.assertEqual(system_metadata['key'], 'foo')
self.assertEqual(system_metadata['value'], 'bar')
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
# 2 as source + 1 as dest
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_in_progress_host1_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'b')
# some migrations are to/from host1, but none with a node 'b'
self.assertEqual(0, len(migrations))
def test_in_progress_host2_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
# 2 as dest, 1 as source
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_instance_join(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
for migration in migrations:
instance = migration['instance']
self.assertEqual(migration['instance_uuid'], instance['uuid'])
def test_get_migrations_by_filters(self):
filters = {"status": "migrating", "host": "host3"}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertEqual(filters["status"], migration['status'])
hosts = [migration['source_compute'], migration['dest_compute']]
self.assertIn(filters["host"], hosts)
def test_only_admin_can_get_all_migrations_by_filters(self):
user_ctxt = context.RequestContext(user_id=None, project_id=None,
is_admin=False, read_deleted="no",
overwrite=False)
self.assertRaises(exception.AdminRequired,
db.migration_get_all_by_filters, user_ctxt, {})
def test_migration_get_unconfirmed_by_dest_compute(self):
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
        # Ensure no migrations are returned for the second host either.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(0, len(results))
updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
# Ensure different host is not returned
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
def test_migration_update_not_found(self):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
class ModelsObjectComparatorMixin(object):
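    """Helpers for comparing DB model objects against plain dicts."""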
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
if k not in ignored_keys])
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.iteritems():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
class InstanceSystemMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_system_metadata_* methods."""
def setUp(self):
super(InstanceSystemMetadataTestCase, self).setUp()
values = {'host': 'h1', 'project_id': 'p1',
'system_metadata': {'key': 'value'}}
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, values)
def test_instance_system_metadata_get(self):
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value'})
def test_instance_system_metadata_update_new_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
def test_instance_system_metadata_update_existent_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'new_value'})
def test_instance_system_metadata_update_delete_true(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
@test.testtools.skip("bug 1189462")
def test_instance_system_metadata_update_nonexistent(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_system_metadata_update,
self.ctxt, 'nonexistent-uuid',
{'key': 'value'}, True)
class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(ReservationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = {'uuid': 'sample-uuid',
'project_id': 'project1',
'user_id': 'user1',
'resource': 'resource',
'delta': 42,
'expire': timeutils.utcnow() + datetime.timedelta(days=1),
'usage': {'id': 1}}
def test_reservation_create(self):
reservation = db.reservation_create(self.ctxt, **self.values)
self._assertEqualObjects(self.values, reservation, ignored_keys=(
'deleted', 'updated_at',
'deleted_at', 'id',
'created_at', 'usage',
'usage_id'))
self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
def test_reservation_get(self):
reservation = db.reservation_create(self.ctxt, **self.values)
reservation_db = db.reservation_get(self.ctxt, self.values['uuid'])
self._assertEqualObjects(reservation, reservation_db)
def test_reservation_get_nonexistent(self):
self.assertRaises(exception.ReservationNotFound, db.reservation_get,
                          self.ctxt, 'non-existent-reservation-uuid')
def test_reservation_commit(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_commit(self.ctxt, reservations, 'project1', 'user1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 2},
'fixed_ips': {'reserved': 0, 'in_use': 4}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_rollback(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_rollback(self.ctxt, reservations, 'project1', 'user1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_expire(self):
self.values['expire'] = timeutils.utcnow() + datetime.timedelta(days=1)
_quota_reserve(self.ctxt, 'project1', 'user1')
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
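    """Tests for db.api.security_group_rule_* methods."""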
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def test_security_group_rule_get_by_security_group(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(self.ctxt,
security_group['id'])
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
self.assertIn(rule['id'], rules_ids)
def test_security_group_rule_get_by_security_group_grantee(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'grantee_group': security_group})
rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
security_group['id'])
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0]['id'], security_group_rule['id'])
def test_security_group_rule_destroy(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
security_group_rule1 = (
self._create_security_group_rule({}))
security_group_rule2 = self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
for group in rules_by_group:
rules = rules_by_group[group]
for i in range(0, 10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)
class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
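    """Tests for db.api.security_group_* methods."""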
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
        self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
security_group2 = \
self._create_security_group({'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances']), security_group2)
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_no_instances(self):
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
session = get_session()
self.mox.StubOutWithMock(sqlalchemy_api, 'get_session')
sqlalchemy_api.get_session().AndReturn(session)
sqlalchemy_api.get_session().AndReturn(session)
self.mox.ReplayAll()
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
session.expunge(security_group)
self.assertEqual(1, len(security_group['instances']))
security_group = db.security_group_get(self.ctxt, sid)
session.expunge(security_group)
self.assertRaises(sqlalchemy_orm_exc.DetachedInstanceError,
getattr, security_group, 'instances')
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'])
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'])
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
        self.assertEqual(expected, real)
def test_security_group_ensure_default(self):
        self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
        self.assertEqual(1, len(security_groups))
        self.assertEqual("default", security_groups[0]["name"])
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.iteritems():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual({'foo': 'bar'}, meta)
self.assertEqual({'sfoo': 'sbar'}, sys_meta)
def test_instance_get_all_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
        self.assertEqual(expected, actual["created_at"])
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
# With scope 'global' any duplicate should fail, be it this project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
def test_instance_get_all_by_filters_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
for row in meta:
self.assertTrue(row['instance_uuid'] in uuids)
def test_instance_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
    def test_instance_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
self.ctxt, uuids)
for row in sys_meta:
self.assertTrue(row['instance_uuid'] in uuids)
def test_instance_system_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
def test_instance_get_all_by_filters_exact_match(self):
instance = self.create_instance_with_args(host='host1')
self.create_instance_with_args(host='host12')
result = db.instance_get_all_by_filters(self.ctxt,
{'host': 'host1'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_metadata(self):
instance = self.create_instance_with_args(metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_system_metadata(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_unicode_value(self):
instance = self.create_instance_with_args(display_name=u'test♥')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_tags(self):
instance = self.create_instance_with_args(
metadata={'foo': 'bar'})
self.create_instance_with_args()
        # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
{'name': 'tag-value', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
        # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
        # For non-existent tag
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'barred'},
]})
self.assertEqual([], result)
        # Confirm with deleted tags
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
        # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
]})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-value', 'value': 'bar'}
]})
self.assertEqual([], result)
        # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self.assertEqual([], result)
def test_instance_get_by_uuid(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
self._assertEqualInstances(inst, result)
def test_instance_get_by_uuid_join_empty(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=[])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_sys_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['system_metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices'])
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices'])
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True,
'soft_deleted': False})
self._assertEqualListsOfObjects([inst1], result,
ignored_keys=['deleted', 'deleted_at', 'metadata',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False,
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_cleaned(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
result = db.instance_get_all_by_filters(self.ctxt, {})
self.assertEqual(2, len(result))
self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
if inst1['uuid'] == result[0]['uuid']:
self.assertTrue(result[0]['cleaned'])
self.assertFalse(result[1]['cleaned'])
else:
self.assertTrue(result[1]['cleaned'])
self.assertFalse(result[0]['cleaned'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
self.assertEqual(result[0]['uuid'], instance['uuid'])
self.assertEqual(result[0]['system_metadata'], [])
def test_instance_get_all_hung_in_rebooting(self):
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self._assertEqualListsOfObjects([instance], results,
ignored_keys=['task_state', 'info_cache', 'security_groups',
'metadata', 'system_metadata', 'pci_devices'])
db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=timeutils.utcnow())
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
def test_instance_update_with_expected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
'expected_vm_state': ('foo', 'bar')})
def test_instance_update_with_unexpected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
self.assertRaises(exception.UnexpectedVMStateError,
db.instance_update, self.ctxt, instance['uuid'],
{'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
self.assertEqual('wuff', instance_meta['key2'])
self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('foo', instance_meta['host'])
self.assertEqual('meow', instance_meta['key1'])
db.instance_destroy(ctxt, instance['uuid'])
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
# Make sure instance metadata is deleted as well
self.assertEqual({}, instance_meta)
def test_instance_update_with_and_get_original(self):
instance = self.create_instance_with_args(vm_state='building')
(old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
instance['uuid'], {'vm_state': 'needscoffee'})
self.assertEqual('building', old_ref['vm_state'])
self.assertEqual('needscoffee', new_ref['vm_state'])
def test_instance_update_and_get_original_metadata(self):
instance = self.create_instance_with_args()
columns_to_join = ['metadata']
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
columns_to_join=columns_to_join)
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_update_and_get_original_metadata_none_join(self):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_unique_name(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
inst1 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name1')
inst2 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name2')
inst3 = self.create_instance_with_args(context=context2,
project_id='p2',
hostname='fake_name3')
# osapi_compute_unique_server_name_scope is unset so this should work:
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
# With scope 'global' any duplicate should fail.
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
db.instance_update,
context1,
inst2['uuid'],
{'hostname': 'fake_name1'})
self.assertRaises(exception.InstanceExists,
db.instance_update,
context2,
inst3['uuid'],
{'hostname': 'fake_name1'})
# But we should definitely be able to update our name if we aren't
# really changing it.
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists, db.instance_update,
context1, inst2['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
def _test_instance_update_updates_metadata(self, metadata_type):
instance = self.create_instance_with_args()
def set_and_check(meta):
inst = db.instance_update(self.ctxt, instance['uuid'],
{metadata_type: dict(meta)})
_meta = utils.metadata_to_dict(inst[metadata_type])
self.assertEqual(meta, _meta)
meta = {'speed': '88', 'units': 'MPH'}
set_and_check(meta)
meta['gigawatts'] = '1.21'
set_and_check(meta)
del meta['gigawatts']
set_and_check(meta)
    def test_security_group_in_use(self):
        # A security group with an attached instance is reported as in use.
        instance = db.instance_create(self.ctxt, dict(host='foo'))
        group = db.security_group_create(self.ctxt,
                                         {'name': 'fake_in_use',
                                          'description': 'fake_descr',
                                          'user_id': 'fake',
                                          'project_id': 'fake',
                                          'instances': [instance]})
        self.assertTrue(db.security_group_in_use(self.ctxt, group['id']))
def test_instance_update_updates_system_metadata(self):
# Ensure that system_metadata is updated during instance_update
self._test_instance_update_updates_metadata('system_metadata')
def test_instance_update_updates_metadata(self):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
def test_instance_floating_address_get_all(self):
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
instance_uuids = [instance1['uuid'], instance1['uuid'],
instance2['uuid']]
for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
float_addresses,
instance_uuids):
db.fixed_ip_create(ctxt, {'address': fixed_addr,
'instance_uuid': instance_uuid})
fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
db.floating_ip_create(ctxt,
{'address': float_addr,
'fixed_ip_id': fixed_id})
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[0])
self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
def test_instance_stringified_ips(self):
instance = self.create_instance_with_args()
instance = db.instance_update(
self.ctxt, instance['uuid'],
{'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1')})
        self.assertIsInstance(instance['access_ip_v4'], basestring)
        self.assertIsInstance(instance['access_ip_v6'], basestring)
        instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
        self.assertIsInstance(instance['access_ip_v4'], basestring)
        self.assertIsInstance(instance['access_ip_v6'], basestring)
class InstanceMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_metadata_* methods."""
def setUp(self):
super(InstanceMetadataTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_instance_metadata_get(self):
instance = db.instance_create(self.ctxt, {'metadata':
{'key': 'value'}})
self.assertEqual({'key': 'value'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_delete(self):
instance = db.instance_create(self.ctxt,
{'metadata': {'key': 'val',
'key1': 'val1'}})
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
self.assertEqual({'key': 'val'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_update(self):
instance = db.instance_create(self.ctxt, {'host': 'h1',
'project_id': 'p1', 'metadata': {'key': 'value'}})
# This should add new key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
# This should leave only one key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
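    """Tests for db.api.service_* methods."""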
def setUp(self):
super(ServiceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
        self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, service[key])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2, ignored_keys=['compute_node'])
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_get(self):
service1 = self._create_service({})
self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_with_compute_node(self):
service = self._create_service({})
compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=service['id'])
compute = db.compute_node_create(self.ctxt, compute_values)
real_service = db.service_get(self.ctxt, service['id'])
real_compute = real_service['compute_node'][0]
self.assertEqual(compute['id'], real_compute['id'])
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't11', 'binary': 'b11'},
{'host': 'host1', 'topic': 't12', 'binary': 'b12'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't1'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_compute_host(self):
values = [
{'host': 'host1', 'topic': CONF.compute_topic},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': CONF.compute_topic}
]
services = [self._create_service(vals) for vals in values]
real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
self._assertEqualObjects(services[0], real_service,
ignored_keys=['compute_node'])
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_compute_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_args(self):
values = [
{'host': 'host1', 'binary': 'a'},
{'host': 'host2', 'binary': 'b'}
]
services = [self._create_service(vals) for vals in values]
service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
self._assertEqualObjects(services[0], service1)
service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
self._assertEqualObjects(services[1], service2)
def test_service_get_by_args_not_found_exception(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_args,
self.ctxt, 'non-exists-host', 'a')
def test_service_binary_exists_exception(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'topic': 'top1'})
self.assertRaises(exception.ServiceBinaryExists, db.service_create,
self.ctxt, values)
def test_service_topic_exists_exceptions(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'binary': 'bin1'})
self.assertRaises(exception.ServiceTopicExists, db.service_create,
self.ctxt, values)
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
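    """Shared fixtures for the instance type (flavor) test cases."""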
def setUp(self):
super(BaseInstanceTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.user_ctxt = context.RequestContext('user', 'user')
def _get_base_values(self):
return {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 'fake_flavor',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': True
}
def _create_inst_type(self, values):
v = self._get_base_values()
v.update(values)
return db.flavor_create(self.ctxt, v)
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
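    """Tests for db.api.action_* (instance action) methods."""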
IGNORED_FIELDS = [
'id',
'created_at',
'updated_at',
'deleted_at',
'deleted'
]
def setUp(self):
super(InstanceActionTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_action_values(self, uuid, action='run_instance', ctxt=None):
if ctxt is None:
ctxt = self.ctxt
return {
'action': action,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'user_id': ctxt.user_id,
'project_id': ctxt.project_id,
'start_time': timeutils.utcnow(),
'message': 'action-message'
}
def _create_event_values(self, uuid, event='schedule',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
values = {
'event': event,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'start_time': timeutils.utcnow()
}
if extra is not None:
values.update(extra)
return values
def _assertActionSaved(self, action, uuid):
"""Retrieve the action to ensure it was successfully added."""
actions = db.actions_get(self.ctxt, uuid)
self.assertEqual(1, len(actions))
self._assertEqualObjects(action, actions[0])
def _assertActionEventSaved(self, event, action_id):
# Retrieve the event to ensure it was successfully added
events = db.action_events_get(self.ctxt, action_id)
self.assertEqual(1, len(events))
self._assertEqualObjects(event, events[0],
['instance_uuid', 'request_id'])
def test_instance_action_start(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
ignored_keys = self.IGNORED_FIELDS + ['finish_time']
self._assertEqualObjects(action_values, action, ignored_keys)
self._assertActionSaved(action, uuid)
def test_instance_action_finish(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
db.action_start(self.ctxt, action_values)
action_values['finish_time'] = timeutils.utcnow()
action = db.action_finish(self.ctxt, action_values)
self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
self._assertActionSaved(action, uuid)
def test_instance_action_finish_without_started_event(self):
"""Create an instance finish action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action_values['finish_time'] = timeutils.utcnow()
self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
self.ctxt, action_values)
def test_instance_actions_get_by_instance(self):
"""Ensure we can get actions by UUID."""
uuid1 = str(stdlib_uuid.uuid4())
expected = []
action_values = self._create_action_values(uuid1)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
action_values['action'] = 'resize'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
# Create some extra actions
uuid2 = str(stdlib_uuid.uuid4())
ctxt2 = context.get_admin_context()
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
# Retrieve the action to ensure it was successfully added
actions = db.actions_get(self.ctxt, uuid1)
self._assertEqualListsOfObjects(expected, actions)
def test_instance_action_get_by_instance_and_action(self):
"""Ensure we can get an action by instance UUID and action id."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid1)
db.action_start(self.ctxt, action_values)
action_values['action'] = 'resize'
db.action_start(self.ctxt, action_values)
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
actions = db.actions_get(self.ctxt, uuid1)
request_id = actions[0]['request_id']
action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
self.assertEqual('run_instance', action['action'])
self.assertEqual(self.ctxt.request_id, action['request_id'])
def test_instance_action_event_start(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
event_values = self._create_event_values(uuid)
event = db.action_event_start(self.ctxt, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_start_without_action(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = self._create_event_values(uuid)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_start, self.ctxt, event_values)
def test_instance_action_event_finish_without_started_event(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionEventNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_without_action(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_success(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_event_finish_error(self):
"""Finish an instance action event with an error."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Error'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual('Error', action['message'])
def test_instance_action_and_event_start_string_time(self):
"""Create an instance action and event with a string start_time."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_start(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
db.action_start(ctxt2,
self._create_action_values(uuid2, 'reboot', ctxt2))
event = db.action_event_start(self.ctxt,
self._create_event_values(uuid1))
event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
db.action_event_start(ctxt2, event_values)
# Retrieve the event to ensure it was successfully added
saved_event = db.action_event_get_by_id(self.ctxt,
action['id'],
event['id'])
self._assertEqualObjects(event, saved_event,
['instance_uuid', 'request_id'])
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
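    """Tests for db.api.instance_fault_* methods."""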
def setUp(self):
super(InstanceFaultTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_fault_values(self, uuid, code=404):
return {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': code,
'host': 'localhost'
}
def test_instance_fault_create(self):
"""Ensure we can create an instance fault."""
uuid = str(stdlib_uuid.uuid4())
# Ensure no faults registered for this instance
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(0, len(faults[uuid]))
# Create a fault
fault_values = self._create_fault_values(uuid)
fault = db.instance_fault_create(self.ctxt, fault_values)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(fault_values, fault, ignored_keys)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
def test_instance_fault_get_by_instance(self):
"""Ensure we can retrieve faults for instance."""
uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
expected[uuid].append(fault)
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
def test_instance_faults_get_by_instance_uuids_no_faults(self):
uuid = str(stdlib_uuid.uuid4())
# None should be returned when no faults exist.
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
expected = {uuid: []}
self.assertEqual(expected, faults)
def test_instance_faults_get_by_instance_uuids_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
self.assertEqual({}, faults)
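
# NOTE: illustrative sketch only. The fault tests above depend on a return
# shape of {instance_uuid: [fault, ...]} that contains an empty list for
# every requested uuid and an empty dict when no uuids are requested at
# all. A stdlib-only sketch of that grouping (names are hypothetical):
def _group_faults_sketch(uuids, faults):
    """Group fault dicts by their 'instance_uuid' key (sketch)."""
    grouped = dict((uuid, []) for uuid in uuids)
    for fault in faults:
        if fault['instance_uuid'] in grouped:
            grouped[fault['instance_uuid']].append(fault)
    return grouped
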
class InstanceTypeTestCase(BaseInstanceTypeTestCase):
def test_flavor_create(self):
inst_type = self._create_inst_type({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
        self.assertIsNotNone(inst_type['id'])
self._assertEqualObjects(inst_type, self._get_base_values(),
ignored_keys)
def test_instance_type_destroy(self):
specs1 = {'a': '1', 'b': '2'}
inst_type1 = self._create_inst_type({'name': 'name1', 'flavorid': 'a1',
'extra_specs': specs1})
specs2 = {'c': '4', 'd': '3'}
inst_type2 = self._create_inst_type({'name': 'name2', 'flavorid': 'a2',
'extra_specs': specs2})
db.flavor_destroy(self.ctxt, 'name1')
self.assertRaises(exception.InstanceTypeNotFound,
db.flavor_get, self.ctxt, inst_type1['id'])
real_specs1 = db.flavor_extra_specs_get(self.ctxt,
inst_type1['flavorid'])
self._assertEqualObjects(real_specs1, {})
r_inst_type2 = db.flavor_get(self.ctxt, inst_type2['id'])
self._assertEqualObjects(inst_type2, r_inst_type2, 'extra_specs')
def test_instance_type_destroy_not_found(self):
self.assertRaises(exception.InstanceTypeNotFound,
db.flavor_destroy, self.ctxt, 'nonexists')
def test_flavor_create_duplicate_name(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeExists,
self._create_inst_type,
{'flavorid': 'some_random_flavor'})
def test_flavor_create_duplicate_flavorid(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeIdExists,
self._create_inst_type,
{'name': 'some_random_name'})
def test_flavor_create_with_extra_specs(self):
extra_specs = dict(a='abc', b='def', c='ghi')
inst_type = self._create_inst_type({'extra_specs': extra_specs})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self._assertEqualObjects(inst_type, self._get_base_values(),
ignored_keys)
self._assertEqualObjects(extra_specs, inst_type['extra_specs'])
def test_instance_type_get_all(self):
# NOTE(boris-42): Remove base instance types
for it in db.flavor_get_all(self.ctxt):
db.flavor_destroy(self.ctxt, it['name'])
instance_types = [
{'root_gb': 600, 'memory_mb': 100, 'disabled': True,
'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
{'root_gb': 500, 'memory_mb': 200, 'disabled': True,
'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
{'root_gb': 400, 'memory_mb': 300, 'disabled': False,
'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
{'root_gb': 300, 'memory_mb': 400, 'disabled': False,
'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
{'root_gb': 200, 'memory_mb': 500, 'disabled': True,
'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
{'root_gb': 100, 'memory_mb': 600, 'disabled': True,
'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
]
instance_types = [self._create_inst_type(it) for it in instance_types]
lambda_filters = {
'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
'min_root_gb': lambda it, v: it['root_gb'] >= v,
'disabled': lambda it, v: it['disabled'] == v,
'is_public': lambda it, v: (v is None or it['is_public'] == v)
}
mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
disabled_filts = [{'disabled': x} for x in [True, False]]
is_public_filts = [{'is_public': x} for x in [True, False, None]]
def assert_multi_filter_instance_type_get(filters=None):
if filters is None:
filters = {}
expected_it = instance_types
for name, value in filters.iteritems():
filt = lambda it: lambda_filters[name](it, value)
expected_it = filter(filt, expected_it)
real_it = db.flavor_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects(expected_it, real_it)
        # No filters.
assert_multi_filter_instance_type_get()
        # Test with a single filter at a time.
for filt in mem_filts:
assert_multi_filter_instance_type_get(filt)
for filt in root_filts:
assert_multi_filter_instance_type_get(filt)
for filt in disabled_filts:
assert_multi_filter_instance_type_get(filt)
for filt in is_public_filts:
assert_multi_filter_instance_type_get(filt)
        # Test all filter combinations together.
for mem in mem_filts:
for root in root_filts:
for disabled in disabled_filts:
for is_public in is_public_filts:
filts = [f.items() for f in
[mem, root, disabled, is_public]]
filts = dict(reduce(lambda x, y: x + y, filts, []))
assert_multi_filter_instance_type_get(filts)
def test_flavor_get_all_limit_sort(self):
        def assert_sorted_by_key_dir(sort_key, asc=True):
            sort_dir = 'asc' if asc else 'desc'
            results = db.flavor_get_all(self.ctxt, sort_key=sort_key,
                                        sort_dir=sort_dir)
            # Manually sort the results as we would expect them
            expected_results = sorted(results,
                                      key=lambda item: item[sort_key],
                                      reverse=(not asc))
            self.assertEqual(expected_results, results)
def assert_sorted_by_key_both_dir(sort_key):
assert_sorted_by_key_dir(sort_key, True)
assert_sorted_by_key_dir(sort_key, False)
for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
'vcpu_weight', 'id']:
assert_sorted_by_key_both_dir(attr)
def test_flavor_get_all_limit(self):
limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
self.assertEqual(2, len(limited_flavors))
def test_flavor_get_all_list_marker(self):
all_flavors = db.flavor_get_all(self.ctxt)
# Set the 3rd result as the marker
marker_flavorid = all_flavors[2]['flavorid']
marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
# We expect everything /after/ the 3rd result
expected_results = all_flavors[3:]
self.assertEqual(expected_results, marked_flavors)
def test_instance_type_get(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
inst_type_by_id = db.flavor_get(self.ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
def test_instance_type_get_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_id = db.flavor_get(self.ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
# Regular user can not
self.assertRaises(exception.InstanceTypeNotFound, db.flavor_get,
self.user_ctxt, inst_type['id'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_id = db.flavor_get(self.user_ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
def test_instance_type_get_by_name(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
inst_type_by_name = db.flavor_get_by_name(self.ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
def test_instance_type_get_by_name_not_found(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeNotFoundByName,
db.flavor_get_by_name, self.ctxt, 'nonexists')
def test_instance_type_get_by_name_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_name = db.flavor_get_by_name(self.ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
# Regular user can not
self.assertRaises(exception.InstanceTypeNotFoundByName,
db.flavor_get_by_name, self.user_ctxt,
inst_type['name'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_name = db.flavor_get_by_name(self.user_ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
def test_instance_type_get_by_flavor_id(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
params = (self.ctxt, inst_type['flavorid'])
inst_type_by_flavorid = db.flavor_get_by_flavor_id(*params)
self._assertEqualObjects(inst_type, inst_type_by_flavorid)
def test_instance_type_get_by_flavor_not_found(self):
self._create_inst_type({})
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_instance_type_get_by_flavor_id_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
inst_type['flavorid'])
self._assertEqualObjects(inst_type, inst_type_by_fid)
# Regular user can not
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id, self.user_ctxt,
inst_type['flavorid'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
inst_type['flavorid'])
self._assertEqualObjects(inst_type, inst_type_by_fid)
def test_instance_type_get_by_flavor_id_deleted(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123'})
db.flavor_destroy(self.ctxt, 'abc')
inst_type_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
inst_type['flavorid'], read_deleted='yes')
self.assertEqual(inst_type['id'], inst_type_by_fid['id'])
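
# NOTE: illustrative sketch only, not the sqlalchemy query. The limit and
# marker tests above expect marker pagination to return the flavors
# strictly *after* the row whose flavorid equals the marker, optionally
# capped by a limit. A list-based sketch (names are hypothetical):
def _paginate_flavors_sketch(flavors, marker=None, limit=None):
    """Return flavors after the marker flavorid, up to limit (sketch)."""
    start = 0
    if marker is not None:
        flavorids = [f['flavorid'] for f in flavors]
        start = flavorids.index(marker) + 1  # raises if marker is unknown
    page = flavors[start:]
    if limit is not None:
        page = page[:limit]
    return page
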
class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
values = ({'name': 'n1', 'flavorid': 'f1',
'extra_specs': dict(a='a', b='b', c='c')},
{'name': 'n2', 'flavorid': 'f2',
'extra_specs': dict(d='d', e='e', f='f')})
# NOTE(boris-42): We have already tested flavor_create method
# with extra_specs in InstanceTypeTestCase.
self.inst_types = [self._create_inst_type(v) for v in values]
def test_instance_type_extra_specs_get(self):
for it in self.inst_types:
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_instance_type_extra_specs_get_item(self):
expected = dict(f1=dict(a='a', b='b', c='c'),
f2=dict(d='d', e='e', f='f'))
for flavor, specs in expected.iteritems():
for key, val in specs.iteritems():
spec = db.flavor_extra_specs_get_item(self.ctxt, flavor,
key)
self.assertEqual(spec[key], val)
def test_instance_type_extra_specs_delete(self):
for it in self.inst_types:
specs = it['extra_specs']
key = specs.keys()[0]
del specs[key]
db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_instance_type_extra_specs_delete_failed(self):
for it in self.inst_types:
self.assertRaises(exception.InstanceTypeExtraSpecsNotFound,
db.flavor_extra_specs_delete,
self.ctxt, it['flavorid'], 'dummy')
def test_instance_type_extra_specs_update_or_create(self):
for it in self.inst_types:
current_specs = it['extra_specs']
current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
params = (self.ctxt, it['flavorid'], current_specs)
db.flavor_extra_specs_update_or_create(*params)
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(current_specs, real_specs)
def test_instance_type_extra_specs_update_or_create_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_extra_specs_update_or_create,
self.ctxt, 'nonexists', {})
def test_instance_type_extra_specs_update_or_create_retry(self):
def counted():
def get_id(context, flavorid, session):
get_id.counter += 1
raise db_exc.DBDuplicateEntry
get_id.counter = 0
return get_id
get_id = counted()
self.stubs.Set(sqlalchemy_api,
'_instance_type_get_id_from_flavor', get_id)
self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
flavor_extra_specs_update_or_create,
self.ctxt, 1, {}, 5)
self.assertEqual(get_id.counter, 5)
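
# NOTE: illustrative sketch only. The retry test above stubs the flavor id
# lookup to always raise DBDuplicateEntry and checks that it is attempted
# exactly max_retries times before the error escapes. A generic sketch of
# such a retry loop (name and exception type are hypothetical):
def _retry_on_duplicate_sketch(func, max_retries, duplicate_exc=Exception):
    """Call func() up to max_retries times, re-raising on the last try."""
    for attempt in range(max_retries):
        try:
            return func()
        except duplicate_exc:
            if attempt == max_retries - 1:
                raise
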
class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
def _create_inst_type_access(self, instance_type_id, project_id):
return db.flavor_access_add(self.ctxt, instance_type_id,
project_id)
def test_instance_type_access_get_by_flavor_id(self):
inst_types = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))
access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
self._create_inst_type_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]
for it, access_it in zip((it1, it2), (access_it1, access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_instance_type_access_get_by_flavor_id_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_instance_type_access_add(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
project_id = 'p1'
access = self._create_inst_type_access(inst_type['flavorid'],
project_id)
# NOTE(boris-42): Check that instance_type_access_add doesn't fail and
# returns correct value. This is enough because other
# logic is checked by other methods.
        self.assertIsNotNone(access['id'])
self.assertEqual(access['instance_type_id'], inst_type['id'])
self.assertEqual(access['project_id'], project_id)
def test_instance_type_access_add_to_non_existing_flavor(self):
self.assertRaises(exception.FlavorNotFound,
self._create_inst_type_access,
'nonexists', 'does_not_matter')
def test_instance_type_access_add_duplicate_project_id_flavor(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
params = (inst_type['flavorid'], 'p1')
self._create_inst_type_access(*params)
self.assertRaises(exception.FlavorAccessExists,
self._create_inst_type_access, *params)
def test_instance_type_access_remove(self):
inst_types = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))
access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
self._create_inst_type_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]
db.flavor_access_remove(self.ctxt, it1['flavorid'],
access_it1[1]['project_id'])
for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_instance_type_access_remove_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_access_remove,
self.ctxt, 'nonexists', 'does_not_matter')
def test_instance_type_access_remove_access_not_found(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
params = (inst_type['flavorid'], 'p1')
self._create_inst_type_access(*params)
self.assertRaises(exception.FlavorAccessNotFound,
db.flavor_access_remove,
self.ctxt, inst_type['flavorid'], 'p2')
def test_instance_type_access_removed_after_instance_type_destroy(self):
inst_type1 = self._create_inst_type({'flavorid': 'f1', 'name': 'n1'})
inst_type2 = self._create_inst_type({'flavorid': 'f2', 'name': 'n2'})
values = [
(inst_type1['flavorid'], 'p1'),
(inst_type1['flavorid'], 'p2'),
(inst_type2['flavorid'], 'p3')
]
for v in values:
self._create_inst_type_access(*v)
db.flavor_destroy(self.ctxt, inst_type1['name'])
p = (self.ctxt, inst_type1['flavorid'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
p = (self.ctxt, inst_type2['flavorid'])
self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
db.flavor_destroy(self.ctxt, inst_type2['name'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
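
# NOTE: illustrative sketch only. The access tests above expect that
# destroying a flavor also removes every access record pointing at it,
# while other flavors keep their access lists intact. A cascade sketch on
# plain in-memory lists (dict keys are hypothetical):
def _destroy_flavor_sketch(flavors, accesses, flavorid):
    """Drop a flavor and its access rows from in-memory lists (sketch)."""
    remaining_flavors = [f for f in flavors if f['flavorid'] != flavorid]
    remaining_accesses = [a for a in accesses if a['flavorid'] != flavorid]
    return remaining_flavors, remaining_accesses
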
class FixedIPTestCase(BaseInstanceTypeTestCase):
def _timeout_test(self, ctxt, timeout, multi_host):
instance = db.instance_create(ctxt, dict(host='foo'))
net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
host='bar'))
old = timeout - datetime.timedelta(seconds=5)
new = timeout + datetime.timedelta(seconds=5)
# should deallocate
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# still allocated
db.fixed_ip_create(ctxt, dict(allocated=True,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# wrong network
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=old))
# too new
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=new))
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 0)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 1)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 0)
def test_fixed_ip_get_by_floating_address(self):
fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
values = {'address': '8.7.6.5',
'fixed_ip_id': fixed_ip['id']}
floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self._assertEqualObjects(fixed_ip, fixed_ip_ref)
def test_fixed_ip_get_by_host(self):
host_ips = {
'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
'host2': ['1.1.1.4', '1.1.1.5'],
'host3': ['1.1.1.6']
}
for host, ips in host_ips.iteritems():
for ip in ips:
instance_uuid = self._create_instance(host=host)
db.fixed_ip_create(self.ctxt, {'address': ip})
db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
for host, ips in host_ips.iteritems():
ips_on_host = map(lambda x: x['address'],
db.fixed_ip_get_by_host(self.ctxt, host))
self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
def test_fixed_ip_get_by_network_host_not_found_exception(self):
self.assertRaises(
exception.FixedIpNotFoundForNetworkHost,
db.fixed_ip_get_by_network_host,
self.ctxt, 1, 'ignore')
def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
        self.assertEqual(1, fip['network_id'])
        self.assertEqual('host', fip['host'])
def _create_instance(self, **kwargs):
instance = db.instance_create(self.ctxt, kwargs)
return instance['uuid']
def test_fixed_ip_get_by_instance_fixed_ip_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
another_instance = db.instance_create(self.ctxt, {})
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=another_instance['uuid'], address="192.168.1.7"))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_not_found_exception(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForInstance,
db.fixed_ip_get_by_instance,
self.ctxt, instance_uuid)
def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
another_vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=another_vif.id, address="192.168.1.7"))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_no_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self.assertEqual(0, len(ips_list))
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip(instance_uuid=instance_uuid)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
def test_fixed_ip_associate_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_pool_invalid_uuid(self):
instance_uuid = '123'
self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_create_same_address(self):
address = '192.168.1.5'
params = {'address': address}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
self.ctxt, params)
def test_fixed_ip_create_success(self):
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': '192.168.1.5',
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_bulk_create_same_address(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_2, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None},
]
self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
self.ctxt, params)
# In this case the transaction will be rolled back and none of the ips
# will make it to the database.
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_1)
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_2)
def test_fixed_ip_bulk_create_success(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None}
]
db.fixed_ip_bulk_create(self.ctxt, params)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        # We have no `id` in the incoming data, so we cannot use
        # _assertEqualListsOfObjects to compare the incoming data with the
        # received objects.
fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
params = sorted(params, key=lambda i: i['network_id'])
for param, ip in zip(params, fixed_ip_data):
self._assertEqualObjects(param, ip, ignored_keys)
def test_fixed_ip_disassociate(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
db.fixed_ip_disassociate(self.ctxt, address)
fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
ignored_keys = ['created_at', 'id', 'deleted_at',
'updated_at', 'instance_uuid']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
self.assertIsNone(fixed_ip_data['instance_uuid'])
def test_fixed_ip_get_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFound,
db.fixed_ip_get, self.ctxt, 0)
def test_fixed_ip_get_success2(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
        fixed_ip_id = db.fixed_ip_create(self.ctxt, param)['id']
self.ctxt.is_admin = False
self.assertRaises(exception.NotAuthorized, db.fixed_ip_get,
self.ctxt, fixed_ip_id)
def test_fixed_ip_get_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address_detailed, self.ctxt,
'192.168.1.5')
def test_fixed_ip_get_by_address_with_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.FixedIpInvalid,
db.fixed_ip_get_by_address_detailed, self.ctxt,
'192.168.1.6')
    def test_fixed_ip_get_by_address_detailed_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt, address)
# fixed ip check here
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
# network model check here
network_data = db.network_get(self.ctxt, network_id)
self._assertEqualObjects(network_data, fixed_ip_data[1])
# Instance check here
instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
ignored_keys = ['info_cache', 'system_metadata',
'security_groups', 'metadata',
                        'pci_devices']  # joined instance fields, not compared
self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)
def test_fixed_ip_update_not_found_for_address(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_update, self.ctxt,
'192.168.1.5', {})
def test_fixed_ip_update(self):
instance_uuid_1 = self._create_instance()
instance_uuid_2 = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
param_1 = {
'reserved': True, 'deleted': 0, 'leased': True,
'host': '192.168.133.1', 'address': '10.0.0.2',
'allocated': True, 'instance_uuid': instance_uuid_1,
'network_id': network_id_1, 'virtual_interface_id': '123',
}
param_2 = {
'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
param_2['address'])
self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
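
# NOTE: illustrative sketch only, not the Nova query. The timeout tests
# above expect that only fixed IPs which are unallocated, attached to a
# network, older than the cutoff and tied to the given host are
# disassociated; in the real API the host check goes through the network
# for single-host networks and through the instance for multi-host ones,
# which is collapsed here into one hypothetical 'host' key:
def _should_time_out_sketch(fixed_ip, host, cutoff):
    """Return True if this fixed IP row would be disassociated (sketch)."""
    return (not fixed_ip['allocated']
            and fixed_ip['network_id'] is not None
            and fixed_ip['host'] == host
            and fixed_ip['updated_at'] < cutoff)
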
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(FloatingIpTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'address': '1.1.1.1',
'fixed_ip_id': None,
'project_id': 'fake_project',
'host': 'fake_host',
'auto_assigned': False,
'pool': 'fake_pool',
'interface': 'fake_interface',
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def _create_floating_ip(self, values):
if not values:
values = {}
vals = self._get_base_values()
vals.update(values)
return db.floating_ip_create(self.ctxt, vals)
def test_floating_ip_get(self):
values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
floating_ips = [self._create_floating_ip(val) for val in values]
for floating_ip in floating_ips:
real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
self._assertEqualObjects(floating_ip, real_floating_ip,
ignored_keys=['fixed_ip'])
def test_floating_ip_get_not_found(self):
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, 100500)
def test_floating_ip_get_with_long_id_not_found(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidID,
db.floating_ip_get, self.ctxt, 123456789101112)
def test_floating_ip_get_pools(self):
values = [
{'address': '0.0.0.0', 'pool': 'abc'},
{'address': '1.1.1.1', 'pool': 'abc'},
{'address': '2.2.2.2', 'pool': 'def'},
{'address': '3.3.3.3', 'pool': 'ghi'},
]
for val in values:
self._create_floating_ip(val)
expected_pools = [{'name': x}
for x in set(map(lambda x: x['pool'], values))]
real_pools = db.floating_ip_get_pools(self.ctxt)
self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
def test_floating_ip_allocate_address(self):
pools = {
'pool1': ['0.0.0.0', '1.1.1.1'],
'pool2': ['2.2.2.2'],
'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
}
for pool, addresses in pools.iteritems():
for address in addresses:
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
for pool, addresses in pools.iteritems():
alloc_addrs = []
for i in addresses:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
alloc_addrs.append(float_addr)
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
def test_floating_ip_allocate_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
float_ips = []
for i in range(0, 2):
float_ips.append(self._create_floating_ip(
{"address": addresses[i]}))
for i in range(2, 4):
float_ips.append(self._create_floating_ip({"address": addresses[i],
"auto_assigned": True}))
for i in range(0, 2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertFalse(float_ip.auto_assigned)
for i in range(2, 4):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
def test_floating_ip_allocate_address_no_more_floating_ips(self):
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
def test_floating_ip_allocate_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.NotAuthorized,
db.floating_ip_allocate_address,
ctxt, 'other_project_id', 'any_pool')
def _get_existing_ips(self):
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
def test_floating_ip_bulk_create(self):
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
db.floating_ip_bulk_create(self.ctxt,
map(lambda x: {'address': x}, expected_ips))
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_ips)
def test_floating_ip_bulk_create_duplicate(self):
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
prepare_ips = lambda x: {'address': x}
db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_bulk_create,
self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '1.1.1.5')
def test_floating_ip_bulk_destroy(self):
ips_for_delete = []
ips_for_non_delete = []
def create_ips(i):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, 256)]
        # NOTE(boris-42): Create more than 256 IPs to check that
        # _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i))
ips_for_non_delete.extend(create_ips(3))
db.floating_ip_bulk_create(self.ctxt,
ips_for_delete + ips_for_non_delete)
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_addresses)
def test_floating_ip_create(self):
floating_ip = self._create_floating_ip({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
        self.assertIsNotNone(floating_ip['id'])
self._assertEqualObjects(floating_ip, self._get_base_values(),
ignored_keys)
def test_floating_ip_create_duplicate(self):
self._create_floating_ip({})
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ip, {})
def _create_fixed_ip(self, params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_floating_ip_fixed_ip_associate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
self.assertEqual(fixed_ip.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
self.assertEqual('host', updated_float_ip.host)
        # Test that associating an already associated float_ip returns None
result = db.floating_ip_fixed_ip_associate(self.ctxt,
float_addresses[0],
fixed_addresses[0], 'host')
        self.assertIsNone(result)
def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', 'some', 'some')
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
db.floating_ip_deallocate(self.ctxt, float_ip.address)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
        self.assertIsNone(updated_float_ip.project_id)
        self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
expected_len = len(addresses)
for float_ip in float_ips:
db.floating_ip_destroy(self.ctxt, float_ip.address)
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, float_ip.id)
expected_len -= 1
if expected_len > 0:
self.assertEqual(expected_len,
len(db.floating_ip_get_all(self.ctxt)))
else:
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_disassociate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
self.assertEqual(fixed.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertIsNone(updated_float_ip.fixed_ip_id)
            self.assertIsNone(updated_float_ip.host)
def test_floating_ip_disassociate_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_disassociate, self.ctxt,
'11.11.11.11')
def test_floating_ip_set_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr,
'auto_assigned': False})
for addr in addresses]
for i in range(2):
db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
for i in range(2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
self.assertFalse(float_ip.auto_assigned)
def test_floating_ip_get_all(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
self._assertEqualListsOfObjects(float_ips,
db.floating_ip_get_all(self.ctxt))
def test_floating_ip_get_all_not_found(self):
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_get_all_by_host(self):
hosts = {
'host1': ['1.1.1.1', '1.1.1.2'],
'host2': ['2.1.1.1', '2.1.1.2'],
'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
hosts_with_float_ips = {}
for host, addresses in hosts.iteritems():
hosts_with_float_ips[host] = []
for address in addresses:
float_ip = self._create_floating_ip({'host': host,
'address': address})
hosts_with_float_ips[host].append(float_ip)
for host, float_ips in hosts_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects(float_ips, real_float_ips)
def test_floating_ip_get_all_by_host_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForHost,
db.floating_ip_get_all_by_host,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_all_by_project(self):
projects = {
'pr1': ['1.1.1.1', '1.1.1.2'],
'pr2': ['2.1.1.1', '2.1.1.2'],
'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
projects_with_float_ips = {}
for project_id, addresses in projects.iteritems():
projects_with_float_ips[project_id] = []
for address in addresses:
float_ip = self._create_floating_ip({'project_id': project_id,
'address': address})
projects_with_float_ips[project_id].append(float_ip)
for project_id, float_ips in projects_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
project_id)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys='fixed_ip')
def test_floating_ip_get_all_by_project_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.NotAuthorized,
db.floating_ip_get_all_by_project,
ctxt, 'other_project')
def test_floating_ip_get_by_address(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
for float_ip in float_ips:
real_float_ip = db.floating_ip_get_by_address(self.ctxt,
float_ip.address)
self._assertEqualObjects(float_ip, real_float_ip,
ignored_keys='fixed_ip')
def test_floating_ip_get_by_address_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '20.20.20.20')
def test_floating_ip_get_by_invalid_address(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.floating_ip_get_by_address,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_by_fixed_address(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
fixed_addr)
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_get_by_fixed_ip_id(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
fixed_ip['id'])
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_update(self):
float_ip = self._create_floating_ip({})
values = {
'project_id': 'some_pr',
'host': 'some_host',
'auto_assigned': True,
'interface': 'some_interface',
'pool': 'some_pool'
}
db.floating_ip_update(self.ctxt, float_ip['address'], values)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
'deleted_at', 'created_at',
'deleted', 'fixed_ip_id',
'fixed_ip'])
def test_floating_ip_update_to_duplicate(self):
float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_update,
self.ctxt, float_ip2['address'],
{'address': float_ip1['address']})
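
# NOTE: illustrative sketch only. The bulk destroy test above deliberately
# creates more than 256 addresses so that the internal _ip_range_splitter
# has to break the delete into several batches. A generic chunking sketch
# (the function name is hypothetical):
def _chunk_addresses_sketch(items, size=256):
    """Yield successive fixed-size chunks of a list (sketch)."""
    for start in range(0, len(items), size):
        yield items[start:start + size]
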
class InstanceDestroyConstraints(test.TestCase):
def test_destroy_with_equal_any_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'resize'})
constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
def test_destroy_with_not_equal_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_not_equal_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'active'})
constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
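
# NOTE: illustrative sketch only, not db.constraint itself. The constraint
# tests above expect instance_destroy to proceed only when the named
# column matches one of the equal_any values, or matches none of the
# not_equal values. A sketch of evaluating that condition on a row dict:
def _constraint_met_sketch(row, column, values, negated=False):
    """Return True if row[column] satisfies the (not_)equal_any check."""
    matched = row[column] in values
    return not matched if negated else matched
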
class VolumeUsageDBApiTestCase(test.TestCase):
def setUp(self):
super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.useFixture(test.TimeOverride())
def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(now)
timeutils.utcnow().AndReturn(now)
timeutils.utcnow().AndReturn(now)
self.mox.ReplayAll()
expected_vol_usages = [{'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'user_id': 'fake-user-uuid1',
'curr_reads': 1000,
'curr_read_bytes': 2000,
'curr_writes': 3000,
'curr_write_bytes': 4000,
'curr_last_refreshed': now,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None},
{'volume_id': u'2',
'instance_uuid': 'fake-instance-uuid2',
'project_id': 'fake-project-uuid2',
'user_id': 'fake-user-uuid2',
'curr_reads': 100,
'curr_read_bytes': 200,
'curr_writes': 300,
'curr_write_bytes': 400,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None}]
def _compare(vol_usage, expected):
for key, value in expected.items():
self.assertEqual(vol_usage[key], value)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
wr_req=30, wr_bytes=40,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid2',
project_id='fake-project-uuid2',
user_id='fake-user-uuid2',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
wr_req=3000, wr_bytes=4000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
_compare(vol_usages[0], expected_vol_usages[0])
_compare(vol_usages[1], expected_vol_usages[1])
def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
now = datetime.datetime(1, 1, 1, 1, 0, 0)
start_time = now - datetime.timedelta(seconds=10)
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(now)
now1 = now + datetime.timedelta(minutes=1)
timeutils.utcnow().AndReturn(now1)
now2 = now + datetime.timedelta(minutes=2)
timeutils.utcnow().AndReturn(now2)
now3 = now + datetime.timedelta(minutes=3)
timeutils.utcnow().AndReturn(now3)
self.mox.ReplayAll()
db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
wr_req=500, wr_bytes=600,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
availability_zone='fake-az',
user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
wr_req=600, wr_bytes=700,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
expected_vol_usages = {'volume_id': u'1',
'project_id': 'fake-project-uuid',
'user_id': 'fake-user-uuid',
'instance_uuid': 'fake-instance-uuid',
'availability_zone': 'fake-az',
'tot_reads': 600,
'tot_read_bytes': 800,
'tot_writes': 1000,
'tot_write_bytes': 1200,
'tot_last_refreshed': now3,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'curr_last_refreshed': now2}
        self.assertEqual(1, len(vol_usages))
for key, value in expected_vol_usages.items():
self.assertEqual(vol_usages[0][key], value, key)
def test_vol_usage_update_when_blockdevicestats_reset(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
db.vol_usage_update(ctxt, u'1',
rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 200,
'curr_read_bytes': 300,
'curr_writes': 400,
'curr_write_bytes': 500,
'tot_reads': 10000,
'tot_read_bytes': 20000,
'tot_writes': 30000,
'tot_write_bytes': 40000}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could occur when a volume is
        # detached right after an instance has rebooted / recovered and
        # before the system has polled and updated the volume usage cache
        # table.
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1',
update_totals=True)
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'tot_reads': 10100,
'tot_read_bytes': 20200,
'tot_writes': 30300,
'tot_write_bytes': 40400}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
class TaskLogTestCase(test.TestCase):
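    """Tests for db.api.task_log_* methods."""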
def setUp(self):
super(TaskLogTestCase, self).setUp()
self.context = context.get_admin_context()
now = timeutils.utcnow()
self.begin = now - datetime.timedelta(seconds=10)
self.end = now - datetime.timedelta(seconds=5)
self.task_name = 'fake-task-name'
self.host = 'fake-host'
self.message = 'Fake task message'
db.task_log_begin_task(self.context, self.task_name, self.begin,
self.end, self.host, message=self.message)
def test_task_log_get(self):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], self.task_name)
self.assertEqual(result['period_beginning'], self.begin)
self.assertEqual(result['period_ending'], self.end)
self.assertEqual(result['host'], self.host)
self.assertEqual(result['message'], self.message)
def test_task_log_get_all(self):
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host)
self.assertEqual(len(result), 1)
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host, state='')
self.assertEqual(len(result), 0)
def test_task_log_begin_task(self):
db.task_log_begin_task(self.context, 'fake', self.begin,
self.end, self.host, task_items=42,
message=self.message)
result = db.task_log_get(self.context, 'fake', self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
def test_task_log_begin_task_duplicate(self):
params = (self.context, 'fake', self.begin, self.end, self.host)
db.task_log_begin_task(*params, message=self.message)
self.assertRaises(exception.TaskAlreadyRunning,
db.task_log_begin_task,
*params, message=self.message)
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
self.end, self.host, errors, message=self.message)
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
def test_task_log_end_task_task_not_running(self):
self.assertRaises(exception.TaskNotRunning,
db.task_log_end_task, self.context, 'nonexistent',
self.begin, self.end, self.host, 42,
message=self.message)
class BlockDeviceMappingTestCase(test.TestCase):
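    """Tests for db.api.block_device_mapping_* methods."""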
def setUp(self):
super(BlockDeviceMappingTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def _create_bdm(self, values):
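        # Create a block device mapping for the test instance and return the
        # matching row as stored in the database.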
values.setdefault('instance_uuid', self.instance['uuid'])
values.setdefault('device_name', 'fake_device')
values.setdefault('source_type', 'volume')
values.setdefault('destination_type', 'volume')
block_dev = block_device.BlockDeviceDict(values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
uuid = block_dev['instance_uuid']
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
for bdm in bdms:
if bdm['device_name'] == values['device_name']:
return bdm
def test_scrub_empty_str_values_no_effect(self):
values = {'volume_size': 5}
expected = copy.copy(values)
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, expected)
def test_scrub_empty_str_values_empty_string(self):
values = {'volume_size': ''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_scrub_empty_str_values_empty_unicode(self):
values = {'volume_size': u''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_block_device_mapping_create(self):
bdm = self._create_bdm({})
        self.assertIsNotNone(bdm)
def test_block_device_mapping_update(self):
bdm = self._create_bdm({})
result = db.block_device_mapping_update(
self.ctxt, bdm['id'], {'destination_type': 'moon'},
legacy=False)
uuid = bdm['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(bdm_real[0]['destination_type'], 'moon')
        # Also make sure the update call returned the correct data.
self.assertEqual(dict(bdm_real[0].iteritems()),
dict(result.iteritems()))
def test_block_device_mapping_update_or_create(self):
values = {
'instance_uuid': self.instance['uuid'],
'device_name': 'fake_name',
'source_type': 'volume',
'destination_type': 'volume'
}
# check create
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
uuid = values['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
# check update
values['destination_type'] = 'camelot'
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'fake_name')
self.assertEqual(bdm_real['destination_type'], 'camelot')
# check create without device_name
bdm1 = dict(values)
bdm1['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 2)
bdm_real = bdm_real[1]
        self.assertIsNone(bdm_real['device_name'])
# check create multiple devices without device_name
bdm2 = dict(values)
bdm2['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 3)
bdm_real = bdm_real[2]
        self.assertIsNone(bdm_real['device_name'])
def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'guest_format': 'myformat',
}
bdm1 = dict(values)
bdm1['device_name'] = '/dev/sdb'
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm2 = dict(values)
bdm2['device_name'] = '/dev/sdc'
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 2)
for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
self.assertEqual(bdm['device_name'], device_name)
self.assertEqual(bdm['guest_format'], 'myformat')
def test_block_device_mapping_update_or_create_check_remove_virt(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
}
# check that old swap bdms are deleted on create
val1 = dict(values)
val1['device_name'] = 'device1'
db.block_device_mapping_create(self.ctxt, val1, legacy=False)
val2 = dict(values)
val2['device_name'] = 'device2'
db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'device2')
self.assertEqual(bdm_real['source_type'], 'blank')
self.assertEqual(bdm_real['guest_format'], 'swap')
db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
def test_block_device_mapping_get_all_by_instance(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
        bdms_values = [{'instance_uuid': uuid1,
                        'device_name': 'first'},
                       {'instance_uuid': uuid2,
                        'device_name': 'second'},
                       {'instance_uuid': uuid2,
                        'device_name': 'third'}]
        for bdm in bdms_values:
            self._create_bdm(bdm)
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
        self.assertEqual(len(bdms), 1)
        self.assertEqual(bdms[0]['device_name'], 'first')
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
        self.assertEqual(len(bdms), 2)
def test_block_device_mapping_destroy(self):
bdm = self._create_bdm({})
db.block_device_mapping_destroy(self.ctxt, bdm['id'])
bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
bdm['instance_uuid'])
self.assertEqual(len(bdm), 0)
    def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], 'fake2')
def test_block_device_mapping_destroy_by_instance_and_device(self):
self._create_bdm({'device_name': 'fake1'})
self._create_bdm({'device_name': 'fake2'})
uuid = self.instance['uuid']
params = (self.ctxt, uuid, 'fake1')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], 'fake2')
def test_block_device_mapping_get_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
self.assertEqual(bdm['volume_id'], 'fake_id')
def test_block_device_mapping_get_by_volume_id_join_instance(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
['instance'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.agent_build_* methods."""
def setUp(self):
super(AgentBuildTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_agent_build_create_and_get_all(self):
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
all_agent_builds = db.agent_build_get_all(self.ctxt)
self.assertEqual(1, len(all_agent_builds))
self._assertEqualObjects(agent_build, all_agent_builds[0])
def test_agent_build_get_by_triple(self):
agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
'os': 'FreeBSD', 'architecture': 'x86_64'})
self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
'FreeBSD', 'i386'))
self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', 'x86_64'))
def test_agent_build_destroy(self):
agent_build = db.agent_build_create(self.ctxt, {})
self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
def test_agent_build_update(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
def test_agent_build_destroy_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_destroy, self.ctxt, agent_build.id)
def test_agent_build_update_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
def test_agent_build_exists(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': 'x86_64'}
db.agent_build_create(self.ctxt, values)
self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
self.ctxt, values)
def test_agent_build_get_all_by_hypervisor(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': 'x86_64'}
created = db.agent_build_create(self.ctxt, values)
actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
self._assertEqualListsOfObjects([created], actual)
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
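    """Tests for db.api.virtual_interface_* methods."""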
def setUp(self):
super(VirtualInterfaceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project1'}
self.network = db.network_create_safe(self.ctxt, values)
def _get_base_values(self):
return {
'instance_uuid': self.instance_uuid,
'address': 'fake_address',
'network_id': self.network['id'],
'uuid': str(stdlib_uuid.uuid4())
}
def mock_db_query_first_to_raise_data_error_exception(self):
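        # Stub out query.Query.first so the next call raises DataError; the
        # DB API is expected to translate this into InvalidIpAddressError.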
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def _create_virt_interface(self, values):
v = self._get_base_values()
v.update(values)
return db.virtual_interface_create(self.ctxt, v)
def test_virtual_interface_create(self):
vif = self._create_virt_interface({})
        self.assertIsNotNone(vif['id'])
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'uuid']
self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
def test_virtual_interface_create_with_duplicate_address(self):
vif = self._create_virt_interface({})
self.assertRaises(exception.VirtualInterfaceCreateException,
self._create_virt_interface, {"uuid": vif['uuid']})
def test_virtual_interface_get(self):
vifs = [self._create_virt_interface({'address': 'a'}),
self._create_virt_interface({'address': 'b'})]
for vif in vifs:
real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address(self):
vifs = [self._create_virt_interface({'address': 'first'}),
self._create_virt_interface({'address': 'second'})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_address(self.ctxt,
vif['address'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address_not_found(self):
self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
"i.nv.ali.ip"))
def test_virtual_interface_get_by_address_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.virtual_interface_get_by_address,
self.ctxt,
"i.nv.ali.ip")
def test_virtual_interface_get_by_uuid(self):
vifs = [self._create_virt_interface({"address": "address_1"}),
self._create_virt_interface({"address": "address_2"})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
vifs1 = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2'})]
vifs2 = [self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self._assertEqualListsOfObjects(vifs1, vifs1_real)
self._assertEqualListsOfObjects(vifs2, vifs2_real)
def test_virtual_interface_get_by_instance_and_network(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project2'}
network_id = db.network_create_safe(self.ctxt, values)['id']
vifs = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2',
'network_id': network_id,
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
for vif in vifs:
params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
r_vif = db.virtual_interface_get_by_instance_and_network(*params)
self._assertEqualObjects(r_vif, vif)
def test_virtual_interface_delete_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
for vals in values:
self._create_virt_interface(vals)
db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
vifs = [self._create_virt_interface(val) for val in values]
real_vifs = db.virtual_interface_get_all(self.ctxt)
self._assertEqualListsOfObjects(vifs, real_vifs)
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.network_* methods."""
def setUp(self):
super(NetworkTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_associated_fixed_ip(self, host, cidr, ip):
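        # Wire up a network, instance, virtual interface and fixed IP so the
        # fixed IP shows up as allocated and associated on the given host.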
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr})
self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
host))
instance = db.instance_create(self.ctxt,
{'project_id': 'project1', 'host': host})
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network.id)
return network, instance
def test_network_get_associated_fixed_ips(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertEqual('192.0.2.1', data[0]['address'])
self.assertEqual('192.0.2.1', data[0]['vif_address'])
self.assertEqual(instance.uuid, data[0]['instance_uuid'])
self.assertTrue(data[0]['allocated'])
def test_network_create_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(36, len(network['uuid']))
db_network = db.network_get(self.ctxt, network['id'])
self._assertEqualObjects(network, db_network)
def test_network_create_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(self.ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, self.ctxt, values2)
def test_network_delete_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db_network = db.network_get(self.ctxt, network['id'])
values = {'network_id': network['id'], 'address': '192.168.1.5'}
address1 = db.fixed_ip_create(self.ctxt, values)['address']
values = {'network_id': network['id'],
'address': '192.168.1.6',
'allocated': True}
address2 = db.fixed_ip_create(self.ctxt, values)['address']
self.assertRaises(exception.NetworkInUse,
db.network_delete_safe, self.ctxt, network['id'])
db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
network = db.network_delete_safe(self.ctxt, network['id'])
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address1)
ctxt = self.ctxt.elevated(read_deleted='yes')
fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
self.assertTrue(fixed_ip['deleted'])
def test_network_in_use_on_host(self):
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(self.ctxt, values)
values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
vif = db.virtual_interface_create(self.ctxt, values)
values = {'address': '192.168.1.6',
'network_id': 1,
'allocated': True,
'instance_uuid': instance['uuid'],
'virtual_interface_id': vif['id']}
db.fixed_ip_create(self.ctxt, values)
        self.assertTrue(db.network_in_use_on_host(self.ctxt, 1, 'foo'))
        self.assertFalse(db.network_in_use_on_host(self.ctxt, 1, 'bar'))
def test_network_update_nonexistent(self):
self.assertRaises(exception.NetworkNotFound,
db.network_update, self.ctxt, 'nonexistent', {})
def test_network_update_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
network_ref = db.network_create_safe(self.ctxt, values1)
db.network_create_safe(self.ctxt, values2)
self.assertRaises(exception.DuplicateVlan,
db.network_update, self.ctxt,
network_ref["id"], values2)
def test_network_update(self):
network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
'vlan': 1, 'host': 'test.com'})
db.network_update(self.ctxt, network.id, {'vlan': 2})
network_new = db.network_get(self.ctxt, network.id)
self.assertEqual(2, network_new.vlan)
def test_network_set_host_nonexistent_network(self):
self.assertRaises(exception.NetworkNotFound,
db.network_set_host, self.ctxt, 'nonexistent', 'nonexistent')
    def test_network_set_host_with_already_set_host(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(
db.network_set_host(self.ctxt, network.id, 'new.example.com'),
'example.com')
def test_network_set_host(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(
db.network_set_host(self.ctxt, network.id, 'example.com'),
'example.com')
self.assertEqual('example.com',
db.network_get(self.ctxt, network.id).host)
def test_network_get_all_by_host(self):
self.assertEqual([],
db.network_get_all_by_host(self.ctxt, 'example.com'))
host = 'h1.example.com'
# network with host set
net1 = db.network_create_safe(self.ctxt, {'host': host})
self._assertEqualListsOfObjects([net1],
db.network_get_all_by_host(self.ctxt, host))
# network with fixed ip with host set
net2 = db.network_create_safe(self.ctxt, {})
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_host(self.ctxt, host))
# network with instance with host set
net3 = db.network_create_safe(self.ctxt, {})
instance = db.instance_create(self.ctxt, {'host': host})
vif = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid})
db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
'virtual_interface_id': vif.id})
self._assertEqualListsOfObjects([net1, net2, net3],
db.network_get_all_by_host(self.ctxt, host))
def test_network_get_by_cidr(self):
cidr = '192.0.2.0/30'
cidr_v6 = '2001:db8:1::/64'
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr))
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr_v6))
def test_network_get_by_cidr_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForCidr,
db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
def test_network_get_by_uuid(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project_1'})
self._assertEqualObjects(network,
db.network_get_by_uuid(self.ctxt, network.uuid))
def test_network_get_by_uuid_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForUUID,
db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
def test_network_get_all_by_uuids_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
def test_network_get_all_by_uuids(self):
net1 = db.network_create_safe(self.ctxt, {})
net2 = db.network_create_safe(self.ctxt, {})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
def test_network_get_all_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, self.ctxt)
def test_network_get_all(self):
network = db.network_create_safe(self.ctxt, {})
network_db = db.network_get_all(self.ctxt)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network, network_db[0])
def test_network_get_all_admin_user(self):
network1 = db.network_create_safe(self.ctxt, {})
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1'})
self._assertEqualListsOfObjects([network1, network2],
db.network_get_all(self.ctxt,
project_only=True))
def test_network_get_all_normal_user(self):
normal_ctxt = context.RequestContext('fake', 'fake')
db.network_create_safe(self.ctxt, {})
db.network_create_safe(self.ctxt, {'project_id': 'project1'})
network1 = db.network_create_safe(self.ctxt,
{'project_id': 'fake'})
network_db = db.network_get_all(normal_ctxt, project_only=True)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network1, network_db[0])
def test_network_get(self):
network = db.network_create_safe(self.ctxt, {})
self._assertEqualObjects(db.network_get(self.ctxt, network.id),
network)
db.network_delete_safe(self.ctxt, network.id)
self.assertRaises(exception.NetworkNotFound,
db.network_get, self.ctxt, network.id)
def test_network_associate(self):
network = db.network_create_safe(self.ctxt, {})
self.assertIsNone(network.project_id)
db.network_associate(self.ctxt, "project1", network.id)
self.assertEqual("project1", db.network_get(self.ctxt,
network.id).project_id)
    def test_network_disassociate(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'host': 'test.net'})
# disassociate project
db.network_disassociate(self.ctxt, network.id, False, True)
self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
# disassociate host
db.network_disassociate(self.ctxt, network.id, True, False)
self.assertIsNone(db.network_get(self.ctxt, network.id).host)
def test_network_count_reserved_ips(self):
net = db.network_create_safe(self.ctxt, {})
self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
db.fixed_ip_create(self.ctxt, {'network_id': net.id,
'reserved': True})
self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
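    """Tests for db.api.key_pair_* methods."""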
def setUp(self):
super(KeyPairTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_key_pair(self, values):
return db.key_pair_create(self.ctxt, values)
def test_key_pair_create(self):
param = {
'name': 'test_1',
'user_id': 'test_user_id_1',
'public_key': 'test_public_key_1',
'fingerprint': 'test_fingerprint_1'
}
key_pair = self._create_key_pair(param)
        self.assertIsNotNone(key_pair['id'])
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(key_pair, param, ignored_keys)
def test_key_pair_create_with_duplicate_name(self):
params = {'name': 'test_name', 'user_id': 'test_user_id'}
self._create_key_pair(params)
self.assertRaises(exception.KeyPairExists, self._create_key_pair,
params)
def test_key_pair_get(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_2'},
{'name': 'test_3', 'user_id': 'test_user_id_3'}
]
key_pairs = [self._create_key_pair(p) for p in params]
for key in key_pairs:
real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
self._assertEqualObjects(key, real_key)
def test_key_pair_get_no_results(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_get_deleted(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
key_pair_created = self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
ctxt = self.ctxt.elevated(read_deleted='yes')
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
param['name'])
ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
self._assertEqualObjects(key_pair_deleted, key_pair_created,
ignored_keys)
self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
def test_key_pair_get_all_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_1'},
{'name': 'test_3', 'user_id': 'test_user_id_2'}
]
key_pairs_user_1 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_1']
key_pairs_user_2 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_2']
real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_1'},
{'name': 'test_3', 'user_id': 'test_user_id_2'}
]
for p in params:
self._create_key_pair(p)
count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
self.assertEqual(count_1, 2)
count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
self.assertEqual(count_2, 1)
def test_key_pair_destroy(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_destroy_no_such_key(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound,
db.key_pair_destroy, self.ctxt,
param['user_id'], param['name'])
class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.quota_* methods."""
def setUp(self):
super(QuotaTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'resource0': 0,
'resource1': 1,
'resource2': 2})
def test_quota_get_all_by_project_and_user(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
j - 1, user_id='user%d' % i)
for i in range(3):
quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
'proj%d' % i,
'user%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'user_id': 'user%d' % i,
'resource0': -1,
'resource1': 0,
'resource2': 1})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update, self.ctxt, 'project1', 'resource1', 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get, self.ctxt, 'project1', 'resource1')
def test_quota_reserve_all_resources(self):
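        # Build quotas, deltas and real usages for every reservable resource,
        # reserve them all, then verify each reservation's recorded usage.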
quotas = {}
deltas = {}
reservable_resources = {}
for i, resource in enumerate(quota.resources):
if isinstance(resource, quota.ReservableResource):
quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
resource.name, 100)
deltas[resource.name] = i
reservable_resources[resource.name] = resource
usages = {'instances': 3, 'cores': 6, 'ram': 9}
instances = []
for i in range(3):
instances.append(db.instance_create(self.ctxt,
{'vcpus': 2, 'memory_mb': 3,
'project_id': 'project1'}))
usages['fixed_ips'] = 2
network = db.network_create_safe(self.ctxt, {})
for i in range(2):
address = '192.168.0.%d' % i
ip = db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
'address': address,
'network_id': network['id']})
db.fixed_ip_associate(self.ctxt, address,
instances[0].uuid, network['id'])
usages['floating_ips'] = 5
for i in range(5):
db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
usages['security_groups'] = 3
for i in range(3):
db.security_group_create(self.ctxt, {'project_id': 'project1'})
reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
quotas, quotas, deltas, None,
None, None, 'project1')
resources_names = reservable_resources.keys()
for reservation_uuid in reservations_uuids:
reservation = db.reservation_get(self.ctxt, reservation_uuid)
usage = db.quota_usage_get(self.ctxt, 'project1',
reservation.resource)
self.assertEqual(usage.in_use, usages[reservation.resource],
'Resource: %s' % reservation.resource)
self.assertEqual(usage.reserved, deltas[reservation.resource])
self.assertIn(reservation.resource, resources_names)
resources_names.remove(reservation.resource)
self.assertEqual(len(resources_names), 0)
def test_quota_destroy_all_by_project(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1', 'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, 'project1'),
{'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, r)
def test_quota_destroy_all_by_project_and_user(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
'user1')
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1',
'fixed_ips': {'in_use': 2, 'reserved': 2}})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, r)
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
                          self.ctxt, 'p1', 'nonexistent_resource')
def test_quota_usage_get(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
expected = {'resource': 'resource0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
def test_quota_usage_get_all_by_project_and_user(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'user_id': 'u1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'p1', 'u1'))
def test_quota_usage_update_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
self.ctxt, 'p1', 'u1', 'resource', in_use=42)
def test_quota_usage_update(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
reserved=43)
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
expected = {'resource': 'resource0', 'project_id': 'p1',
'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_create_exists(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
'project1', 'resource1', 42)
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
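    """Tests for db.api.quota_class_* methods."""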
def setUp(self):
super(QuotaClassTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_class_get_default(self):
params = {
'test_resource1': '10',
'test_resource2': '20',
'test_resource3': '30',
}
for res, limit in params.items():
db.quota_class_create(self.ctxt, 'default', res, limit)
defaults = db.quota_class_get_default(self.ctxt)
self.assertEqual(defaults, dict(class_name='default',
test_resource1=10,
test_resource2=20,
test_resource3=30))
def test_quota_class_create(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
self.assertEqual(qc.class_name, 'class name')
self.assertEqual(qc.resource, 'resource')
self.assertEqual(qc.hard_limit, 42)
def test_quota_class_get(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
self._assertEqualObjects(qc, qc_db)
def test_quota_class_get_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
self.ctxt, 'nonexistent', 'resource')
def test_quota_class_get_all_by_name(self):
for i in range(3):
for j in range(3):
db.quota_class_create(self.ctxt, 'class%d' % i,
'resource%d' % j, j)
for i in range(3):
classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
self.assertEqual(classes, {'class_name': 'class%d' % i,
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
def test_quota_class_update_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
self.ctxt, 'class name', 'resource', 42)
class S3ImageTestCase(test.TestCase):
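    """Tests for db.api.s3_image_* methods."""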
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in xrange(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
def test_s3_image_create(self):
for ref in self.images:
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(sorted(self.values),
sorted([ref.uuid for ref in self.images]))
def test_s3_image_get_by_uuid(self):
for uuid in self.values:
ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(uuid, ref.uuid)
def test_s3_image_get(self):
self.assertEqual(sorted(self.values),
sorted([db.s3_image_get(self.ctxt, ref.id).uuid
for ref in self.images]))
def test_s3_image_get_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
100500)
def test_s3_image_get_by_uuid_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
self.ctxt, uuidutils.generate_uuid())
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
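    """Tests for db.api.compute_node_* methods."""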
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=CONF.compute_topic, report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = self.stats
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def _stats_as_dict(self, stats):
d = {}
for s in stats:
key = s['key']
d[key] = s['value']
return d
def _stats_equal(self, stats, new_stats):
for k, v in stats.iteritems():
self.assertEqual(v, int(new_stats[k]))
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = self._stats_as_dict(self.item['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_get_all(self):
date_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
for no_date_fields in [False, True]:
nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
node_fields = set(node.keys())
if no_date_fields:
self.assertFalse(date_fields & node_fields)
else:
self.assertTrue(date_fields <= node_fields)
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
# delete the service and compute node when done and loop again
for x in range(2, 5):
# Create a service
service_data = self.service_dict.copy()
service_data['host'] = 'host-%s' % x
service = db.service_create(self.ctxt, service_data)
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats.copy()
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
# Ensure the "new" compute node is found
nodes = db.compute_node_get_all(self.ctxt, False)
self.assertEqual(2, len(nodes))
found = None
for n in nodes:
if n['id'] == node['id']:
found = n
break
            self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(self._stats_as_dict(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
db.compute_node_delete(self.ctxt, node['id'])
# Clean up the service
db.service_destroy(self.ctxt, service['id'])
def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
service_data = self.service_dict.copy()
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
existing_node = dict(self.item.iteritems())
existing_node['service'] = dict(self.service.iteritems())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats
            compute_node_data['hypervisor_hostname'] = name
node = db.compute_node_create(self.ctxt, compute_node_data)
node = dict(node.iteritems())
node['service'] = dict(service.iteritems())
expected.append(node)
result = sorted(db.compute_node_get_all(self.ctxt, False),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get(self):
compute_node_id = self.item['id']
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys + ['stats', 'service'])
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = self._stats_as_dict(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': stats,
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = self._stats_as_dict(item_updated['stats'])
self._stats_equal(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
db.compute_node_delete(self.ctxt, compute_node_id)
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(len(nodes), 0)
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in xrange(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = self.stats
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
self.assertEqual(3, len(nodes))
self._assertEqualListsOfObjects(nodes_created, nodes,
ignored_keys=self._ignored_keys + ['stats', 'service'])
def test_compute_node_statistics(self):
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 1)
for k, v in stats.iteritems():
self.assertEqual(v, self.item[k])
def test_compute_node_not_found(self):
self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
self.ctxt, 100500)
def test_compute_node_update_always_updates_updated_at(self):
item_updated = db.compute_node_update(self.ctxt,
self.item['id'], {})
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
def test_compute_node_update_override_updated_at(self):
# Update the record once so updated_at is set.
first = db.compute_node_update(self.ctxt, self.item['id'],
{'free_ram_mb': '12'})
self.assertIsNotNone(first['updated_at'])
# Update a second time. Make sure that the updated_at value we send
# is overridden.
second = db.compute_node_update(self.ctxt, self.item['id'],
{'updated_at': first.updated_at,
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_compute_node_stat_unchanged(self):
# don't update unchanged stat values:
stats = self.item['stats']
stats_updated_at = dict([(stat['key'], stat['updated_at'])
for stat in stats])
stats_values = self._stats_as_dict(stats)
new_values = {'stats': stats_values}
compute_node_id = self.item['id']
db.compute_node_update(self.ctxt, compute_node_id, new_values)
updated_node = db.compute_node_get(self.ctxt, compute_node_id)
updated_stats = updated_node['stats']
for stat in updated_stats:
self.assertEqual(stat['updated_at'], stats_updated_at[stat['key']])
def test_compute_node_stat_prune(self):
for stat in self.item['stats']:
if stat['key'] == 'num_instances':
num_instance_stat = stat
break
values = {
'stats': dict(num_instances=1)
}
db.compute_node_update(self.ctxt, self.item['id'], values,
prune_stats=True)
item_updated = db.compute_node_get_all(self.ctxt)[0]
self.assertEqual(1, len(item_updated['stats']))
stat = item_updated['stats'][0]
self.assertEqual(num_instance_stat['id'], stat['id'])
self.assertEqual(num_instance_stat['key'], stat['key'])
self.assertEqual(1, int(stat['value']))
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
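    """Tests for db.api.provider_fw_rule_* methods."""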
def setUp(self):
super(ProviderFwRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = self._get_rule_values()
self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
for rule in self.values]
def _get_rule_values(self):
cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in xrange(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
rule['to_port'] = 9898 + i
rule['cidr'] = cidr_samples[i]
values.append(rule)
return values
def test_provider_fw_rule_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, rule in enumerate(self.values):
self._assertEqualObjects(self.rules[i], rule,
ignored_keys=ignored_keys)
def test_provider_fw_rule_get_all(self):
self._assertEqualListsOfObjects(self.rules,
db.provider_fw_rule_get_all(self.ctxt))
def test_provider_fw_rule_destroy(self):
for rule in self.rules:
db.provider_fw_rule_destroy(self.ctxt, rule.id)
self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
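    """Tests for db.api.certificate_* methods."""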
def setUp(self):
super(CertificateTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.created = self._certificates_create()
def _get_certs_values(self):
base_values = {
'user_id': 'user',
'project_id': 'project',
'file_name': 'filename'
}
return [dict((k, v + str(x)) for k, v in base_values.iteritems())
for x in xrange(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
for cert in self._get_certs_values()]
def test_certificate_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, cert in enumerate(self._get_certs_values()):
self._assertEqualObjects(self.created[i], cert,
ignored_keys=ignored_keys)
def test_certificate_get_all_by_project(self):
cert = db.certificate_get_all_by_project(self.ctxt,
self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user(self):
cert = db.certificate_get_all_by_user(self.ctxt,
self.created[1].user_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user_and_project(self):
cert = db.certificate_get_all_by_user_and_project(self.ctxt,
self.created[1].user_id, self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
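    """Tests for db.api.console_* methods."""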
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
pools_data = [
{'address': '192.168.10.10',
'username': 'user1',
'password': 'passwd1',
'console_type': 'type1',
'public_hostname': 'public_host1',
'host': 'host1',
'compute_host': 'compute_host1',
},
{'address': '192.168.10.11',
'username': 'user2',
'password': 'passwd2',
'console_type': 'type2',
'public_hostname': 'public_host2',
'host': 'host2',
'compute_host': 'compute_host2',
},
]
self.console_pools = [db.console_pool_create(self.ctxt, val)
for val in pools_data]
instance_uuid = uuidutils.generate_uuid()
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [dict([('instance_name', 'name' + str(x)),
('instance_uuid', instance_uuid),
('password', 'pass' + str(x)),
('port', 7878 + x),
('pool_id', self.console_pools[x]['id'])])
for x in xrange(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
def test_console_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for console in self.consoles:
self.assertIsNotNone(console['id'])
self._assertEqualListsOfObjects(self.console_data, self.consoles,
ignored_keys=ignored_keys)
def test_console_get_by_id(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_id_uuid(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'],
console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_pool_instance(self):
console = self.consoles[0]
console_get = db.console_get_by_pool_instance(self.ctxt,
console['pool_id'], console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_all_by_instance(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfObjects(self.consoles, consoles_get)
def test_console_get_all_by_instance_with_pool(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
columns_to_join=['pool'])
self._assertEqualListsOfObjects(self.consoles, consoles_get,
ignored_keys=['pool'])
self._assertEqualListsOfObjects([pool for pool in self.console_pools],
[c['pool'] for c in consoles_get])
def test_console_get_all_by_instance_empty(self):
consoles_get = db.console_get_all_by_instance(self.ctxt,
uuidutils.generate_uuid())
self.assertEqual(consoles_get, [])
def test_console_delete(self):
console_id = self.consoles[0]['id']
db.console_delete(self.ctxt, console_id)
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, console_id)
def test_console_get_by_pool_instance_not_found(self):
self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
db.console_get_by_pool_instance, self.ctxt,
self.consoles[0]['pool_id'],
uuidutils.generate_uuid())
def test_console_get_not_found(self):
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, 100500)
def test_console_get_not_found_instance(self):
self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
self.ctxt, self.consoles[0]['id'],
uuidutils.generate_uuid())
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
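    """Tests for db.api.cell_* methods."""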
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(CellTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_cell_base_values(self):
return {
'name': 'myname',
'api_url': 'apiurl',
'transport_url': 'transporturl',
'weight_offset': 0.5,
'weight_scale': 1.5,
'is_parent': True,
}
def _cell_value_modify(self, value, step):
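        # Return a value of the same type as 'value', made unique per step.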
if isinstance(value, str):
return value + str(step)
elif isinstance(value, float):
return value + step + 0.6
elif isinstance(value, bool):
return bool(step % 2)
elif isinstance(value, int):
return value + step
def _create_cells(self):
test_values = []
for x in xrange(1, 4):
modified_val = dict([(k, self._cell_value_modify(v, x))
for k, v in self._get_cell_base_values().iteritems()])
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
def test_cell_create(self):
cell = db.cell_create(self.ctxt, self._get_cell_base_values())
        self.assertIsNotNone(cell['id'])
self._assertEqualObjects(cell, self._get_cell_base_values(),
ignored_keys=self._ignored_keys)
def test_cell_update(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
new_values = {
'api_url': 'apiurl1',
'transport_url': 'transporturl1',
'weight_offset': 0.6,
'weight_scale': 1.6,
'is_parent': False,
}
test_cellname = self._get_cell_base_values()['name']
updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
self._assertEqualObjects(updated_cell, new_values,
ignored_keys=self._ignored_keys + ['name'])
def test_cell_delete(self):
new_cells = self._create_cells()
for cell in new_cells:
test_cellname = cell['name']
db.cell_delete(self.ctxt, test_cellname)
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
test_cellname)
def test_cell_get(self):
new_cells = self._create_cells()
for cell in new_cells:
cell_get = db.cell_get(self.ctxt, cell['name'])
self._assertEqualObjects(cell_get, cell,
ignored_keys=self._ignored_keys)
def test_cell_get_all(self):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = dict([(newcell['name'],
newcell) for newcell in new_cells])
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)
def test_cell_get_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
'cellnotinbase')
def test_cell_update_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
'cellnotinbase', self._get_cell_base_values())
def test_cell_create_exists(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertRaises(exception.CellExists, db.cell_create,
self.ctxt, self._get_cell_base_values())
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
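    """Tests for db.api.console_pool_* methods."""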
def setUp(self):
super(ConsolePoolTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.test_console_pool_1 = {
'address': '192.168.2.10',
'username': 'user_1',
'password': 'secret_123',
'console_type': 'type_1',
'public_hostname': 'public_hostname_123',
'host': 'localhost',
'compute_host': '127.0.0.1',
}
self.test_console_pool_2 = {
'address': '192.168.2.11',
'username': 'user_2',
'password': 'secret_1234',
'console_type': 'type_2',
'public_hostname': 'public_hostname_1234',
'host': '127.0.0.1',
'compute_host': 'localhost',
}
self.test_console_pool_3 = {
'address': '192.168.2.12',
'username': 'user_3',
'password': 'secret_12345',
'console_type': 'type_2',
'public_hostname': 'public_hostname_12345',
'host': '127.0.0.1',
'compute_host': '192.168.1.1',
}
def test_console_pool_create(self):
console_pool = db.console_pool_create(
self.ctxt, self.test_console_pool_1)
self.assertTrue(console_pool.get('id') is not None)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(
console_pool, self.test_console_pool_1, ignored_keys)
def test_console_pool_create_duplicate(self):
db.console_pool_create(self.ctxt, self.test_console_pool_1)
self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
self.ctxt, self.test_console_pool_1)
def test_console_pool_get_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_1
db_cp = db.console_pool_get_by_host_type(
self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
)
self._assertEqualObjects(cp, db_cp, ignored_keys)
    def test_console_pool_get_by_host_type_no_results(self):
self.assertRaises(
exception.ConsolePoolNotFoundForHostType,
db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
'host', 'console_type')
def test_console_pool_get_all_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
self.test_console_pool_3,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_2
db_cp = db.console_pool_get_all_by_host_type(
self.ctxt, cp['host'], cp['console_type'])
self._assertEqualListsOfObjects(
db_cp, [self.test_console_pool_2, self.test_console_pool_3],
ignored_keys)
def test_console_pool_get_all_by_host_type_no_results(self):
res = db.console_pool_get_all_by_host_type(
self.ctxt, 'cp_host', 'cp_console_type')
self.assertEqual([], res)
class DnsdomainTestCase(test.TestCase):
def setUp(self):
super(DnsdomainTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.domain = 'test.domain'
self.testzone = 'testzone'
self.project = 'fake'
def test_dnsdomain_register_for_zone(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['availability_zone'], self.testzone)
self.assertEqual(domain['scope'], 'private')
def test_dnsdomain_register_for_project(self):
db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['project_id'], self.project)
self.assertEqual(domain['scope'], 'public')
def test_dnsdomain_list(self):
d_list = ['test.domain.one', 'test.domain.two']
db.dnsdomain_register_for_zone(self.ctxt, d_list[0], self.testzone)
db.dnsdomain_register_for_project(self.ctxt, d_list[1], self.project)
db_list = db.dnsdomain_list(self.ctxt)
self.assertEqual(sorted(d_list), sorted(db_list))
def test_dnsdomain_unregister(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
db.dnsdomain_unregister(self.ctxt, self.domain)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertIsNone(domain)
class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(BwUsageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.useFixture(test.TimeOverride())
def test_bw_usage_get_by_uuids(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
uuid3_refreshed = now - datetime.timedelta(seconds=5)
expected_bw_usages = [{'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now},
{'uuid': 'fake_uuid2',
'mac': 'fake_mac2',
'start_period': start_period,
'bw_in': 200,
'bw_out': 300,
'last_ctr_in': 22345,
'last_ctr_out': 77890,
'last_refreshed': now},
{'uuid': 'fake_uuid3',
'mac': 'fake_mac3',
'start_period': start_period,
'bw_in': 400,
'bw_out': 500,
'last_ctr_in': 32345,
'last_ctr_out': 87890,
'last_refreshed': uuid3_refreshed}]
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2'], start_period)
# No matches
self.assertEqual(len(bw_usages), 0)
# Add 3 entries
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
100, 200, 42, 42)
# Test explicit refreshed time
db.bw_usage_update(self.ctxt, 'fake_uuid3',
'fake_mac3', start_period,
400, 500, 32345, 87890,
last_refreshed=uuid3_refreshed)
# Update 2nd entry
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
200, 300, 22345, 77890)
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
self.assertEqual(len(bw_usages), 3)
for i, expected in enumerate(expected_bw_usages):
self._assertEqualObjects(bw_usages[i], expected,
ignored_keys=self._ignored_keys)
def test_bw_usage_get(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self.assertIsNone(bw_usage)
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self._assertEqualObjects(bw_usage, expected_bw_usage,
ignored_keys=self._ignored_keys)
class Ec2TestCase(test.TestCase):
def setUp(self):
super(Ec2TestCase, self).setUp()
self.ctxt = context.RequestContext('fake_user', 'fake_project')
def test_ec2_ids_not_found_are_printable(self):
def check_exc_format(method, value):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
self.assertTrue(unicode(value) in unicode(exc))
check_exc_format(db.get_ec2_volume_id_by_uuid, 'fake')
check_exc_format(db.get_volume_uuid_by_ec2_id, 123456)
check_exc_format(db.get_ec2_snapshot_id_by_uuid, 'fake')
check_exc_format(db.get_snapshot_uuid_by_ec2_id, 123456)
check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(vol['id'])
self.assertEqual(vol['uuid'], 'fake-uuid')
def test_get_ec2_volume_id_by_uuid(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol_id = db.get_ec2_volume_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(vol['id'], vol_id)
def test_get_volume_uuid_by_ec2_id(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol_uuid = db.get_volume_uuid_by_ec2_id(self.ctxt, vol['id'])
self.assertEqual(vol_uuid, 'fake-uuid')
def test_get_ec2_volume_id_by_uuid_not_found(self):
self.assertRaises(exception.VolumeNotFound,
db.get_ec2_volume_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_volume_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.VolumeNotFound,
db.get_volume_uuid_by_ec2_id,
self.ctxt, 100500)
def test_ec2_snapshot_create(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
def test_get_ec2_snapshot_id_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap_id = db.get_ec2_snapshot_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(snap['id'], snap_id)
def test_get_snapshot_uuid_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap_uuid = db.get_snapshot_uuid_by_ec2_id(self.ctxt, snap['id'])
self.assertEqual(snap_uuid, 'fake-uuid')
def test_get_ec2_snapshot_id_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.get_ec2_snapshot_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_snapshot_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.get_snapshot_uuid_by_ec2_id,
self.ctxt, 100500)
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(inst['id'])
self.assertEqual(inst['uuid'], 'fake-uuid')
def test_get_ec2_instance_id_by_uuid(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_id = db.get_ec2_instance_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(inst['id'], inst_id)
def test_get_instance_uuid_by_ec2_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
self.assertEqual(inst_uuid, 'fake-uuid')
def test_get_ec2_instance_id_by_uuid_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_ec2_instance_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_instance_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_instance_uuid_by_ec2_id,
self.ctxt, 100500)
class ArchiveTestCase(test.TestCase):
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.context = context.get_admin_context()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = db_utils.get_table(self.engine,
"instance_id_mappings")
self.shadow_instance_id_mappings = db_utils.get_table(self.engine,
"shadow_instance_id_mappings")
self.dns_domains = db_utils.get_table(self.engine, "dns_domains")
self.shadow_dns_domains = db_utils.get_table(self.engine,
"shadow_dns_domains")
self.consoles = db_utils.get_table(self.engine, "consoles")
self.console_pools = db_utils.get_table(self.engine, "console_pools")
self.shadow_consoles = db_utils.get_table(self.engine,
"shadow_consoles")
self.shadow_console_pools = db_utils.get_table(self.engine,
"shadow_console_pools")
self.instances = db_utils.get_table(self.engine, "instances")
self.shadow_instances = db_utils.get_table(self.engine,
"shadow_instances")
self.uuidstrs = []
for unused in range(6):
self.uuidstrs.append(stdlib_uuid.uuid4().hex)
self.ids = []
self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
"instances"])
self.domain_tablenames_to_cleanup = set(["dns_domains"])
def tearDown(self):
super(ArchiveTestCase, self).tearDown()
for tablename in self.id_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.id.in_(self.ids))
self.conn.execute(del_statement)
for tablename in self.uuid_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
self.conn.execute(del_statement)
for tablename in self.domain_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
self.conn.execute(del_statement)
def test_shadow_tables(self):
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
if table_name.startswith("shadow_"):
self.assertIn(table_name[7:], metadata.tables)
continue
self.assertTrue(db_utils.check_shadow_table(self.engine,
table_name))
def test_archive_deleted_rows(self):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qiim = select([self.instance_id_mappings]).where(self.
instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qsiim = select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 4 left in main
self.assertEqual(len(rows), 4)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 2 in shadow
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 4 in shadow
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we still have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we still have 4 in shadow
self.assertEqual(len(rows), 4)
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
tablenames.sort()
for tablename in tablenames:
ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
if ret == 0:
self.uuid_tablenames_to_cleanup.add(tablename)
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
"""
:returns: 0 on success, 1 if no uuid column, 2 if insert failed
"""
main_table = db_utils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = db_utils.get_table(self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except IntegrityError:
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
update_statement = main_table.update().\
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qmt = select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qst = select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 4 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 4)
# Verify we have 2 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we still have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we still have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
return 0
def test_archive_deleted_rows_no_id_column(self):
uuidstr0 = self.uuidstrs[0]
ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
self.conn.execute(ins_stmt)
update_statement = self.dns_domains.update().\
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
qdd = select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
qsdd = select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
db.archive_deleted_rows(self.context, max_rows=1)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 1)
def test_archive_deleted_rows_fk_constraint(self):
# consoles.pool_id depends on console_pools.id
# SQLite doesn't enforce foreign key constraints without a pragma.
dialect = self.engine.url.get_dialect()
if dialect == sqlite.dialect:
# We're seeing issues with foreign key support in SQLite 3.6.20
# SQLAlchemy doesn't support it at all with < SQLite 3.6.19
# It works fine in SQLite 3.7.
# So return early to skip this test if running SQLite < 3.7
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
self.conn.execute("PRAGMA foreign_keys = ON")
ins_stmt = self.console_pools.insert().values(deleted=1)
result = self.conn.execute(ins_stmt)
id1 = result.inserted_primary_key[0]
self.ids.append(id1)
ins_stmt = self.consoles.insert().values(deleted=1,
pool_id=id1)
result = self.conn.execute(ins_stmt)
id2 = result.inserted_primary_key[0]
self.ids.append(id2)
# The first try to archive console_pools should fail, due to FK.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 0)
# Then archiving consoles should work.
num = db.archive_deleted_rows_for_table(self.context, "consoles")
self.assertEqual(num, 1)
# Then archiving console_pools should work.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 1)
def test_archive_deleted_rows_2_tables(self):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
qiim = select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
qi = select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
qsiim = select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
qsi = select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(self.context, max_rows=7)
# Verify we have 5 left in the two main tables combined
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 5)
# Verify we have 7 in the two shadow tables combined.
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(self.context, max_rows=1)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=500)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(InstanceGroupDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
def test_instance_group_create_no_key(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
def test_instance_group_create_with_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
def test_instance_group_create_with_same_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
self.assertRaises(exception.InstanceGroupIdExists,
self._create_instance_group, self.context, values)
def test_instance_group_get(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self._assertEqualObjects(result1, result2)
def test_instance_group_update_simple(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, result1['uuid'],
values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self.assertEquals(result1['uuid'], result2['uuid'])
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result2, values, ignored_keys)
def test_instance_group_delete(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
db.instance_group_delete(self.context, result['uuid'])
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete, self.context,
result['uuid'])
def test_instance_group_get_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get,
self.context,
'nonexistent')
def test_instance_group_delete_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete,
self.context,
'nonexistent')
def test_instance_group_get_all(self):
groups = db.instance_group_get_all(self.context)
self.assertEquals(0, len(groups))
value = self._get_default_values()
result1 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
self.assertEquals(1, len(groups))
value = self._get_default_values()
result2 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
def test_instance_group_get_all_by_project_id(self):
groups = db.instance_group_get_all_by_project_id(self.context,
'invalid_project_id')
self.assertEquals(0, len(groups))
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all_by_project_id(self.context,
'fake_project')
self.assertEquals(1, len(groups))
values = self._get_default_values()
values['project_id'] = 'new_project_id'
result2 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
projects = [{'name': 'fake_project', 'value': [result1]},
{'name': 'new_project_id', 'value': [result2]}]
for project in projects:
groups = db.instance_group_get_all_by_project_id(self.context,
project['name'])
self._assertEqualListsOfObjects(project['value'], groups)
def test_instance_group_update(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
id = result['uuid']
values = self._get_default_values()
values['name'] = 'new_fake_name'
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self.assertEquals(result['name'], 'new_fake_name')
# update metadata
values = self._get_default_values()
metadataInput = {'key11': 'value1',
'key12': 'value2'}
values['metadata'] = metadataInput
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
metadata = result['metadetails']
self._assertEqualObjects(metadata, metadataInput)
        # update members
values = self._get_default_values()
members = ['instance_id1', 'instance_id2']
values['members'] = members
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
        # update policies
values = self._get_default_values()
policies = ['policy1', 'policy2']
values['policies'] = policies
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
# test invalid ID
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_update, self.context,
'invalid_id', values)
class InstanceGroupMetadataDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_metadata_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
metadata = {'key11': 'value1',
'key12': 'value2'}
result = self._create_instance_group(self.context, values,
metadata=metadata)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualObjects(metadata, result['metadetails'])
def test_instance_group_metadata_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, {})
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata2 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata2)
def test_instance_group_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata2 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata2)
# check add with existing keys
metadata = {'key1': 'value1',
'key2': 'value2',
'key3': 'value3'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata3 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata3)
def test_instance_group_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = {'key1': 'value1',
'key2': 'value2',
'key3': 'value3'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata3 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata3)
db.instance_group_metadata_delete(self.context, id, 'key1')
metadata = db.instance_group_metadata_get(self.context, id)
self.assertTrue('key1' not in metadata)
db.instance_group_metadata_delete(self.context, id, 'key2')
metadata = db.instance_group_metadata_get(self.context, id)
self.assertTrue('key2' not in metadata)
def test_instance_group_metadata_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_metadata_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_metadata_delete, self.context,
'invalidid', 'key1')
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
self.assertRaises(exception.InstanceGroupMetadataNotFound,
db.instance_group_metadata_delete,
self.context, id, 'invalidkey')
class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_members_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
members = ['instance_id1', 'instance_id2']
result = self._create_instance_group(self.context, values,
members=members)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
def test_instance_group_members_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members = db.instance_group_members_get(self.context, id)
self.assertEquals(members, [])
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
def test_instance_group_members_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
# check add with existing keys
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
for instance_id in members3[:]:
db.instance_group_member_delete(self.context, id, instance_id)
members3.remove(instance_id)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_members_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_member_delete, self.context,
'invalidid', 'instance_id1')
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members)
self.assertRaises(exception.InstanceGroupMemberNotFound,
db.instance_group_member_delete,
self.context, id, 'invalid_id')
class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_policies_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
policies = ['policy1', 'policy2']
result = self._create_instance_group(self.context, values,
policies=policies)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
def test_instance_group_policies_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies = db.instance_group_policies_get(self.context, id)
self.assertEquals(policies, [])
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
def test_instance_group_policies_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
for policy in policies3[:]:
db.instance_group_policy_delete(self.context, id, policy)
policies3.remove(policy)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policies_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policy_delete, self.context,
'invalidid', 'policy1')
policies = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies)
self.assertRaises(exception.InstanceGroupPolicyNotFound,
db.instance_group_policy_delete,
self.context, id, 'invalid_policy')
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(PciDeviceDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.admin_context = context.get_admin_context()
self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
def _get_fake_pci_devs(self):
return {'id': 3353,
'compute_node_id': 1,
'address': '0000:0f:08:07',
'vendor_id': '8086',
'product_id': '1520',
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08:07',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
}, {'id': 3356,
'compute_node_id': 1,
'address': '0000:0f:03:07',
'vendor_id': '8083',
'product_id': '1523',
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08:07',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
}
def _create_fake_pci_devs(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
return (v1, v2)
def test_pci_device_get_by_addr(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_addr(self.admin_context, 1,
'0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_addr_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_get_by_addr, self.admin_context,
1, '0000:0f:08:09')
def test_pci_device_get_by_addr_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_addr,
self.context, 1, '0000:0f:08:07')
def test_pci_device_get_by_id(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_id(self.admin_context, 3353)
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_id_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFoundById,
db.pci_device_get_by_id,
self.admin_context, 3354)
def test_pci_device_get_by_id_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_id,
self.context, 3553)
def test_pci_device_get_all_by_node(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_all_by_node_empty(self):
v1, v2 = self._get_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 9)
self.assertEqual(len(results), 0)
def test_pci_device_get_all_by_node_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_all_by_node,
self.context, 1)
def test_pci_device_get_by_instance_uuid(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_by_instance_uuid_check_status(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
def test_pci_device_update(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
v1['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_update_low_priv(self):
v1, v2 = self._get_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_update, self.context,
v1['compute_node_id'], v1['address'], v1)
def test_pci_device_destroy(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
v1['address'])
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
def test_pci_device_destroy_exception(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_destroy,
self.admin_context,
v2['compute_node_id'],
v2['address'])
|
TieWei/nova
|
nova/tests/db/test_db_api.py
|
Python
|
apache-2.0
| 311,863
|
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, MEDIA_PLAYER_SCHEMA, MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_STOP,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOUND_MODE, MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, STATE_IDLE, STATE_OFF, STATE_ON,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.5.1']
_LOGGER = logging.getLogger(__name__)
ATTR_ENABLED = 'enabled'
ATTR_PORT = 'port'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_SOURCE_NAMES = 'source_names'
CONF_ZONE_IGNORE = 'zone_ignore'
CONF_ZONE_NAMES = 'zone_names'
DATA_YAMAHA = 'yamaha_known_receivers'
DEFAULT_NAME = "Yamaha Receiver"
ENABLE_OUTPUT_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_ENABLED): cv.boolean,
vol.Required(ATTR_PORT): cv.string,
})
SERVICE_ENABLE_OUTPUT = 'yamaha_enable_output'
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY \
| SUPPORT_SELECT_SOUND_MODE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
vol.Optional(CONF_ZONE_NAMES, default={}): {cv.string: cv.string},
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Yamaha platform."""
import rxv
# Keep track of configured receivers so that we don't end up
# discovering a receiver dynamically that we have static config
# for. Map each device from its zone_id to an instance since
# YamahaDevice is not hashable (thus not possible to add to a set).
if hass.data.get(DATA_YAMAHA) is None:
hass.data[DATA_YAMAHA] = {}
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
zone_names = config.get(CONF_ZONE_NAMES)
if discovery_info is not None:
name = discovery_info.get('name')
model = discovery_info.get('model_name')
ctrl_url = discovery_info.get('control_url')
desc_url = discovery_info.get('description_url')
receivers = rxv.RXV(
ctrl_url, model_name=model, friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.debug("Receivers: %s", receivers)
        # When the receiver is discovered dynamically, the config is empty.
zone_ignore = []
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
devices = []
for receiver in receivers:
if receiver.zone in zone_ignore:
continue
device = YamahaDevice(
name, receiver, source_ignore, source_names, zone_names)
# Only add device if it's not already added
if device.zone_id not in hass.data[DATA_YAMAHA]:
hass.data[DATA_YAMAHA][device.zone_id] = device
devices.append(device)
else:
_LOGGER.debug("Ignoring duplicate receiver: %s", name)
def service_handler(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
devices = [device for device in hass.data[DATA_YAMAHA].values()
if not entity_ids or device.entity_id in entity_ids]
for device in devices:
port = service.data[ATTR_PORT]
enabled = service.data[ATTR_ENABLED]
device.enable_output(port, enabled)
device.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_ENABLE_OUTPUT, service_handler,
schema=ENABLE_OUTPUT_SCHEMA)
add_entities(devices)
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
def __init__(
self, name, receiver, source_ignore, source_names, zone_names):
"""Initialize the Yamaha Receiver."""
self.receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._sound_mode = None
self._sound_mode_list = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._zone_names = zone_names or {}
self._reverse_mapping = None
self._playback_support = None
self._is_playback_supported = False
self._play_status = None
self._name = name
self._zone = receiver.zone
def update(self):
"""Get the latest details from the device."""
try:
self._play_status = self.receiver.play_status()
except requests.exceptions.ConnectionError:
_LOGGER.info("Receiver is offline: %s", self._name)
return
if self.receiver.on:
if self._play_status is None:
self._pwstate = STATE_ON
elif self._play_status.playing:
self._pwstate = STATE_PLAYING
else:
self._pwstate = STATE_IDLE
else:
self._pwstate = STATE_OFF
self._muted = self.receiver.mute
self._volume = (self.receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self.receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
self._playback_support = self.receiver.get_playback_support()
self._is_playback_supported = self.receiver.is_playback_supported(
self._current_source)
surround_programs = self.receiver.surround_programs()
if surround_programs:
self._sound_mode = self.receiver.surround_program
self._sound_mode_list = surround_programs
else:
self._sound_mode = None
self._sound_mode_list = None
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self.receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
zone_name = self._zone_names.get(self._zone, self._zone)
if zone_name != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + zone_name.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def sound_mode(self):
"""Return the current sound mode."""
return self._sound_mode
@property
def sound_mode_list(self):
"""Return the current sound mode."""
return self._sound_mode_list
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def zone_id(self):
"""Return a zone_id to ensure 1 media player per zone."""
return '{0}:{1}'.format(self.receiver.ctrl_url, self._zone)
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_YAMAHA
supports = self._playback_support
mapping = {
'play': (SUPPORT_PLAY | SUPPORT_PLAY_MEDIA),
'pause': SUPPORT_PAUSE,
'stop': SUPPORT_STOP,
'skip_f': SUPPORT_NEXT_TRACK,
'skip_r': SUPPORT_PREVIOUS_TRACK,
}
for attr, feature in mapping.items():
if getattr(supports, attr, False):
supported_features |= feature
return supported_features
def turn_off(self):
"""Turn off media player."""
self.receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self.receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self.receiver.on = True
self._volume = (self.receiver.volume / 100) + 1
def media_play(self):
"""Send play command."""
self._call_playback_function(self.receiver.play, "play")
def media_pause(self):
"""Send pause command."""
self._call_playback_function(self.receiver.pause, "pause")
def media_stop(self):
"""Send stop command."""
self._call_playback_function(self.receiver.stop, "stop")
def media_previous_track(self):
"""Send previous track command."""
self._call_playback_function(self.receiver.previous, "previous track")
def media_next_track(self):
"""Send next track command."""
self._call_playback_function(self.receiver.next, "next track")
def _call_playback_function(self, function, function_text):
import rxv
try:
function()
except rxv.exceptions.ResponseException:
_LOGGER.warning(
"Failed to execute %s on %s", function_text, self._name)
def select_source(self, source):
"""Select input source."""
self.receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
        This exposes a pass-through for various input sources on the
        Yamaha so that certain kinds of media can be played directly.
        media_type is treated as the input type being selected, and
        media_id is specific to that input.
        For the NET RADIO media type, the format of ``media_id`` is a
        "path" in your vtuner hierarchy, for instance
        ``Bookmarks>Internet>Radio Paradise``. The separator is ``>``
        and the parts of the path are navigated by name behind the
        scenes. The yamaha library has a looping construct to do this,
        with a fallback timeout in case the vtuner service is
        unresponsive.
        NOTE: this might take a while, because the only API interface
        for setting the net radio station emulates button presses to
        navigate the net radio menu hierarchy, and each submenu must be
        fetched by the receiver from the vtuner service.
"""
if media_type == "NET RADIO":
self.receiver.net_radio(media_id)
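    # Hedged usage sketch (not part of the component): given a YamahaDevice
    # instance bound to a receiver, a vtuner bookmark could be started with
    # a call like the one below; the bookmark path is a made-up example.
    #
    #   device.play_media("NET RADIO", "Bookmarks>Internet>Radio Paradise")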
def enable_output(self, port, enabled):
"""Enable or disable an output port.."""
self.receiver.enable_output(port, enabled)
def select_sound_mode(self, sound_mode):
"""Set Sound Mode for Receiver.."""
self.receiver.surround_program = sound_mode
@property
def media_artist(self):
"""Artist of current playing media."""
if self._play_status is not None:
return self._play_status.artist
@property
def media_album_name(self):
"""Album of current playing media."""
if self._play_status is not None:
return self._play_status.album
@property
def media_content_type(self):
"""Content type of current playing media."""
# Loose assumption that if playback is supported, we are playing music
if self._is_playback_supported:
return MEDIA_TYPE_MUSIC
return None
@property
def media_title(self):
"""Artist of current playing media."""
if self._play_status is not None:
song = self._play_status.song
station = self._play_status.station
            # If both song and station are available, return both;
            # otherwise return whichever one we have.
if song and station:
return '{}: {}'.format(station, song)
return song or station
|
tinloaf/home-assistant
|
homeassistant/components/media_player/yamaha.py
|
Python
|
apache-2.0
| 13,441
|
__author__ = 'Sulantha'
import logging
class PipelineLogger:
    logLevels = ('info', 'debug', 'warning', 'error', 'critical', 'exception')

    @staticmethod
    def log(moduleName, level, message):
        level = level.lower()
        if level not in PipelineLogger.logLevels:
            raise KeyError(level)
        # Log through the logger registered under moduleName so the record
        # carries that name instead of always going to the root logger.
        getattr(logging.getLogger(moduleName), level)(message)
|
sulantha2006/Processing_Pipeline
|
Utils/PipelineLogger.py
|
Python
|
apache-2.0
| 498
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.images import views
urlpatterns = patterns('openstack_dashboard.dashboards.admin.images.views',
url(r'^images/$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<image_id>[^/]+)/detail/$',
views.DetailView.as_view(), name='detail')
)
|
spandanb/horizon
|
openstack_dashboard/dashboards/admin/images/urls.py
|
Python
|
apache-2.0
| 1,311
|
import turtle
# Draw five interlocking rings (Olympic-style): three across the top row and
# two offset below. Each ring differs only by position and outline colour,
# so draw them in a loop.
kay = turtle.Turtle()
kay.width(5)

rings = [(-125, 0, 'blue'),
         (0, 0, 'black'),
         (125, 0, 'red'),
         (-62.5, -50, 'yellow'),
         (62.5, -50, 'green')]

for x, y, outline in rings:
    kay.penup()
    kay.goto(x, y)
    kay.pendown()
    kay.color(outline, 'yellow')
    kay.circle(50)

kay.getscreen().exitonclick()
|
khoanguyen0791/cs170
|
CS170_homework/Circles.py
|
Python
|
apache-2.0
| 579
|
import time
from Facturacion.Objects.Bill import Bill, ItemLine
from Facturacion.Controllers.ControlDB import BillsController
from Facturacion.Collections.Vendors import Vendors
from Facturacion.Collections.Clients import Clients
from Facturacion.Collections.Items import Items
class Bills:
facturas = {}
controller = BillsController()
@classmethod
def load_bills(cls):
for bill in cls.controller.get_bills():
cls.facturas[bill[0]] = Bill(bill[0], Vendors.vendedores[bill[1]], Clients.clientes[bill[2]], bill[3])
cls.load_items(bill[0])
@classmethod
def load_items(cls, cod_factura):
factura = cls.facturas[cod_factura]
for item in cls.controller.get_items(cod_factura):
factura.add_item(ItemLine(factura, Items.articulos[item[0]], item[1]))
@classmethod
def get_max_code(cls):
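        # Bill codes are integer keys; return the next free code, or 1 when
        # there are no bills yet.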
if not cls.facturas:
return 1
return max(cls.facturas, key=int) + 1
@classmethod
def items_to_il(cls, cod_factura, dict_articulos):
il_list = []
for item, cant in dict_articulos.items():
il_list.append(ItemLine(Bills.facturas[cod_factura], item, cant))
return il_list
@classmethod
def add_bill(cls, cod_factura, cif_vendedor, cif_cliente, dict_articulos):
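        # Ignore duplicate bill codes; otherwise build the bill, register it
        # in memory and in the database, then persist each item line.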
if cod_factura in cls.facturas:
return
vendedor = Vendors.vendedores[cif_vendedor]
cliente = Clients.clientes[cif_cliente]
fecha = time.strftime("%d/%m/%Y")
factura = Bill(cod_factura, vendedor, cliente, fecha)
cls.facturas[cod_factura] = factura
cls.controller.add_bill(factura)
for linea in cls.items_to_il(cod_factura, dict_articulos):
factura.add_item(linea)
cls.controller.add_line(linea)
|
IhToN/DAW1-PRG
|
Facturacion/Collections/Bills.py
|
Python
|
apache-2.0
| 1,808
|
# Copyright (c) 2016, GohighSec
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import six
from hacking import core
import pycodestyle
"""
Guidelines for writing new hacking checks
- Use only for Barbican specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range B3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the B3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to barbican/tests/test_hacking.py
"""
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
assert_None = re.compile(r".*assertEqual\(None, .*\)")
assert_Not_Equal = re.compile(r".*assertNotEqual\(None, .*\)")
assert_Is_Not = re.compile(r".*assertIsNot\(None, .*\)")
no_log_warn = re.compile(r".*LOG.warn\(.*\)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
"""This object is created automatically by pycodestyle.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pycodestyle."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
class CheckLoggingFormatArgs(BaseASTChecker):
"""Check for improper use of logging format arguments.
LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
('volume1', 500))
The format arguments should not be a tuple as it is easy to miss.
"""
name = "check_logging_format_args"
version = "1.0"
CHECK_DESC = 'B310 Log method arguments should not be a tuple.'
LOG_METHODS = [
'debug', 'info',
'warn', 'warning',
'error', 'exception',
'critical', 'fatal',
'trace', 'log'
]
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, six.string_types):
return node
else: # could be Subscript, Call or many more
return None
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# obj must be a logger instance and method must be a log helper
if (obj_name != 'LOG'
or method_name not in self.LOG_METHODS):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
        # no argument should be a tuple
for arg in node.args:
if isinstance(arg, ast.Tuple):
self.add_error(arg)
return super(CheckLoggingFormatArgs, self).generic_visit(node)
@core.flake8ext
def check_oslo_namespace_imports(physical_line, logical_line, filename):
"""'oslo_' should be used instead of 'oslo.'
B317
"""
if pycodestyle.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("B317: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
"""Use a dict comprehension instead of a dict constructor
B318
"""
msg = ("B318: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_xrange(logical_line):
"""Do not use 'xrange'
B319
"""
if assert_no_xrange_re.match(logical_line):
yield(0, "B319: Do not use xrange().")
@core.flake8ext
def validate_assertTrue(logical_line):
"""Use 'assertTrue' instead of 'assertEqual'
B312
"""
if re.match(assert_True, logical_line):
msg = ("B312: Unit tests should use assertTrue(value) instead"
" of using assertEqual(True, value).")
yield(0, msg)
@core.flake8ext
def validate_assertIsNone(logical_line):
"""Use 'assertIsNone' instead of 'assertEqual'
B311
"""
if re.match(assert_None, logical_line):
msg = ("B311: Unit tests should use assertIsNone(value) instead"
" of using assertEqual(None, value).")
yield(0, msg)
@core.flake8ext
def no_log_warn_check(logical_line):
"""Disallow 'LOG.warn'
B320
"""
msg = ("B320: LOG.warn is deprecated, please use LOG.warning!")
if re.match(no_log_warn, logical_line):
yield(0, msg)
@core.flake8ext
def validate_assertIsNotNone(logical_line):
"""Use 'assertIsNotNone'
B321
"""
if re.match(assert_Not_Equal, logical_line) or \
re.match(assert_Is_Not, logical_line):
msg = ("B321: Unit tests should use assertIsNotNone(value) instead"
" of using assertNotEqual(None, value) or"
" assertIsNot(None, value).")
yield(0, msg)
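# --- Illustrative sketch only; not part of barbican's checks. ---
# Following the guidelines at the top of this module, a new check would claim the
# next free B3xx number, keep the functions ordered by that number, be listed in
# HACKING.rst and get tests in barbican/tests/test_hacking.py. The rule number
# "B322" and the regex below are hypothetical examples, not real barbican rules.
no_eval_re = re.compile(r".*\beval\(")
@core.flake8ext
def hypothetical_no_eval(logical_line):
    """Do not use eval() (hypothetical example rule)
    B322
    """
    if no_eval_re.match(logical_line):
        yield (0, "B322: Do not use eval().")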
|
openstack/barbican
|
barbican/hacking/checks.py
|
Python
|
apache-2.0
| 7,978
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 03 13:06:56 2015
@author: Eunice
"""
if __name__ == '__main__':
import sys
sys.path.append("..")
from engine import bar
# The imports above are for testing only
from engine.broker.fillstrategy import DefaultStrategy
from engine.broker.backtesting import TradePercentage
from engine import strategy
from engine.technical import ma
from engine.technical import cross
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1,self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
#self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i-1] > self.__ma3[-i-1]:
m1 += 1
if self.__ma2[-i-1] > self.__ma3[-i-1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long position.
        if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
#self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
                print(bars[self.__instrument].getDateTime(), bars[self.__instrument].getPrice())
#self.info("buy %s" % (bars.getDateTime()))
def testStrategy():
from engine import bar
strat = thrSMA
instrument = '600288'
market = 'SH'
fromDate = '20150101'
    toDate = '20150601'
frequency = bar.Frequency.MINUTE
paras = [2, 20, 60, 10]
plot = True
    ############################# path set #############################
    import os
    print(os.path.split(os.path.realpath(__file__)))
if frequency == bar.Frequency.MINUTE:
path = os.path.join(os.environ.get('STRATEGYPATH'), '..', 'histdata', 'minute')
elif frequency == bar.Frequency.DAY:
path = os.path.join(os.environ.get('STRATEGYPATH'), '..', 'histdata', 'day')
filepath = os.path.join(path, instrument + market + ".csv")
    ############################# don't change #############################
from engine.barfeed.csvfeed import Feed
barfeed = Feed(frequency)
barfeed.setDateTimeFormat('%Y-%m-%d %H:%M:%S')
barfeed.loadBars(instrument, market, fromDate, toDate, filepath)
engine_id = instrument + '.' + market
strat = strat(barfeed, engine_id, *paras)
from engine.stratanalyzer import returns
from engine.stratanalyzer import sharpe
from engine.stratanalyzer import drawdown
from engine.stratanalyzer import trades
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
strat.run()
    # Sharpe ratio
    sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # Maximum drawdown
    maxdd = drawDownAnalyzer.getMaxDrawDown()
    # Cumulative return
    return_ = retAnalyzer.getCumulativeReturns()[-1]
    # Cumulative return curve
return_list = []
for item in retAnalyzer.getCumulativeReturns():
return_list.append(item)
def run_strategy(ticker, account_id, paras):
    print(ticker)
    print(account_id)
    print(paras)
strat = testStrategy()
if __name__ == "__main__":
testStrategy()
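# --- Hedged worked example (illustrative only; not part of the original strategy). ---
# onBars sizes a new long position as 20% of available cash divided by the bar price,
# e.g. with 100000 cash and a price of 12.5 it buys int(100000 * 0.2 / 12.5) = 1600 shares.
def _position_size_example(cash=100000.0, price=12.5):
    return int(cash * 0.2 / price)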
|
Yam-cn/potato
|
stratlib/thrSMA.py
|
Python
|
apache-2.0
| 5,091
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(PreTrainedTokenizer):
r"""
Constructs a BertTokenizer.
:class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
        do_lower_case: Whether to lower case the input. Only has an effect when
            do_basic_tokenize=True
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; the effective
            maximum length is always the minimum of this value (if specified) and the underlying
            BERT model's sequence length.
        never_split: List of tokens which will never be split during tokenization. Only has an
            effect when do_basic_tokenize=True
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
pad_token=pad_token, cls_token=cls_token,
mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def add_special_tokens_single_sentence(self, token_ids):
"""
        Adds special tokens to a sequence for sequence classification tasks.
A BERT sequence has the following format: [CLS] X [SEP]
"""
return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
"""
Adds special tokens to a sequence pair for sequence classification tasks.
A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
"""
sep = [self._convert_token_to_id(self.sep_token)]
cls = [self._convert_token_to_id(self.cls_token)]
return cls + token_ids_0 + sep + token_ids_1 + sep
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
""" Instantiate a BertTokenizer from pre-trained vocabulary files.
"""
if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES:
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
return super(BertTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
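# --- Hedged usage sketch (illustrative only; not part of the original module). ---
# Demonstrates the greedy longest-match-first WordPiece algorithm documented in
# WordpieceTokenizer.tokenize, using a tiny hypothetical vocabulary.
def _wordpiece_demo():
    vocab = collections.OrderedDict(
        (token, index) for index, token in enumerate(["[UNK]", "un", "##aff", "##able"]))
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    # "unaffable" -> ["un", "##aff", "##able"]
    return tokenizer.tokenize("unaffable")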
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
jessevig/bertviz
|
bertviz/transformers_neuron_view/tokenization_bert.py
|
Python
|
apache-2.0
| 20,131
|
#!/usr/bin/python
__author__ = 'CGS'
import os, shutil, sys, distutils.core, distutils.dir_util, subprocess
# Some configuration needed for this file
apps_directory = ""
apps = {"variants": "apps/variants"}
PRODUCTION = False
# TODO: better management of errors
# Some basic checks
if os.getuid() != 0:
sys.exit("This program requires super user privileges.")
if len(sys.argv) <= 1:
sys.exit("Please, give the name of the app you want to install. Choose among the followings: " +
str(apps.keys()))
if sys.argv[0] != "installCGSapps.py" and "/" in sys.argv[0]:
    # If the script was not launched from the current directory, we have to make some modifications
tmp = sys.argv[0].split("/")
script_name = tmp.pop()
app_directory_prefix = sys.argv[0].replace("/"+script_name,"/")
else:
app_directory_prefix = ""
# We take the folder where hue is installed
try:
hue_directory = subprocess.Popen("whereis hue", stdin=False, shell=True, stdout=subprocess.PIPE)
hue_directory = str(hue_directory.communicate()[0]).split(" ")[2].strip()
except:
hue_directory = "/usr/lib/hue"
if not os.path.exists(hue_directory) and "HUE_DIRECTORY" in os.environ:
hue_directory = os.environ["HUE_DIRECTORY"]
if os.path.exists(hue_directory) and not os.path.exists(hue_directory+"/myapps"):
try:
os.makedirs(hue_directory+"/myapps")
except:
sys.exit("Impossible to create the folder 'myapps' in '"+hue_directory+"'.")
apps_directory = hue_directory + "/myapps"
# Some basic checks first
if not os.path.exists(hue_directory):
sys.exit("This installation file did not find the hue directory, please create a HUE_DIRECTORY environment"
"variable.")
# We install each application
aborted = 0
for i in range(1, len(sys.argv)):
app_name = sys.argv[i]
if not app_name in apps:
sys.exit("Invalid app name. Choose among the followings: "+str(apps.keys()))
if not os.path.exists(app_directory_prefix+apps[app_name]):
sys.exit("It seems the source of the app '"+app_name+"' is missing from the uncompressed zip.")
app_directory = apps_directory+"/"+app_name
"""
# We try to delete the eventual old folder
if os.path.exists(app_directory):
if PRODUCTION == True:
reinstall = raw_input("It seems the '"+app_name+"' already exists. Do you want to reinstall it [Y/n]?")
else:
reinstall = "Y"
if reinstall != "Y" and reinstall != "y":
print("Installation of '"+app_name+"' aborted.")
aborted += 1
continue
else:
try:
shutil.rmtree(app_directory)
except Exception as e:
print(e.message)
sys.exit("Impossible to delete the folder "+app_directory+". Check the access rights.")
# We create the app
# TODO: we do not catch correctly the errors of 'subprocess'
try:
print("Creating the app '"+app_name+"'...")
app_install = subprocess.Popen("cd " + apps_directory + " && " + hue_directory +
"/build/env/bin/hue create_desktop_app " + app_name,
stdin=False, shell=True, stdout=subprocess.PIPE)
app_install.communicate()
app_install = subprocess.Popen("cd " + apps_directory + " && python " + hue_directory +
"/tools/app_reg/app_reg.py --install " + app_name,
stdin=False, shell=True, stdout=subprocess.PIPE)
app_install.communicate()
except Exception as e:
print(e.message)
sys.exit("Error while creating the app...")
"""
# We copy the content of the application to the new directory
app_src = app_directory_prefix+apps[app_name]
try:
print("Copying source code to app folder...")
distutils.dir_util.copy_tree(app_src, app_directory)
except:
sys.exit("Impossible to copy data from '"+app_src+"' to '"+app_directory+"'.")
# We restart hue
try:
app_install = subprocess.Popen("service hue restart", stdin=False, shell=True, stdout=subprocess.PIPE)
app_install.communicate()
except Exception as e:
print(e.message)
sys.exit("Error while restarting hue.")
# The happy end
if aborted == 0:
print("Installation successful.")
elif aborted != len(sys.argv) - 1:
print("Installation of the 'non-aborted' apps successful.")
|
jpoullet2000/cgs-apps
|
installForTest.py
|
Python
|
apache-2.0
| 4,453
|
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import threading
import paramiko
from touchdown.tests.fixtures.fixture import Fixture
class DummyServer(paramiko.ServerInterface):
def get_allowed_auths(self, username):
return "publickey,password"
def check_auth_password(self, username, password):
return paramiko.AUTH_SUCCESSFUL
def check_auth_publickey(self, username, key):
return paramiko.AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
return True
def check_channel_shell_request(self, channel):
return True
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
return True
class SshConnectionFixture(Fixture):
def __enter__(self):
self.listen_socket = socket.socket()
self.listen_socket.bind(("0.0.0.0", 0))
self.listen_socket.listen(1)
self.address, self.port = self.listen_socket.getsockname()
self.fixtures.push(lambda *exc_info: self.listen_socket.close())
self.event = threading.Event()
self.ssh_connection = self.workspace.add_ssh_connection(
name="test-ssh-connection", hostname=self.address, port=self.port
)
self.listen_thread = threading.Thread(target=self.server_thread)
self.listen_thread.daemon = True
self.listen_thread.start()
return self
def server_thread(self):
self.client_socket, addr = self.listen_socket.accept()
self.fixtures.push(lambda *exc_info: self.client_socket.close())
self.server_transport = paramiko.Transport(self.client_socket)
self.fixtures.push(lambda *exc_info: self.server_transport.close())
self.server_transport.add_server_key(
paramiko.RSAKey.from_private_key_file(
os.path.join(os.path.dirname(__file__), "..", "assets/id_rsa_test")
)
)
self.server = DummyServer()
self.server_transport.start_server(self.event, self.server)
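# --- Hedged sketch (assumed usage; not part of the fixture module). ---
# A test could point a regular paramiko client at the fixture's listening socket;
# the DummyServer above accepts any password or public key. The username/password
# values are placeholders.
def _connect_client_sketch(fixture):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(fixture.address, port=fixture.port, username="test", password="test")
    return client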
|
yaybu/touchdown
|
touchdown/tests/fixtures/ssh_connection.py
|
Python
|
apache-2.0
| 2,710
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes to track metrics about cache line evictions."""
import abc
import numpy as np
from scipy import stats
from cache_replacement.policy_learning.cache_model import utils
class CacheEvictionMetric(abc.ABC):
"""Tracks the value of a metric about predictions on cache lines."""
@abc.abstractmethod
def update(self, prediction_probs, eviction_mask, oracle_scores):
"""Updates the value of the metric based on a batch of data.
Args:
prediction_probs (torch.FloatTensor): batch of probability distributions
over cache lines of shape (batch_size, num_cache_lines), each
corresponding to a cache access. In each distribution, the lines are
ordered from top-1 to top-num_cache_lines.
eviction_mask (torch.ByteTensor): of shape (batch_size), where
eviction_mask[i] = True if the i-th cache access results in an eviction.
The metric value is tracked for two populations: (i) all cache accesses
        and (ii) evictions only.
oracle_scores (list[list[float]]): the oracle scores of the cache lines,
of shape (batch_size, num_cache_lines).
"""
raise NotImplementedError()
@abc.abstractmethod
def write_to_tensorboard(self, tb_writer, tb_tag, step):
"""Writes the value of the tracked metric(s) to tensorboard.
Args:
tb_writer (tf.Writer): tensorboard writer to write to.
tb_tag (str): metrics are written to tb_tag/metric_name(s).
step (int): the step to use when writing to tensorboard.
"""
raise NotImplementedError()
class SuccessRateMetric(CacheEvictionMetric):
"""Tracks the success rate of predicting the top-1 to top-k elements.
Writes the following metrics to tensorboard:
tb_tag/eviction_top_i for i = 1, ..., k
tb_tag/total_top_i for i = 1, ..., k
"""
def __init__(self, k):
"""Sets the value of k to track up to.
Args:
k (int): metric tracks top-1 to top-k.
"""
self._k = k
self._num_top_i_successes = {"total": [0] * k, "eviction": [0] * k}
self._num_accesses = {"total": 0, "eviction": 0}
def update(self, prediction_probs, eviction_mask, oracle_scores):
del oracle_scores
sorted_probs, _ = prediction_probs.sort(descending=True)
num_elems = sorted_probs.shape[-1]
for i in range(self._k):
top_i_successes = (
prediction_probs[:, 0] >= sorted_probs[:, min(i, num_elems - 1)])
self._num_top_i_successes["total"][i] += top_i_successes.sum().item()
self._num_top_i_successes["eviction"][i] += (
eviction_mask * top_i_successes).sum().item()
self._num_accesses["total"] += prediction_probs.shape[0]
self._num_accesses["eviction"] += eviction_mask.sum().item()
def write_to_tensorboard(self, tb_writer, tb_tag, step):
for key in self._num_top_i_successes:
for i in range(self._k):
top_i_success_rate = (self._num_top_i_successes[key][i] /
(self._num_accesses[key] + 1e-8))
utils.log_scalar(
tb_writer, "{}/{}_top_{}".format(tb_tag, key, i + 1),
top_i_success_rate, step)
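# --- Hedged sketch (illustrative only; not part of the original module). ---
# Feeds one toy batch through SuccessRateMetric. torch is assumed to be available,
# since update() above already expects torch tensors; the probabilities and mask
# below are made up.
def _success_rate_demo():
  import torch  # local import; torch is not imported by this file
  metric = SuccessRateMetric(k=2)
  # Line 0 (the oracle's top choice) has the highest probability in the first
  # access but only the second highest in the second access.
  probs = torch.tensor([[0.7, 0.2, 0.1], [0.4, 0.5, 0.1]])
  mask = torch.tensor([1, 0], dtype=torch.uint8)
  metric.update(probs, mask, oracle_scores=[[], []])
  # Top-1 succeeded once and top-2 succeeded twice across the two accesses;
  # reading the private counter here is just for illustration.
  return metric._num_top_i_successes["total"]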
class KendallWeightedTau(CacheEvictionMetric):
"""Tracks value of Kendall's weighted tau w.r.t. labeled ordering."""
def __init__(self):
self._weighted_taus = []
self._masks = []
def update(self, prediction_probs, eviction_mask, oracle_scores):
del oracle_scores
_, predicted_order = prediction_probs.sort(descending=True)
for unbatched_order in predicted_order.cpu().data.numpy():
# Need to negate arguments for rank: see weightedtau docs
# NOTE: This is incorporating potentially masked & padded probs
weighted_tau, _ = stats.weightedtau(
-unbatched_order, -np.array(range(len(unbatched_order))), rank=False)
self._weighted_taus.append(weighted_tau)
self._masks.extend(eviction_mask.cpu().data.numpy())
def write_to_tensorboard(self, tb_writer, tb_tag, step):
weighted_taus = np.array(self._weighted_taus)
eviction_masks = np.array(self._masks)
eviction_mean_weighted_tau = np.sum(
weighted_taus * eviction_masks) / (np.sum(eviction_masks) + 1e-8)
utils.log_scalar(
tb_writer, "{}/eviction_weighted_tau".format(tb_tag),
eviction_mean_weighted_tau, step)
utils.log_scalar(
tb_writer, "{}/total_weighted_tau".format(tb_tag),
np.mean(weighted_taus), step)
class OracleScoreGap(CacheEvictionMetric):
"""Tracks the gap between the oracle score of evicted vs. optimal line.
Given lines l_1, ..., l_N with model probabilities p_1, ..., p_N and oracle
scores s_1, ..., s_N, computes two gaps:
- the optimal line is l_1 with score s_1
- the evicted line is l_i, where i = argmax_j p_j
- the quotient gap is computed as log(s_1 / s_i)
- the difference gap is computed as log(s_i - s_1 + 1) [+1 to avoid log(0)]
- scores are typically negative reuse distances.
"""
def __init__(self):
self._optimal_scores = []
self._evicted_scores = []
self._masks = []
def update(self, prediction_probs, eviction_mask, oracle_scores):
chosen_indices = prediction_probs.argmax(-1).cpu().data.numpy()
# Default to optimal score = evicted score = 1, if no scores
# Simpler than just excluding the scores, because of the masks.
# Need to do explicit len check for numpy array
# pylint: disable=g-explicit-length-test
self._optimal_scores.append(
[scores[0] if len(scores) > 0 else 1 for scores in oracle_scores])
self._evicted_scores.append(
[scores[index] if len(scores) > 0 else 1
for (index, scores) in zip(chosen_indices, oracle_scores)])
self._masks.append(eviction_mask.cpu().data.numpy())
def write_to_tensorboard(self, tb_writer, tb_tag, step):
eviction_masks = np.array(self._masks)
difference_gap = np.log10(
np.array(self._evicted_scores) - np.array(self._optimal_scores) + 1)
quotient_gap = np.log10(
np.array(self._optimal_scores) / np.array(self._evicted_scores))
gaps = {
"difference": difference_gap,
"quotient": quotient_gap,
}
for gap_type, gap in gaps.items():
eviction_mean_gap = np.sum(
gap * eviction_masks) / (np.sum(eviction_masks) + 1e-8)
utils.log_scalar(
tb_writer, "{}/eviction_oracle_score_{}_gap".format(tb_tag, gap_type),
eviction_mean_gap, step)
utils.log_scalar(
tb_writer, "{}/oracle_score_{}_gap".format(tb_tag, gap_type),
np.mean(gap), step)
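# --- Hedged worked example (illustrative only; not part of the original module). ---
# The two OracleScoreGap quantities for a single hypothetical eviction, with scores
# taken as negative reuse distances as the class docstring suggests: the optimal
# line has reuse distance 10 (score -10), the evicted line has reuse distance 4 (score -4).
def _oracle_gap_example(optimal_score=-10.0, evicted_score=-4.0):
  difference_gap = np.log10(evicted_score - optimal_score + 1)  # log10(7) ~= 0.845
  quotient_gap = np.log10(optimal_score / evicted_score)        # log10(2.5) ~= 0.398
  return difference_gap, quotient_gap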
|
google-research/google-research
|
cache_replacement/policy_learning/cache_model/metric.py
|
Python
|
apache-2.0
| 7,155
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='NASFCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
open-mmlab/mmdetection
|
configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
|
Python
|
apache-2.0
| 3,012
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from models import Project
class ProjectsTest(TestCase):
fixtures = ['test_data.json']
def test_project_listing(self):
"""
Verify that the project listing page contains all projects within the
page's context.
"""
response = self.client.get(reverse("projects:list"))
self.failUnlessEqual(response.status_code, 200)
try:
response.context['project_list']
except KeyError:
self.fail("Template context did not contain project_list object.")
for project in Project.objects.published():
self.assertTrue(project in response.context['project_list'])
def test_verify_author_detail_pages(self):
"""
        Verify that each project has a detail page and that the project is
        contained within the page's context.
"""
for project in Project.objects.all():
response = self.client.get(project.get_absolute_url())
if project.published():
self.assertTrue(response.status_code == 200)
try:
self.failUnlessEqual(response.context['project'], project)
except KeyError:
self.fail("Template context did not contain project object.")
else:
self.assertTrue(response.status_code == 404)
|
mazelife/django-belleville
|
belleville/projects/tests.py
|
Python
|
apache-2.0
| 1,492
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import gender_view
from google.ads.googleads.v8.services.types import gender_view_service
from .base import GenderViewServiceTransport, DEFAULT_CLIENT_INFO
class GenderViewServiceGrpcTransport(GenderViewServiceTransport):
"""gRPC backend transport for GenderViewService.
Service to manage gender views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_gender_view(
self,
) -> Callable[
[gender_view_service.GetGenderViewRequest], gender_view.GenderView
]:
r"""Return a callable for the get gender view method over gRPC.
Returns the requested gender view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetGenderViewRequest],
~.GenderView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_gender_view" not in self._stubs:
self._stubs["get_gender_view"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.GenderViewService/GetGenderView",
request_serializer=gender_view_service.GetGenderViewRequest.serialize,
response_deserializer=gender_view.GenderView.deserialize,
)
return self._stubs["get_gender_view"]
__all__ = ("GenderViewServiceGrpcTransport",)
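# --- Hedged usage sketch (illustrative only; not part of the generated client). ---
# The transport is normally constructed by the corresponding service client, but it
# can also be wired to a pre-built channel. The endpoint below is a placeholder and
# no RPC is issued here; accessing the property only creates and caches the stub.
def _transport_sketch():
    channel = grpc.insecure_channel("localhost:7469")  # hypothetical local endpoint
    transport = GenderViewServiceGrpcTransport(channel=channel)
    return transport.get_gender_view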
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/gender_view_service/transports/grpc.py
|
Python
|
apache-2.0
| 10,280
|
#
# Autogenerated by Thrift Compiler (0.14.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class AdjacencyType(object):
CONJUNCTIVE = 0
DISJUNCTIVE = 1
_VALUES_TO_NAMES = {
0: "CONJUNCTIVE",
1: "DISJUNCTIVE",
}
_NAMES_TO_VALUES = {
"CONJUNCTIVE": 0,
"DISJUNCTIVE": 1,
}
class NodeType(object):
OPERATOR = 0
STAGE = 1
_VALUES_TO_NAMES = {
0: "OPERATOR",
1: "STAGE",
}
_NAMES_TO_VALUES = {
"OPERATOR": 0,
"STAGE": 1,
}
class OperatorType(object):
JOIN = 0
MAPJOIN = 1
EXTRACT = 2
FILTER = 3
FORWARD = 4
GROUPBY = 5
LIMIT = 6
SCRIPT = 7
SELECT = 8
TABLESCAN = 9
FILESINK = 10
REDUCESINK = 11
UNION = 12
UDTF = 13
LATERALVIEWJOIN = 14
LATERALVIEWFORWARD = 15
HASHTABLESINK = 16
HASHTABLEDUMMY = 17
PTF = 18
MUX = 19
DEMUX = 20
EVENT = 21
ORCFILEMERGE = 22
RCFILEMERGE = 23
MERGEJOIN = 24
SPARKPRUNINGSINK = 25
TOPNKEY = 26
_VALUES_TO_NAMES = {
0: "JOIN",
1: "MAPJOIN",
2: "EXTRACT",
3: "FILTER",
4: "FORWARD",
5: "GROUPBY",
6: "LIMIT",
7: "SCRIPT",
8: "SELECT",
9: "TABLESCAN",
10: "FILESINK",
11: "REDUCESINK",
12: "UNION",
13: "UDTF",
14: "LATERALVIEWJOIN",
15: "LATERALVIEWFORWARD",
16: "HASHTABLESINK",
17: "HASHTABLEDUMMY",
18: "PTF",
19: "MUX",
20: "DEMUX",
21: "EVENT",
22: "ORCFILEMERGE",
23: "RCFILEMERGE",
24: "MERGEJOIN",
25: "SPARKPRUNINGSINK",
26: "TOPNKEY",
}
_NAMES_TO_VALUES = {
"JOIN": 0,
"MAPJOIN": 1,
"EXTRACT": 2,
"FILTER": 3,
"FORWARD": 4,
"GROUPBY": 5,
"LIMIT": 6,
"SCRIPT": 7,
"SELECT": 8,
"TABLESCAN": 9,
"FILESINK": 10,
"REDUCESINK": 11,
"UNION": 12,
"UDTF": 13,
"LATERALVIEWJOIN": 14,
"LATERALVIEWFORWARD": 15,
"HASHTABLESINK": 16,
"HASHTABLEDUMMY": 17,
"PTF": 18,
"MUX": 19,
"DEMUX": 20,
"EVENT": 21,
"ORCFILEMERGE": 22,
"RCFILEMERGE": 23,
"MERGEJOIN": 24,
"SPARKPRUNINGSINK": 25,
"TOPNKEY": 26,
}
class TaskType(object):
MAP = 0
REDUCE = 1
OTHER = 2
_VALUES_TO_NAMES = {
0: "MAP",
1: "REDUCE",
2: "OTHER",
}
_NAMES_TO_VALUES = {
"MAP": 0,
"REDUCE": 1,
"OTHER": 2,
}
class StageType(object):
CONDITIONAL = 0
COPY = 1
DDL = 2
MAPRED = 3
EXPLAIN = 4
FETCH = 5
FUNC = 6
MAPREDLOCAL = 7
MOVE = 8
STATS = 9
DEPENDENCY_COLLECTION = 10
COLUMNSTATS = 11
REPL_DUMP = 12
REPL_BOOTSTRAP_LOAD = 13
REPL_STATE_LOG = 14
REPL_TXN = 15
REPL_INCREMENTAL_LOAD = 16
SCHEDULED_QUERY_MAINT = 17
ACK = 18
RANGER_DUMP = 19
RANGER_LOAD = 20
ATLAS_DUMP = 21
ATLAS_LOAD = 22
_VALUES_TO_NAMES = {
0: "CONDITIONAL",
1: "COPY",
2: "DDL",
3: "MAPRED",
4: "EXPLAIN",
5: "FETCH",
6: "FUNC",
7: "MAPREDLOCAL",
8: "MOVE",
9: "STATS",
10: "DEPENDENCY_COLLECTION",
11: "COLUMNSTATS",
12: "REPL_DUMP",
13: "REPL_BOOTSTRAP_LOAD",
14: "REPL_STATE_LOG",
15: "REPL_TXN",
16: "REPL_INCREMENTAL_LOAD",
17: "SCHEDULED_QUERY_MAINT",
18: "ACK",
19: "RANGER_DUMP",
20: "RANGER_LOAD",
21: "ATLAS_DUMP",
22: "ATLAS_LOAD",
}
_NAMES_TO_VALUES = {
"CONDITIONAL": 0,
"COPY": 1,
"DDL": 2,
"MAPRED": 3,
"EXPLAIN": 4,
"FETCH": 5,
"FUNC": 6,
"MAPREDLOCAL": 7,
"MOVE": 8,
"STATS": 9,
"DEPENDENCY_COLLECTION": 10,
"COLUMNSTATS": 11,
"REPL_DUMP": 12,
"REPL_BOOTSTRAP_LOAD": 13,
"REPL_STATE_LOG": 14,
"REPL_TXN": 15,
"REPL_INCREMENTAL_LOAD": 16,
"SCHEDULED_QUERY_MAINT": 17,
"ACK": 18,
"RANGER_DUMP": 19,
"RANGER_LOAD": 20,
"ATLAS_DUMP": 21,
"ATLAS_LOAD": 22,
}
class Adjacency(object):
"""
Attributes:
- node
- children
- adjacencyType
"""
def __init__(self, node=None, children=None, adjacencyType=None,):
self.node = node
self.children = children
self.adjacencyType = adjacencyType
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.node = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.children = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.children.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.adjacencyType = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Adjacency')
if self.node is not None:
oprot.writeFieldBegin('node', TType.STRING, 1)
oprot.writeString(self.node.encode('utf-8') if sys.version_info[0] == 2 else self.node)
oprot.writeFieldEnd()
if self.children is not None:
oprot.writeFieldBegin('children', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.children))
for iter6 in self.children:
oprot.writeString(iter6.encode('utf-8') if sys.version_info[0] == 2 else iter6)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.adjacencyType is not None:
oprot.writeFieldBegin('adjacencyType', TType.I32, 3)
oprot.writeI32(self.adjacencyType)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Graph(object):
"""
Attributes:
- nodeType
- roots
- adjacencyList
"""
def __init__(self, nodeType=None, roots=None, adjacencyList=None,):
self.nodeType = nodeType
self.roots = roots
self.adjacencyList = adjacencyList
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.nodeType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.roots = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.roots.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.adjacencyList = []
(_etype16, _size13) = iprot.readListBegin()
for _i17 in range(_size13):
_elem18 = Adjacency()
_elem18.read(iprot)
self.adjacencyList.append(_elem18)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Graph')
if self.nodeType is not None:
oprot.writeFieldBegin('nodeType', TType.I32, 1)
oprot.writeI32(self.nodeType)
oprot.writeFieldEnd()
if self.roots is not None:
oprot.writeFieldBegin('roots', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.roots))
for iter19 in self.roots:
oprot.writeString(iter19.encode('utf-8') if sys.version_info[0] == 2 else iter19)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.adjacencyList is not None:
oprot.writeFieldBegin('adjacencyList', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.adjacencyList))
for iter20 in self.adjacencyList:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Operator(object):
"""
Attributes:
- operatorId
- operatorType
- operatorAttributes
- operatorCounters
- done
- started
"""
def __init__(self, operatorId=None, operatorType=None, operatorAttributes=None, operatorCounters=None, done=None, started=None,):
self.operatorId = operatorId
self.operatorType = operatorType
self.operatorAttributes = operatorAttributes
self.operatorCounters = operatorCounters
self.done = done
self.started = started
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.operatorId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.operatorType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.operatorAttributes = {}
(_ktype22, _vtype23, _size21) = iprot.readMapBegin()
for _i25 in range(_size21):
_key26 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val27 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.operatorAttributes[_key26] = _val27
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.operatorCounters = {}
(_ktype29, _vtype30, _size28) = iprot.readMapBegin()
for _i32 in range(_size28):
_key33 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val34 = iprot.readI64()
self.operatorCounters[_key33] = _val34
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.done = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.started = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Operator')
if self.operatorId is not None:
oprot.writeFieldBegin('operatorId', TType.STRING, 1)
oprot.writeString(self.operatorId.encode('utf-8') if sys.version_info[0] == 2 else self.operatorId)
oprot.writeFieldEnd()
if self.operatorType is not None:
oprot.writeFieldBegin('operatorType', TType.I32, 2)
oprot.writeI32(self.operatorType)
oprot.writeFieldEnd()
if self.operatorAttributes is not None:
oprot.writeFieldBegin('operatorAttributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.operatorAttributes))
for kiter35, viter36 in self.operatorAttributes.items():
oprot.writeString(kiter35.encode('utf-8') if sys.version_info[0] == 2 else kiter35)
oprot.writeString(viter36.encode('utf-8') if sys.version_info[0] == 2 else viter36)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.operatorCounters is not None:
oprot.writeFieldBegin('operatorCounters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.operatorCounters))
for kiter37, viter38 in self.operatorCounters.items():
oprot.writeString(kiter37.encode('utf-8') if sys.version_info[0] == 2 else kiter37)
oprot.writeI64(viter38)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.done is not None:
oprot.writeFieldBegin('done', TType.BOOL, 5)
oprot.writeBool(self.done)
oprot.writeFieldEnd()
if self.started is not None:
oprot.writeFieldBegin('started', TType.BOOL, 6)
oprot.writeBool(self.started)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Task(object):
"""
Attributes:
- taskId
- taskType
- taskAttributes
- taskCounters
- operatorGraph
- operatorList
- done
- started
"""
def __init__(self, taskId=None, taskType=None, taskAttributes=None, taskCounters=None, operatorGraph=None, operatorList=None, done=None, started=None,):
self.taskId = taskId
self.taskType = taskType
self.taskAttributes = taskAttributes
self.taskCounters = taskCounters
self.operatorGraph = operatorGraph
self.operatorList = operatorList
self.done = done
self.started = started
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.taskId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.taskType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.taskAttributes = {}
(_ktype40, _vtype41, _size39) = iprot.readMapBegin()
for _i43 in range(_size39):
_key44 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val45 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.taskAttributes[_key44] = _val45
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.taskCounters = {}
(_ktype47, _vtype48, _size46) = iprot.readMapBegin()
for _i50 in range(_size46):
_key51 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val52 = iprot.readI64()
self.taskCounters[_key51] = _val52
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.operatorGraph = Graph()
self.operatorGraph.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.operatorList = []
(_etype56, _size53) = iprot.readListBegin()
for _i57 in range(_size53):
_elem58 = Operator()
_elem58.read(iprot)
self.operatorList.append(_elem58)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.done = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.started = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Task')
if self.taskId is not None:
oprot.writeFieldBegin('taskId', TType.STRING, 1)
oprot.writeString(self.taskId.encode('utf-8') if sys.version_info[0] == 2 else self.taskId)
oprot.writeFieldEnd()
if self.taskType is not None:
oprot.writeFieldBegin('taskType', TType.I32, 2)
oprot.writeI32(self.taskType)
oprot.writeFieldEnd()
if self.taskAttributes is not None:
oprot.writeFieldBegin('taskAttributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.taskAttributes))
for kiter59, viter60 in self.taskAttributes.items():
oprot.writeString(kiter59.encode('utf-8') if sys.version_info[0] == 2 else kiter59)
oprot.writeString(viter60.encode('utf-8') if sys.version_info[0] == 2 else viter60)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.taskCounters is not None:
oprot.writeFieldBegin('taskCounters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.taskCounters))
for kiter61, viter62 in self.taskCounters.items():
oprot.writeString(kiter61.encode('utf-8') if sys.version_info[0] == 2 else kiter61)
oprot.writeI64(viter62)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.operatorGraph is not None:
oprot.writeFieldBegin('operatorGraph', TType.STRUCT, 5)
self.operatorGraph.write(oprot)
oprot.writeFieldEnd()
if self.operatorList is not None:
oprot.writeFieldBegin('operatorList', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.operatorList))
for iter63 in self.operatorList:
iter63.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.done is not None:
oprot.writeFieldBegin('done', TType.BOOL, 7)
oprot.writeBool(self.done)
oprot.writeFieldEnd()
if self.started is not None:
oprot.writeFieldBegin('started', TType.BOOL, 8)
oprot.writeBool(self.started)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Stage(object):
"""
Attributes:
- stageId
- stageType
- stageAttributes
- stageCounters
- taskList
- done
- started
"""
def __init__(self, stageId=None, stageType=None, stageAttributes=None, stageCounters=None, taskList=None, done=None, started=None,):
self.stageId = stageId
self.stageType = stageType
self.stageAttributes = stageAttributes
self.stageCounters = stageCounters
self.taskList = taskList
self.done = done
self.started = started
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.stageId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.stageType = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.stageAttributes = {}
(_ktype65, _vtype66, _size64) = iprot.readMapBegin()
for _i68 in range(_size64):
_key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.stageAttributes[_key69] = _val70
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.stageCounters = {}
(_ktype72, _vtype73, _size71) = iprot.readMapBegin()
for _i75 in range(_size71):
_key76 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val77 = iprot.readI64()
self.stageCounters[_key76] = _val77
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.taskList = []
(_etype81, _size78) = iprot.readListBegin()
for _i82 in range(_size78):
_elem83 = Task()
_elem83.read(iprot)
self.taskList.append(_elem83)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.done = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.started = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Stage')
if self.stageId is not None:
oprot.writeFieldBegin('stageId', TType.STRING, 1)
oprot.writeString(self.stageId.encode('utf-8') if sys.version_info[0] == 2 else self.stageId)
oprot.writeFieldEnd()
if self.stageType is not None:
oprot.writeFieldBegin('stageType', TType.I32, 2)
oprot.writeI32(self.stageType)
oprot.writeFieldEnd()
if self.stageAttributes is not None:
oprot.writeFieldBegin('stageAttributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.stageAttributes))
for kiter84, viter85 in self.stageAttributes.items():
oprot.writeString(kiter84.encode('utf-8') if sys.version_info[0] == 2 else kiter84)
oprot.writeString(viter85.encode('utf-8') if sys.version_info[0] == 2 else viter85)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.stageCounters is not None:
oprot.writeFieldBegin('stageCounters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.stageCounters))
for kiter86, viter87 in self.stageCounters.items():
oprot.writeString(kiter86.encode('utf-8') if sys.version_info[0] == 2 else kiter86)
oprot.writeI64(viter87)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.taskList is not None:
oprot.writeFieldBegin('taskList', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.taskList))
for iter88 in self.taskList:
iter88.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.done is not None:
oprot.writeFieldBegin('done', TType.BOOL, 6)
oprot.writeBool(self.done)
oprot.writeFieldEnd()
if self.started is not None:
oprot.writeFieldBegin('started', TType.BOOL, 7)
oprot.writeBool(self.started)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Query(object):
"""
Attributes:
- queryId
- queryType
- queryAttributes
- queryCounters
- stageGraph
- stageList
- done
- started
"""
def __init__(self, queryId=None, queryType=None, queryAttributes=None, queryCounters=None, stageGraph=None, stageList=None, done=None, started=None,):
self.queryId = queryId
self.queryType = queryType
self.queryAttributes = queryAttributes
self.queryCounters = queryCounters
self.stageGraph = stageGraph
self.stageList = stageList
self.done = done
self.started = started
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.queryId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.queryType = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.queryAttributes = {}
(_ktype90, _vtype91, _size89) = iprot.readMapBegin()
for _i93 in range(_size89):
_key94 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val95 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.queryAttributes[_key94] = _val95
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.queryCounters = {}
(_ktype97, _vtype98, _size96) = iprot.readMapBegin()
for _i100 in range(_size96):
_key101 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val102 = iprot.readI64()
self.queryCounters[_key101] = _val102
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.stageGraph = Graph()
self.stageGraph.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.stageList = []
(_etype106, _size103) = iprot.readListBegin()
for _i107 in range(_size103):
_elem108 = Stage()
_elem108.read(iprot)
self.stageList.append(_elem108)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.done = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.started = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Query')
if self.queryId is not None:
oprot.writeFieldBegin('queryId', TType.STRING, 1)
oprot.writeString(self.queryId.encode('utf-8') if sys.version_info[0] == 2 else self.queryId)
oprot.writeFieldEnd()
if self.queryType is not None:
oprot.writeFieldBegin('queryType', TType.STRING, 2)
oprot.writeString(self.queryType.encode('utf-8') if sys.version_info[0] == 2 else self.queryType)
oprot.writeFieldEnd()
if self.queryAttributes is not None:
oprot.writeFieldBegin('queryAttributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.queryAttributes))
for kiter109, viter110 in self.queryAttributes.items():
oprot.writeString(kiter109.encode('utf-8') if sys.version_info[0] == 2 else kiter109)
oprot.writeString(viter110.encode('utf-8') if sys.version_info[0] == 2 else viter110)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.queryCounters is not None:
oprot.writeFieldBegin('queryCounters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.queryCounters))
for kiter111, viter112 in self.queryCounters.items():
oprot.writeString(kiter111.encode('utf-8') if sys.version_info[0] == 2 else kiter111)
oprot.writeI64(viter112)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.stageGraph is not None:
oprot.writeFieldBegin('stageGraph', TType.STRUCT, 5)
self.stageGraph.write(oprot)
oprot.writeFieldEnd()
if self.stageList is not None:
oprot.writeFieldBegin('stageList', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.stageList))
for iter113 in self.stageList:
iter113.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.done is not None:
oprot.writeFieldBegin('done', TType.BOOL, 7)
oprot.writeBool(self.done)
oprot.writeFieldEnd()
if self.started is not None:
oprot.writeFieldBegin('started', TType.BOOL, 8)
oprot.writeBool(self.started)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class QueryPlan(object):
"""
Attributes:
- queries
- done
- started
"""
def __init__(self, queries=None, done=None, started=None,):
self.queries = queries
self.done = done
self.started = started
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.queries = []
(_etype117, _size114) = iprot.readListBegin()
for _i118 in range(_size114):
_elem119 = Query()
_elem119.read(iprot)
self.queries.append(_elem119)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.done = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.started = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('QueryPlan')
if self.queries is not None:
oprot.writeFieldBegin('queries', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.queries))
for iter120 in self.queries:
iter120.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.done is not None:
oprot.writeFieldBegin('done', TType.BOOL, 2)
oprot.writeBool(self.done)
oprot.writeFieldEnd()
if self.started is not None:
oprot.writeFieldBegin('started', TType.BOOL, 3)
oprot.writeBool(self.started)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Adjacency)
Adjacency.thrift_spec = (
None, # 0
(1, TType.STRING, 'node', 'UTF8', None, ), # 1
(2, TType.LIST, 'children', (TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.I32, 'adjacencyType', None, None, ), # 3
)
all_structs.append(Graph)
Graph.thrift_spec = (
None, # 0
(1, TType.I32, 'nodeType', None, None, ), # 1
(2, TType.LIST, 'roots', (TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.LIST, 'adjacencyList', (TType.STRUCT, [Adjacency, None], False), None, ), # 3
)
all_structs.append(Operator)
Operator.thrift_spec = (
None, # 0
(1, TType.STRING, 'operatorId', 'UTF8', None, ), # 1
(2, TType.I32, 'operatorType', None, None, ), # 2
(3, TType.MAP, 'operatorAttributes', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.MAP, 'operatorCounters', (TType.STRING, 'UTF8', TType.I64, None, False), None, ), # 4
(5, TType.BOOL, 'done', None, None, ), # 5
(6, TType.BOOL, 'started', None, None, ), # 6
)
all_structs.append(Task)
Task.thrift_spec = (
None, # 0
(1, TType.STRING, 'taskId', 'UTF8', None, ), # 1
(2, TType.I32, 'taskType', None, None, ), # 2
(3, TType.MAP, 'taskAttributes', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.MAP, 'taskCounters', (TType.STRING, 'UTF8', TType.I64, None, False), None, ), # 4
(5, TType.STRUCT, 'operatorGraph', [Graph, None], None, ), # 5
(6, TType.LIST, 'operatorList', (TType.STRUCT, [Operator, None], False), None, ), # 6
(7, TType.BOOL, 'done', None, None, ), # 7
(8, TType.BOOL, 'started', None, None, ), # 8
)
all_structs.append(Stage)
Stage.thrift_spec = (
None, # 0
(1, TType.STRING, 'stageId', 'UTF8', None, ), # 1
(2, TType.I32, 'stageType', None, None, ), # 2
(3, TType.MAP, 'stageAttributes', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.MAP, 'stageCounters', (TType.STRING, 'UTF8', TType.I64, None, False), None, ), # 4
(5, TType.LIST, 'taskList', (TType.STRUCT, [Task, None], False), None, ), # 5
(6, TType.BOOL, 'done', None, None, ), # 6
(7, TType.BOOL, 'started', None, None, ), # 7
)
all_structs.append(Query)
Query.thrift_spec = (
None, # 0
(1, TType.STRING, 'queryId', 'UTF8', None, ), # 1
(2, TType.STRING, 'queryType', 'UTF8', None, ), # 2
(3, TType.MAP, 'queryAttributes', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.MAP, 'queryCounters', (TType.STRING, 'UTF8', TType.I64, None, False), None, ), # 4
(5, TType.STRUCT, 'stageGraph', [Graph, None], None, ), # 5
(6, TType.LIST, 'stageList', (TType.STRUCT, [Stage, None], False), None, ), # 6
(7, TType.BOOL, 'done', None, None, ), # 7
(8, TType.BOOL, 'started', None, None, ), # 8
)
all_structs.append(QueryPlan)
QueryPlan.thrift_spec = (
None, # 0
(1, TType.LIST, 'queries', (TType.STRUCT, [Query, None], False), None, ), # 1
(2, TType.BOOL, 'done', None, None, ), # 2
(3, TType.BOOL, 'started', None, None, ), # 3
)
fix_spec(all_structs)
del all_structs
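# --- Illustrative usage sketch, not part of the generated module: shows how
# the structs above can round-trip through the Thrift binary protocol.
# Assumes the Apache Thrift Python runtime is installed; the function name
# is hypothetical.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

def roundtrip_queryplan():
    plan = QueryPlan(queries=[Query(queryId="q1", queryType="QUERY")],
                     done=False, started=True)
    # serialize into an in-memory transport
    out_buf = TTransport.TMemoryBuffer()
    plan.write(TBinaryProtocol.TBinaryProtocol(out_buf))
    payload = out_buf.getvalue()
    # deserialize the bytes back into a fresh struct and compare
    decoded = QueryPlan()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))
    assert decoded == plan
    return payload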
|
jcamachor/hive
|
ql/src/gen/thrift/gen-py/queryplan/ttypes.py
|
Python
|
apache-2.0
| 43,635
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from platformio.commands.device import DeviceMonitorFilter
class Timestamp(DeviceMonitorFilter):
    NAME = "time"

    def __init__(self, *args, **kwargs):
        super(Timestamp, self).__init__(*args, **kwargs)
        self._line_started = False

    def rx(self, text):
        if self._line_started and "\n" not in text:
            return text
        timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
        if not self._line_started:
            self._line_started = True
            text = "%s > %s" % (timestamp, text)
        if text.endswith("\n"):
            self._line_started = False
            return text[:-1].replace("\n", "\n%s > " % timestamp) + "\n"
        return text.replace("\n", "\n%s > " % timestamp)
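# --- Illustrative sketch (hypothetical helper, not part of platformio): the
# same line-stamping idea as Timestamp.rx() above, packaged as a standalone
# closure so it can be exercised without a device monitor attached.
def make_stamper():
    state = {"line_started": False}

    def stamp(chunk):
        # Continuation of an already-stamped line: pass through untouched.
        if state["line_started"] and "\n" not in chunk:
            return chunk
        ts = datetime.now().strftime("%H:%M:%S.%f")[:-3]
        if not state["line_started"]:
            state["line_started"] = True
            chunk = "%s > %s" % (ts, chunk)
        if chunk.endswith("\n"):
            state["line_started"] = False
            return chunk[:-1].replace("\n", "\n%s > " % ts) + "\n"
        return chunk.replace("\n", "\n%s > " % ts)

    return stamp

# Example: stamp = make_stamper(); stamp("boot ok\n"); stamp("par"); stamp("tial\n")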
|
platformio/platformio-core
|
platformio/commands/device/filters/time.py
|
Python
|
apache-2.0
| 1,381
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  # +++your code here+++
  if count < 10:
    return "Number of donuts: "+str(count)
  else:
    return "Number of donuts: many"
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  # +++your code here+++
  if len(s) < 2:
    return ""
  else:
    return s[:2]+s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  # +++your code here+++
  h = s[0]
  t = s.replace(h, "*")
  return h + t[1:]
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
#   'mix', 'pod' -> 'pox mid'
#   'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  # +++your code here+++
  ah = a[:2]
  bh = b[:2]
  return bh + a[2:] + " " + ah + b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  print 'donuts'
  # Each line calls donuts and compares its result to the expected value for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')

# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
|
chinbat/google-python-exercises
|
basic/string1.py
|
Python
|
apache-2.0
| 3,640
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# PART 1. how IronPython chooses the CLI method and treats parameters WHEN NO OVERLOADS ARE PRESENT
#
from iptest.assert_util import *
from iptest.type_util import *
skiptest("win32")
load_iron_python_test()
from IronPythonTest.BinderTest import *
myint1, myint2 = myint(20), myint(-20)
mylong1, mylong2 = mylong(3), mylong(-4)
myfloat1, myfloat2 = myfloat(4.5), myfloat(-4.5)
mycomplex1 = mycomplex(3)
funcs = '''
M100 M201 M202 M203 M204 M205 M301 M302 M303 M304
M310 M311 M312 M313 M320 M321 M400 M401 M402 M403
M404 M410 M411 M450 M451
M500 M510 M600 M610 M611 M620 M630
M650 M651 M652 M653
M680 M700 M701
M710 M715
'''.split()
args = '''
NoArg Int32 Double BigInt Bool String SByte Int16 Int64 Single
Byte UInt16 UInt32 UInt64 Char Decml Object I C1 C2
S1 A C6 E1 E2
ArrInt32 ArrI ParamArrInt32 ParamArrI ParamArrS Int32ParamArrInt32 IParamArrI
IListInt Array IEnumerableInt IEnumeratorInt
NullableInt RefInt32 OutInt32
DefValInt32 Int32DefValInt32
'''.split()
arg2func = dict(list(zip(args, funcs)))
func2arg = dict(list(zip(funcs, args)))
TypeE = TypeError
OverF = OverflowError
def _get_funcs(args): return [arg2func[x] for x in args.split()]
def _self_defined_method(name): return len(name) == 4 and name[0] == "M"
def _my_call(func, arg):
if isinstance(arg, tuple):
l = len(arg)
# unrolled by arity on purpose
if l == 0: func()
elif l == 1: func(arg[0])
elif l == 2: func(arg[0], arg[1])
elif l == 3: func(arg[0], arg[1], arg[2])
elif l == 4: func(arg[0], arg[1], arg[2], arg[3])
elif l == 5: func(arg[0], arg[1], arg[2], arg[3], arg[4])
elif l == 6: func(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5])
else: func(*arg)
else:
func(arg)
def _helper(func, positiveArgs, flagValue, negativeArgs, exceptType):
for arg in positiveArgs:
try:
_my_call(func, arg)
except Exception as e:
Fail("unexpected exception %s when calling %s with %s\n%s" % (e, func, arg, func.__doc__))
else:
AreEqual(Flag.Value, flagValue)
Flag.Value = -188
for arg in negativeArgs:
try:
_my_call(func, arg)
except Exception as e:
if not isinstance(e, exceptType):
Fail("expected '%s', but got '%s' when calling %s with %s\n%s" % (exceptType, e, func, arg, func.__doc__))
else:
Fail("expected exception (but didn't get one) when calling func %s on args %s\n%s" % (func, arg, func.__doc__))
def test_this_matrix():
'''
This will test the full matrix.
To print the matrix, enable the following flag
'''
print_the_matrix = False
funcnames = "M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400".split()
matrix = (
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( "SByteMax", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "ByteMax", True, True, True, True, True, TypeE, OverF, True, True, True, True, True, True, True, TypeE, True, True, ),
( "Int16Max", True, True, True, True, True, TypeE, OverF, True, True, True, OverF, True, True, True, TypeE, True, True, ),
( "UInt16Max", True, True, True, True, True, TypeE, OverF, OverF, True, True, OverF, True, True, True, TypeE, True, True, ),
( "intMax", True, True, True, True, True, TypeE, OverF, OverF, True, True, OverF, OverF, True, True, TypeE, True, True, ),
( "UInt32Max", OverF, OverF, True, True, True, TypeE, OverF, OverF, True, True, OverF, OverF, True, True, TypeE, True, True, ),
( "Int64Max", OverF, OverF, True, True, True, TypeE, OverF, OverF, True, True, OverF, OverF, OverF, True, TypeE, True, True, ),
( "UInt64Max", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, True, TypeE, True, True, ),
( "DecimalMax", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "SingleMax", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, OverF, True, ),
( "floatMax", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, OverF, True, ),
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( "SByteMin", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "ByteMin", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "Int16Min", True, True, True, True, True, TypeE, OverF, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "UInt16Min", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "intMin", True, True, True, True, True, TypeE, OverF, OverF, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "UInt32Min", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "Int64Min", OverF, OverF, True, True, True, TypeE, OverF, OverF, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "UInt64Min", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "DecimalMin", OverF, OverF, True , True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, True , True, ),
( "SingleMin", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, OverF, True, ),
( "floatMin", OverF, OverF, True, True, True, TypeE, OverF, OverF, OverF, True, OverF, OverF, OverF, OverF, TypeE, OverF, True, ),
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( "SBytePlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "BytePlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "Int16PlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "UInt16PlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "intPlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( myint1, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "UInt32PlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "Int64PlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "UInt64PlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "DecimalPlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "SinglePlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( "floatPlusOne", True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( myfloat1, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( "SByteMinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "Int16MinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "intMinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( myint2, True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "Int64MinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "DecimalMinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "SingleMinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( "floatMinusOne", True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( myfloat2, True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
################################################## pass in bool #########################################################
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( True, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( False, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
################################################## pass in BigInt #########################################################
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
( 10, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( -10, True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
( 1234567890123456, OverF, OverF, True , True, True, TypeE, OverF, OverF, True, True, OverF, OverF, OverF, True, TypeE, True, True, ),
( mylong1, True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ),
( mylong2, True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ),
################################################## pass in Complex #########################################################
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
( (3+0j), TypeE, TypeE, TypeE, TypeE, True, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, True, ),
( (3+1j), TypeE, TypeE, TypeE, TypeE, True, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, True, ),
( mycomplex1, TypeE, TypeE, TypeE, TypeE, True, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, True, )
)
if is_silverlight==False:
InvariantCulture = System.Globalization.CultureInfo.InvariantCulture
matrix = list(matrix)
################################################## pass in char #########################################################
#### M201 M680 M202 M203 M204 M205 M301 M302 M303 M304 M310 M311 M312 M313 M320 M321 M400
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
matrix.append((System.Char.Parse('A'), TypeE, TypeE, TypeE, TypeE, True, True, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, TypeE, True, True, True, ))
################################################## pass in float #########################################################
#### single/double becomes Int32, but this does not apply to other primitive types
#### int int? double bigint bool str sbyte i16 i64 single byte ui16 ui32 ui64 char decm obj
matrix.append((System.Single.Parse("8.01", InvariantCulture), True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ))
matrix.append((System.Double.Parse("10.2", InvariantCulture), True, True, True, True, True, TypeE, True, True, True, True, True, True, True, True, TypeE, True, True, ))
matrix.append((System.Single.Parse("-8.1", InvariantCulture), True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ))
matrix.append((System.Double.Parse("-1.8", InvariantCulture), True, True, True, True, True, TypeE, True, True, True, True, OverF, OverF, OverF, OverF, TypeE, True, True, ))
matrix = tuple(matrix)
for scenario in matrix:
if isinstance(scenario[0], str):
value = clr_numbers[scenario[0]]
if print_the_matrix: print('(%18s,' % ('"'+ scenario[0] +'"'), end=' ')
else:
value = scenario[0]
if print_the_matrix: print('(%18s,' % value, end=' ')
for i in range(len(funcnames)):
funcname = funcnames[i]
func = getattr(target, funcname)
if print_the_matrix:
try:
func(value)
print("True, ", end=' ')
except TypeError:
print("TypeE,", end=' ')
except OverflowError:
print("OverF,", end=' ')
print("),")
else:
try:
func(value)
except Exception as e:
if scenario[i+1] not in [TypeE, OverF]:
Fail("unexpected exception %s, when func %s on arg %s (%s)\n%s" % (e, funcname, scenario[0], type(value), func.__doc__))
if isinstance(e, scenario[i+1]): pass
else: Fail("expect %s, but got %s when func %s on arg %s (%s)\n%s" % (scenario[i+1], e, funcname, scenario[0], type(value), func.__doc__))
else:
if scenario[i+1] in [TypeE, OverF]:
Fail("expect %s, but got none when func %s on arg %s (%s)\n%s" % (scenario[i+1], funcname, scenario[0], type(value), func.__doc__))
left = Flag.Value ; Flag.Value = -99 # reset
right = int(funcname[1:])
if left != right:
Fail("left %s != right %s when func %s on arg %s (%s)\n%s" % (left, right, funcname, scenario[0], type(value), func.__doc__))
# these funcs should behave the same as M201(Int32)
# should have NullableInt too ?
for funcname in _get_funcs('RefInt32 ParamArrInt32 Int32ParamArrInt32'):
for scenario in matrix:
if isinstance(scenario[0], str): value = clr_numbers[scenario[0]]
else: value = scenario[0]
func = getattr(target, funcname)
if scenario[1] not in [TypeE, OverF]:
func(value)
left = Flag.Value
right = int(funcname[1:])
if left != right:
Fail("left %s != right %s when func %s on arg %s" % (left, right, funcname, scenario[0]))
Flag.Value = -99 # reset
else:
try: func(value)
except scenario[1]: pass # 1 is M201
else: Fail("expect %s, but got none when func %s on arg %s" % (scenario[1], funcname, scenario[0]))
def test_char_string_asked():
# char asked
_helper(target.M320, ['a', System.Char.MaxValue, System.Char.MinValue, 'abc'[2]], 320, ['abc', ('a b')], TypeError)
# string asked
_helper(target.M205, ['a', System.Char.MaxValue, System.Char.MinValue, 'abc'[2], 'abc', 'a b' ], 205, [('a', 'b'), 23, ], TypeError)
def test_pass_extensible_types():
# number covered by that matrix
# string or char
mystr1, mystr2 = mystr('a'), mystr('abc')
_helper(target.M205, [mystr1, mystr2, ], 205, [], TypeError) # String
_helper(target.M320, [mystr1, ], 320, [mystr2, ], TypeError) # Char
# check the bool conversion result
def test_bool_asked():
for arg in ['a', 3, object(), True]:
target.M204(arg)
Assert(Flag.BValue, "argument is %s" % arg)
Flag.BValue = False
if is_silverlight==False:
for arg in [0, System.Byte.Parse('0'), System.UInt64.Parse('0'), 0.0, 0, False, None, tuple(), list()]:
target.M204(arg)
Assert(not Flag.BValue, "argument is %s" % (arg,))
Flag.BValue = True
def test_user_defined_conversion():
class CP1:
def __int__(self): return 100
class CP2(object):
def __int__(self): return 99
class CP3: pass
cp1, cp2, cp3 = CP1(), CP2(), CP3()
### 1. does not work when Nullable<Int32> is required (?)
### 2. (out int): should pass in nothing
### int params int int? ref int defVal int+defVal
works = 'M201 M600 M680 M620 M700 M710 M715'
for fn in works.split():
_helper(getattr(target, fn), [cp1, cp2, ], int(fn[1:]), [cp3, ], TypeError)
for fn in dir(target):
### bool obj
if _self_defined_method(fn) and fn not in (works + 'M204 M400 '):
_helper(getattr(target, fn), [], 0, [cp1, cp2, cp3, ], TypeError)
def test_pass_in_derived_python_types():
class CP1(I): pass
class CP2(C1): pass
class CP3(C2): pass
class CP4(C6, I): pass
cp1, cp2, cp3, cp4 = CP1(), CP2(), CP3(), CP4()
# I asked
_helper(target.M401, [C1(), C2(), S1(), cp1, cp2, cp3, cp4,], 401,[C3(), object()], TypeError)
# C2 asked
_helper(target.M403, [C2(), cp3, ], 403, [C3(), object(), C1(), cp1, cp2, cp4, ], TypeError)
class CP1(A): pass
class CP2(C6): pass
cp1, cp2 = CP1(), CP2()
# A asked
_helper(target.M410, [C6(), cp1, cp2, cp4,], 410, [C3(), object(), C1(), cp3, ], TypeError)
# C6 asked
_helper(target.M411, [C6(), cp2, cp4, ], 411, [C3(), object(), C1(), cp1, cp3,], TypeError)
def test_nullable_int():
_helper(target.M680, [None, 100, 100, System.Byte.MaxValue, System.UInt32.MinValue, myint1, mylong2, 3.6, ], 680, [(), 3+1j], TypeError)
def test_out_int():
if is_silverlight==False:
_helper(target.M701, [], 701, [1, 10, None, System.Byte.Parse('3')], TypeError) # not allow to pass in anything
def test_collections():
arrayInt = array_int((10, 20))
tupleInt = ((10, 20), )
listInt = ([10, 20], )
tupleBool = ((True, False, True, True, False), )
tupleLong1, tupleLong2 = ((10, 20), ), ((System.Int64.MaxValue, System.Int32.MaxValue * 2),)
arrayByte = array_byte((10, 20))
arrayObj = array_object(['str', 10])
# IList<int>
_helper(target.M650, [arrayInt, tupleInt, listInt, arrayObj, tupleLong1, tupleLong2, ], 650, [arrayByte, ], TypeError)
# arrayObj, tupleLong1, tupleLong2 : conversion happens late
# Array
_helper(target.M651, [arrayInt, arrayObj, arrayByte, ], 651, [listInt, tupleInt, tupleLong1, tupleLong2, ], TypeError)
# IEnumerable[int]
_helper(target.M652, [arrayInt, arrayObj, arrayByte, listInt, tupleInt, tupleLong1, tupleLong2, ], 652, [], TypeError)
# IEnumerator[int]
_helper(target.M653, [], 653, [arrayInt, arrayObj, arrayByte, listInt, tupleInt, tupleLong1, tupleLong2, ], TypeError)
# Int32[]
_helper(target.M500, [arrayInt, tupleInt, tupleLong1, tupleBool, ], 500, [listInt, arrayByte, arrayObj, ], TypeError)
_helper(target.M500, [], 500, [tupleLong2, ], OverflowError)
# params Int32[]
_helper(target.M600, [arrayInt, tupleInt, tupleLong1, tupleBool, ], 600, [listInt, arrayByte, arrayObj, ], TypeError)
_helper(target.M600, [], 600, [tupleLong2, ], OverflowError)
# Int32, params Int32[]
_helper(target.M620, [(10, 10), (10, 10), (10, 10), (10, 10), (10, arrayInt), (10, (10, 20)), ], 620, [(10, [10, 20]), ], TypeError)
_helper(target.M620, [], 620, [(10, 123456789101234), ], OverflowError)
arrayI1 = System.Array[I]( (C1(), C2()) )
arrayI2 = System.Array[I]( () )
arrayObj3 = System.Array[object]( (C1(), C2()) )
tupleI = ((C1(), C2()),)
listI = ([C1(), C2()],)
_helper(target.M510, [arrayI1, arrayI2, tupleI, ], 510, [arrayObj3, listI, ], TypeError) # I[]
_helper(target.M610, [arrayI1, arrayI2, tupleI, ], 610, [arrayObj3, listI, ], TypeError) # params I[]
def test_no_arg_asked():
# no args asked
_helper(target.M100, [()], 100, [2, None, (2, None)], TypeError)
def test_enum():
# E1 asked
_helper(target.M450, [E1.A, ], 450, [10, E2.A], TypeError)
# E2: ushort asked
if is_silverlight==False:
_helper(target.M451, [E2.A, ], 451, [10, E1.A, System.UInt16.Parse("3")], TypeError)
def _repeat_with_one_arg(goodStr, getArg):
passSet = _get_funcs(goodStr)
skipSet = []
for fn in passSet:
if fn in skipSet: continue
arg = getArg()
getattr(target, fn)(arg)
left = Flag.Value
right = int(fn[1:])
if left != right:
Fail("left %s != right %s when func %s on arg %s" % (left, right, fn, arg))
for fn in dir(target):
if _self_defined_method(fn) and (fn not in passSet) and (fn not in skipSet):
arg = getArg()
try: getattr(target, fn)(arg)
except TypeError : pass
else: Fail("expect TypeError, but got none when func %s on arg %s" % (fn, arg))
def test_pass_in_none():
test_str = '''
Bool String Object I C1 C2 A C6
ArrInt32 ArrI ParamArrInt32 ParamArrI ParamArrS IParamArrI
IListInt Array IEnumerableInt IEnumeratorInt NullableInt
'''
# Big integers are only nullable in CLR 2
if not is_net40:
test_str = "BigInt " + test_str
_repeat_with_one_arg(test_str, lambda : None)
def test_pass_in_clrReference():
import clr
_repeat_with_one_arg('Object RefInt32 OutInt32', lambda : clr.Reference[int](0))
_repeat_with_one_arg('Object', lambda : clr.Reference[object](None))
_repeat_with_one_arg('Object RefInt32 OutInt32', lambda : clr.Reference[int](10))
_repeat_with_one_arg('Object ', lambda : clr.Reference[float](123.123))
_repeat_with_one_arg('Object', lambda : clr.Reference[type](str)) # ref.Value = (type)
def test_pass_in_nothing():
passSet = _get_funcs('NoArg ParamArrInt32 ParamArrS ParamArrI OutInt32 DefValInt32')
skipSet = []  # should be empty before release
for fn in passSet:
if fn in skipSet: continue
getattr(target, fn)()
left = Flag.Value
right = int(fn[1:])
if left != right:
Fail("left %s != right %s when func %s on arg Nothing" % (left, right, fn))
for fn in dir(target):
if _self_defined_method(fn) and (fn not in passSet) and (fn not in skipSet):
try: getattr(target, fn)()
except TypeError : pass
else: Fail("expect TypeError, but got none when func %s on arg Nothing" % fn)
def test_other_concern():
target = COtherConcern()
# static void M100()
target.M100()
AreEqual(Flag.Value, 100); Flag.Value = 99
COtherConcern.M100()
AreEqual(Flag.Value, 100); Flag.Value = 99
AssertError(TypeError, target.M100, target)
AssertError(TypeError, COtherConcern.M100, target)
# static void M101(COtherConcern arg)
target.M101(target)
AreEqual(Flag.Value, 101); Flag.Value = 99
COtherConcern.M101(target)
AreEqual(Flag.Value, 101); Flag.Value = 99
AssertError(TypeError, target.M101)
AssertError(TypeError, COtherConcern.M101)
# void M102(COtherConcern arg)
target.M102(target)
AreEqual(Flag.Value, 102); Flag.Value = 99
COtherConcern.M102(target, target)
AreEqual(Flag.Value, 102); Flag.Value = 99
AssertError(TypeError, target.M102)
AssertError(TypeError, COtherConcern.M102, target)
# generic method
target.M200[int](100)
AreEqual(Flag.Value, 200); Flag.Value = 99
target.M200[int](100.1234)
AreEqual(Flag.Value, 200); Flag.Value = 99
target.M200[int](100)
AreEqual(Flag.Value, 200); Flag.Value = 99
AssertError(OverflowError, target.M200[System.Byte], 300)
AssertError(OverflowError, target.M200[int], 12345678901234)
# We should ignore the Out attribute on non-byref parameters.
# It's used in native interop scenarios to designate a buffer (StringBuilder, arrays, etc.)
# that the caller allocates and passes to the method, expecting the callee to populate it with data.
AssertError(TypeError, target.M222)
AreEqual(target.M222(0), None)
AreEqual(Flag.Value, 222)
# what it means to pass in None
target.M300(None)
AreEqual(Flag.Value, 300); Flag.Value = 99
AreEqual(Flag.BValue, True)
target.M300(C1())
AreEqual(Flag.BValue, False)
# void M400(ref Int32 arg1, out Int32 arg2, Int32 arg3) etc...
AreEqual(target.M400(1, 100), (100, 100))
AreEqual(target.M401(1, 100), (100, 100))
AreEqual(target.M402(100, 1), (100, 100))
# default Value
target.M450()
AreEqual(Flag.Value, 80); Flag.Value = 99
# 8 args
target.M500(1,2,3,4,5,6,7,8)
AreEqual(Flag.Value, 500)
AssertError(TypeError, target.M500)
AssertError(TypeError, target.M500, 1)
AssertError(TypeError, target.M500, 1,2,3,4,5,6,7,8,9)
# IDictionary
for x in [ {1:1}, {"str": 3} ]:
target.M550(x)
AreEqual(Flag.Value, 550); Flag.Value = 99
AssertError(TypeError, target.M550, [1, 2])
# not supported
for fn in (target.M600, target.M601, target.M602):
for l in ( {1:'a'}, [1,2], (1,2) ):
AssertError(TypeError, fn, l)
# delegate
def f(x): return x * x
AssertError(TypeError, target.M700, f)
from IronPythonTest import IntIntDelegate
for x in (lambda x: x, lambda x: x*2, f):
target.M700(IntIntDelegate(x))
AreEqual(Flag.Value, x(10)); Flag.Value = 99
target.M701(lambda x: x*2)
AreEqual(Flag.Value, 20); Flag.Value = 99
AssertError(TypeError, target.M701, lambda : 10)
# keywords
x = target.M800(arg1 = 100, arg2 = 200, arg3 = 'this'); AreEqual(x, 'THIS')
x = target.M800(arg3 = 'Python', arg1 = 100, arg2 = 200); AreEqual(x, 'PYTHON')
x = target.M800(100, arg3 = 'iron', arg2 = C1()); AreEqual(x, 'IRON')
try: target.M800(100, 'Yes', arg2 = C1())
except TypeError: pass
else: Fail("expect: got multiple values for keyword argument arg2")
# more ref/out sanity check
import clr
def f1(): return clr.Reference[object](None)
def f2(): return clr.Reference[int](10)
def f3(): return clr.Reference[S1](S1())
def f4(): return clr.Reference[C1](C2()) # C2 inherits C1
for (f, a, b, c, d) in [
('M850', False, False, True, False),
('M851', False, False, False, True),
('M852', False, False, True, False),
('M853', False, False, False, True),
]:
expect = (f in 'M850 M852') and S1 or C1
func = getattr(target, f)
for i in range(4):
ref = (f1, f2, f3, f4)[i]()
if (a,b,c,d)[i]:
func(ref); AreEqual(type(ref.Value), expect)
else:
AssertError(TypeError, func, ref)
# call 854
AssertError(TypeError, target.M854, clr.Reference[object](None))
AssertError(TypeError, target.M854, clr.Reference[int](10))
# call 855
AssertError(TypeError, target.M855, clr.Reference[object](None))
AssertError(TypeError, target.M855, clr.Reference[int](10))
# call 854 and 855 with Reference[bool]
target.M854(clr.Reference[bool](True)); AreEqual(Flag.Value, 854)
target.M855(clr.Reference[bool](True)); AreEqual(Flag.Value, 855)
# practical
ref = clr.Reference[int](0)
ref2 = clr.Reference[int](0)
ref.Value = 300
ref2.Value = 100
## M860(ref arg1, arg2, out arg3): arg3 = arg1 + arg2; arg1 = 100;
x = target.M860(ref, 200, ref2)
AreEqual(x, None)
AreEqual(ref.Value, 100)
AreEqual(ref2.Value, 500)
# pass one clr.Reference(), and leave the other one open
ref.Value = 300
AssertError(TypeError, target.M860, ref, 200)
# the other way
x = target.M860(300, 200)
AreEqual(x, (100, 500))
# GOtherConcern<T>
target = GOtherConcern[int]()
for x in [100, 200, 4.56, myint1]:
target.M100(x)
AreEqual(Flag.Value, 100); Flag.Value = 99
GOtherConcern[int].M100(target, 200)
AreEqual(Flag.Value, 100); Flag.Value = 99
AssertError(TypeError, target.M100, 'abc')
AssertError(OverflowError, target.M100, 12345678901234)
def test_iterator_sequence():
class C:
def __init__(self): self.x = 0
def __iter__(self): return self
def __next__(self):
if self.x < 10:
y = self.x
self.x += 1
return y
else:
self.x = 0
raise StopIteration
def __len__(self): return 10
# different size
c = C()
list1 = [1, 2, 3]
tuple1 = (4, 5, 6, 7)
str1 = "890123"
all = (list1, tuple1, str1, c)
target = COtherConcern()
for x in all:
# IEnumerable / IEnumerator
target.M620(x)
AreEqual(Flag.Value, len(x)); Flag.Value = 0
# built-in types are not IEnumerator, they are enumerable
if not isinstance(x, C):
AssertError(TypeError, target.M621, x)
else:
target.M621(x)
AreEqual(Flag.Value, len(x))
# IEnumerable<char> / IEnumerator<char>
target.M630(x)
AreEqual(Flag.Value, len(x)); Flag.Value = 0
AssertError(TypeError, target.M631, x)
# IEnumerable<int> / IEnumerator<int>
target.M640(x)
AreEqual(Flag.Value, len(x)); Flag.Value = 0
AssertError(TypeError, target.M641, x)
# IList / IList<char> / IList<int>
for x in (list1, tuple1):
target.M622(x)
AreEqual(Flag.Value, len(x))
target.M632(x)
AreEqual(Flag.Value, len(x))
target.M642(x)
AreEqual(Flag.Value, len(x))
for x in (str1, c):
AssertError(TypeError, target.M622, x)
AssertError(TypeError, target.M632, x)
AssertError(TypeError, target.M642, x)
def test_explicit_inheritance():
target = CInheritMany1()
Assert(hasattr(target, "M"))
target.M()
AreEqual(Flag.Value, 100)
I1.M(target); AreEqual(Flag.Value, 100); Flag.Value = 0
target = CInheritMany2()
target.M(); AreEqual(Flag.Value, 201)
I1.M(target); AreEqual(Flag.Value, 200)
target = CInheritMany3()
Assert(not hasattr(target, "M"))
try: target.M()
except AttributeError: pass
else: Fail("Expected AttributeError, got none")
I1.M(target); AreEqual(Flag.Value, 300)
I2.M(target); AreEqual(Flag.Value, 301)
target = CInheritMany4()
target.M(); AreEqual(Flag.Value, 401)
I3[object].M(target); AreEqual(Flag.Value, 400)
AssertError(TypeError, I3[int].M, target)
target = CInheritMany5()
I1.M(target); AreEqual(Flag.Value, 500)
I2.M(target); AreEqual(Flag.Value, 501)
I3[object].M(target); AreEqual(Flag.Value, 502)
target.M(); AreEqual(Flag.Value, 503)
target = CInheritMany6[int]()
target.M(); AreEqual(Flag.Value, 601)
I3[int].M(target); AreEqual(Flag.Value, 600)
AssertError(TypeError, I3[object].M, target)
target = CInheritMany7[int]()
Assert(hasattr(target, "M"))
target.M(); AreEqual(Flag.Value, 700)
I3[int].M(target); AreEqual(Flag.Value, 700)
target = CInheritMany8()
Assert(not hasattr(target, "M"))
try: target.M()
except AttributeError: pass
else: Fail("Expected AttributeError, got none")
I1.M(target); AreEqual(Flag.Value, 800); Flag.Value = 0
I4.M(target, 100); AreEqual(Flag.Value, 801)
# target.M(100) ????
# original repro
from System.Collections.Generic import Dictionary
d = Dictionary[object,object]()
d.GetEnumerator() # not throw
def test_nullable_property_double():
from IronPythonTest import NullableTest
nt = NullableTest()
nt.DProperty = 1
AreEqual(nt.DProperty, 1.0)
nt.DProperty = 2.0
AreEqual(nt.DProperty, 2.0)
nt.DProperty = None
AreEqual(nt.DProperty, None)
@disabled("Merlin 309716")
def test_nullable_property_long():
from IronPythonTest import NullableTest
nt = NullableTest()
nt.LProperty = 1
AreEqual(nt.LProperty, 1)
nt.LProperty = 2
AreEqual(nt.LProperty, 2)
nt.LProperty = None
AreEqual(nt.LProperty, None)
def test_nullable_property_bool():
from IronPythonTest import NullableTest
nt = NullableTest()
nt.BProperty = 1.0
AreEqual(nt.BProperty, True)
nt.BProperty = 0.0
AreEqual(nt.BProperty, False)
nt.BProperty = True
AreEqual(nt.BProperty, True)
nt.BProperty = None
AreEqual(nt.BProperty, None)
def test_nullable_property_enum():
from IronPythonTest import NullableTest
nt = NullableTest()
nt.EProperty = NullableTest.NullableEnums.NE1
AreEqual(nt.EProperty, NullableTest.NullableEnums.NE1)
nt.EProperty = None
AreEqual(nt.EProperty, None)
def test_nullable_parameter():
from IronPythonTest import NullableTest
nt = NullableTest()
result = nt.Method(1)
AreEqual(result, 1.0)
result = nt.Method(2.0)
AreEqual(result, 2.0)
result = nt.Method(None)
AreEqual(result, None)
# Skip on silverlight because the System.Configuration is not available
@skip("silverlight")
def test_xequals_call_for_optimization():
"""
Testing specifically for System.Configuration.ConfigurationManager
because currently its .Equals method will throw null reference
exception when called with null argument. This is a case that could
slip through our dynamic site checks.
"""
import clr
clr.AddReference("System.Configuration");
from System.Configuration import ConfigurationManager
c = ConfigurationManager.ConnectionStrings
#Invoke tests multiple times to make sure DynamicSites are utilized
for i in range(3):
AreEqual(1, c.Count)
for i in range(3):
count = c.Count
AreEqual(1, count)
AreEqual(c.Count, count)
for i in range(3):
#just ensure it doesn't throw
c[0].Name
#Just to be sure this doesn't throw...
c.Count
c.Count
def test_interface_only_access():
pc = InterfaceOnlyTest.PrivateClass
# property set
pc.Hello = InterfaceOnlyTest.PrivateClass
# property get
AreEqual(pc.Hello, pc)
# method call w/ interface param
pc.Foo(pc)
# method call w/ interface ret val
AreEqual(pc.RetInterface(), pc)
# events
global fired
fired = False
def fired(*args):
global fired
fired = True
return args[0]
# add event
pc.MyEvent += fired
# fire event
AreEqual(pc.FireEvent(pc.GetEventArgs()), pc)
AreEqual(fired, True)
# remove event
pc.MyEvent -= fired
def test_ref_bytearr():
target = COtherConcern()
arr = System.Array[System.Byte]((2,3,4))
res = target.M702(arr)
AreEqual(Flag.Value, 702)
AreEqual(type(res), System.Array[System.Byte])
AreEqual(len(res), 0)
i, res = target.M703(arr)
AreEqual(Flag.Value, 703)
AreEqual(i, 42)
AreEqual(type(res), System.Array[System.Byte])
AreEqual(len(res), 0)
i, res = target.M704(arr, arr)
AreEqual(Flag.Value, 704)
AreEqual(i, 42)
AreEqual(arr, res)
sarr = clr.StrongBox[System.Array[System.Byte]](arr)
res = target.M702(sarr)
AreEqual(Flag.Value, 702)
AreEqual(res, None)
res = sarr.Value
AreEqual(type(res), System.Array[System.Byte])
AreEqual(len(res), 0)
sarr.Value = arr
i = target.M703(sarr)
AreEqual(Flag.Value, 703)
AreEqual(i, 42)
AreEqual(len(sarr.Value), 0)
i = target.M704(arr, sarr)
AreEqual(Flag.Value, 704)
AreEqual(i, 42)
AreEqual(sarr.Value, arr)
def test_struct_prop_assign():
from IronPythonTest.BinderTest import SOtherConcern
a = SOtherConcern()
a.P100 = 42
AreEqual(a.P100, 42)
def test_generic_type_inference():
from IronPythonTest import GenericTypeInference, GenericTypeInferenceInstance, SelfEnumerable
from System import Array, Exception, ArgumentException
from System.Collections.Generic import IEnumerable, List
from System.Collections.Generic import Dictionary as Dict
class UserGenericType(GenericTypeInferenceInstance): pass
# public PythonType MInst<T>(T x) -> pytype(T)
AreEqual(UserGenericType().MInst(42), int)
class UserObject(object): pass
userInst = UserObject()
userInt, userLong, userFloat, userComplex, userStr = myint(), mylong(), myfloat(), mycomplex(), mystr()
userTuple, userList, userDict = mytuple(), mylist(), mydict()
objArray = System.Array[object]( (1,2,3) )
doubleArray = System.Array[float]( (1.0,2.0,3.0) )
for target in [GenericTypeInference, GenericTypeInferenceInstance(), UserGenericType()]:
tests = [
# simple single type tests, no constraints
# public static PythonType M0<T>(T x) -> pytypeof(T)
# target method, args, Result, KeywordCall, Exception
(target.M0, (1, ), int, True, None),
(target.M0, (userInst, ), object, True, None),
(target.M0, (userInt, ), object, True, None),
(target.M0, (userStr, ), object, True, None),
(target.M0, (userLong, ), object, True, None),
(target.M0, (userFloat, ), object, True, None),
(target.M0, (userComplex, ), object, True, None),
(target.M0, (userTuple, ), tuple, True, None),
(target.M0, (userList, ), list, True, None),
(target.M0, (userDict, ), dict, True, None),
(target.M0, ((), ), tuple, True, None),
(target.M0, ([], ), list, True, None),
(target.M0, ({}, ), dict, True, None),
# multiple arguments
# public static PythonType M1<T>(T x, T y) -> pytypeof(T)
# public static PythonType M2<T>(T x, T y, T z) -> pytypeof(T)
(target.M1, (1, 2), int, True, None),
(target.M2, (1, 2, 3), int, True, None),
(target.M1, (userInst, userInst), object, True, None),
(target.M2, (userInst, userInst, userInst), object, True, None),
(target.M1, (1, 2.0), None, True, TypeError),
(target.M1, (1, 'abc'), None, True, TypeError),
(target.M1, (object(), userInst), object, True, None),
(target.M1, ([], userList), list, True, None),
# params arguments
# public static PythonType M3<T>(params T[] args) -> pytypeof(T)
(target.M3, (), None, False, TypeError),
(target.M3, (1, ), int, False, None),
(target.M3, (1, 2), int, False, None),
(target.M3, (1, 2, 3), int, False, None),
(target.M3, (1, 2.0), object, False, TypeError),
(target.M3, (1, 'abc'), object, False, TypeError),
(target.M3, (object(), userInst), object, False, None),
(target.M3, ([], userList), list, False, None),
# public static PythonType M4<T>(T x, params T[] args) -> pytypeof(T)
(target.M4, (1, 2), int, False, None),
(target.M4, (1, 2.0), object, False, TypeError),
(target.M4, (1, 'abc'), object, False, TypeError),
(target.M4, (object(), userInst), object, False, None),
(target.M4, ([], userList), list, False, None),
# simple constraints
# public static PythonType M5<T>(T x) where T : class -> pytype(T)
# public static PythonType M6<T>(T x) where T : struct -> pytype(T)
# public static PythonType M7<T>(T x) where T : IList -> pytype(T)
(target.M5, (1, ), None, False, TypeError),
(target.M6, ('abc', ), None, False, TypeError),
(target.M7, (object(), ), None, False, TypeError),
(target.M7, (2, ), None, False, TypeError),
(target.M5, ('abc', ), str, False, None),
(target.M5, (object(), ), object, False, None),
(target.M6, (1, ), int, False, None),
(target.M7, ([], ), list, False, None),
(target.M7, (objArray, ), type(objArray),False, None),
# simple dependent constraints
# public static PythonTuple M8<T0, T1>(T0 x, T1 y) where T0 : T1 -> (pytype(T0), pytype(T1))
(target.M8, (1, 2), (int, int), False, None),
(target.M8, ('abc', object()), (str, object),False, None),
(target.M8, (object(), 'abc'), None, False, TypeError),
(target.M8, (1, object()), (int, object),False, None),
(target.M8, (object(), 1), None, False, TypeError),
# no types can be inferred, error
# public static PythonTuple M9<T0, T1>(object x, T1 y) where T0 : T1
# public static PythonTuple M9b<T0, T1>(T0 x, object y) where T0 : T1
# public static PythonType M11<T>(object x)
# public static PythonType M12<T0, T1>(T0 x, object y)
(target.M9, (1, 2), None, False, TypeError),
(target.M9b, (1, 2), None, False, TypeError),
(target.M9, (object(), object()), None, True, TypeError),
(target.M9b, (object(), object()), None, True, TypeError),
(target.M11, (1, ), None, False, TypeError),
(target.M12, (1, 2), None, False, TypeError),
# multiple dependent constraints
# public static PythonTuple M10<T0, T1, T2>(T0 x, T1 y, T2 z) where T0 : T1 where T1 : T2 -> (pytype(T0), pytype(T1), pytype(T2))
(target.M10, (ArgumentException(), Exception(), object()), (ArgumentException, Exception, object),False, None),
(target.M10, (Exception(), ArgumentException(), object()), None,False, TypeError),
(target.M10, (ArgumentException(), object(), Exception()), None,False, TypeError),
(target.M10, (object(), ArgumentException(), Exception()), None,False, TypeError),
(target.M10, (object(), Exception(), ArgumentException()), None,False, TypeError),
# public static PythonType M11<T>(object x) -> pytypeof(T)
# public static PythonType M12<T0, T1>(T0 x, object y) -> pytypeof(T0)
(target.M11, (object(), ), None, True, TypeError),
(target.M12, (3, object()), None, True, TypeError),
# public static PythonType M13<T>(T x, Func<T> y) -> pytype(T), func()
# public static PythonType M14<T>(T x, Action<T> y) -> pytype(T)
# public static PythonTuple M15<T>(T x, IList<T> y) -> pytype, list...
# public static PythonType M16<T>(T x, Dictionary<T, IList<T>> list) -> pytype, listKeys...
(target.M13, (1, lambda: 42), (object, 42), False, None),
(target.M14, (1, lambda x: None), object, False, None),
(target.M15, (1, [2, ]), (object, 2), True, None),
(target.M15, (1, (2, )), (object, 2), True, None),
(target.M15, (1, objArray), (object, 1,2,3), True, None),
(target.M15, (1, doubleArray), None, True, TypeError),
(target.M16, (1, {1: [1,2]}), None, False, TypeError),
# public static PythonType M17<T>(T x, IEnumerable<T> y) -> pytype(T)
(target.M17, (SelfEnumerable(), SelfEnumerable()), SelfEnumerable, True, None),
(target.M17, (1, [1,2,3]), object, True, None),
(target.M17, (1.0, [1,2,3]), object, True, None),
(target.M17, (object(), [1,2,3]), object, True, None),
# public static PythonType M18<T>(T x) where T : IEnumerable<T> -> pytype(T)
(target.M18, (SelfEnumerable(), ), SelfEnumerable, True, None),
# public static PythonType M19<T0, T1>(T0 x, T1 y) where T0 : IList<T1> -> pytype(T0), pytype(T1)
(target.M19, ([], 1), None, True, TypeError),
(target.M19, (List[int](), 1), (List[int], int), True, None),
# public static PythonType M20<T0, T1>(T0 x, T1 y) -> pytype(T0), pytype(T1)
(target.M20, ([], 1), (list, int), True, None),
(target.M20, (List[int](), 1), (List[int], int), True, None),
# constructed types
# public static PythonType M21<T>(IEnumerable<T> enumerable)
(target.M21, ([1,2,3], ), object, False, None),
# overloaded by function
# public static PythonTuple M22<T>(IEnumerable<T> enumerable, Func<T, bool> predicate) -> pytype(T), True
# public static PythonTuple M22<T>(IEnumerable<T> enumerable, Func<T, int, bool> predicate) -> pytype(T), False
(target.M22, ([1,2,3], lambda x:True), (object, True), True, None),
(target.M22, ([1,2,3], lambda x,y:True), (object, False), True, None),
# public static PythonType M23<T>(List<T> x) -> pytype(T)
# public static PythonType M24<T>(List<List<T>> x) -> pytype(T)
# public static PythonType M25<T>(Dictionary<T, T> x) -> pytype(T)
(target.M23, (List[int](), ), int, True, None),
(target.M24, (List[List[int]](), ), int, True, None),
(target.M25, (Dict[int, int](), ), int, True, None),
(target.M25, (Dict[int, str](), ), None, True, TypeError),
# constructed types and constraints
# public static PythonType M26<T>(List<T> x) where T : class -> pytype(T)
# public static PythonType M27<T>(List<T> x) where T : struct -> pytype(T)
# public static PythonType M28<T>(List<T> x) where T : new() -> pytype(T)
(target.M26, (List[int](), ), None, False, TypeError),
(target.M27, (List[str](), ), None, False, TypeError),
(target.M28, (List[str](), ), None, False, TypeError),
(target.M26, (List[str](), ), str, False, None),
(target.M27, (List[int](), ), int, False, None),
(target.M28, (List[List[str]](), ), List[str], False, None),
# public static PythonType M29<T>(Dictionary<Dictionary<T, T>, Dictionary<T, T>> x)
(target.M29, (Dict[Dict[int, int], Dict[int, int]](), ), int, True, None),
# constraints and constructed types
# public static PythonType M30<T>(Func<T, bool> y) where T : struct -> pytype(T)
# public static PythonType M31<T>(Func<T, bool> y) where T : IList -> pytype(T)
# public static PythonType M32<T>(List<T> y) where T : new() -> pytype(T)
# public static PythonType M33<T>(List<T> y) where T : class -> pytype(T)
(target.M30, (lambda x: False, ), int, True, TypeError),
(target.M31, (lambda x: False, ), int, True, TypeError),
(target.M32, (List[str](), ), int, True, TypeError),
(target.M33, (List[int](), ), int, True, TypeError),
# public static PythonType M34<T>(IList<T> x, IList<T> y) -> pytype(T)
(target.M34, ((), [], ), object, True, None),
# T[] and IList<T> overloads:
(target.M35, (objArray, ), System.Array[object], False, None),
]
# TODO: more by-ref and arrays tests:
x = Array.Resize(Array.CreateInstance(int, 10), 20)
AreEqual(x.Length, 20)
for method, args, res, kwArgs, excep in tests:
generic_method_tester(method, args, res, kwArgs, excep)
def generic_method_tester(method, args, res, kwArgs, excep):
#print method, args, res, excep
if excep is None:
# test method w/ multiple calling conventions
if len(args) == 1:
AreEqual(method(args[0]), res)
if kwArgs:
AreEqual(method(x = args[0]), res)
AreEqual(method(**{'x' : args[0]}), res)
elif len(args) == 2:
AreEqual(method(args[0], args[1]), res)
if kwArgs:
AreEqual(method(x = args[0], y = args[1]), res)
AreEqual(method(args[0], y = args[1]), res)
AreEqual(method(y = args[1], x = args[0]), res)
AreEqual(method(*(args[0], ), **{'y' : args[1]}), res)
AreEqual(method(**{'x' : args[0], 'y' : args[1]}), res)
elif len(args) == 3:
AreEqual(method(args[0], args[1], args[2]), res)
if kwArgs:
AreEqual(method(x = args[0], y = args[1], z = args[2]), res)
AreEqual(method(args[0], y = args[1], z = args[2]), res)
AreEqual(method(args[0], args[1], z = args[2]), res)
AreEqual(method(z = args[2], y = args[1], x = args[0]), res)
AreEqual(method(*(args[0], args[1]), **{'z' : args[2]}), res)
AreEqual(method(*(args[0], ), **{'y': args[1], 'z' : args[2]}), res)
AreEqual(method(**{'x' : args[0], 'y' : args[1], 'z' : args[2]}), res)
else:
raise Exception("need to add new case for len %d " % len(args))
AreEqual(method(*args), res)
AreEqual(method(args[0], *args[1:]), res)
else:
# test error method w/ multiple calling conventions
if len(args) == 0:
f = lambda : method()
fkw, fkw2 = None, None
elif len(args) == 1:
f = lambda : method(args[0])
fkw = lambda : method(x = args[0])
fkw2 = lambda : method(**{'x' : args[0]})
elif len(args) == 2:
f = lambda : method(args[0], args[1])
fkw = lambda : method(x = args[0], y = args[1])
fkw2 = lambda : method(**{'x' : args[0], 'y' : args[1]})
elif len(args) == 3:
f = lambda : method(args[0], args[1], args[2])
fkw = lambda : method(x = args[0], y = args[1], z = args[2])
fkw2 = lambda : method(**{'x' : args[0], 'y' : args[1], 'z' : args[2]})
else:
raise Exception("need to add new case for len %d " % len(args))
if not kwArgs:
fkw = None
fkw2 = None
# test w/o splatting
AssertError(excep, f)
if fkw: AssertError(excep, fkw)
if fkw2: AssertError(excep, fkw2)
# test with splatting
AssertError(excep, method, *args)
print('>>>> methods in reference type')
target = CNoOverloads()
run_test(__name__)
|
moto-timo/ironpython3
|
Tests/test_methodbinder1.py
|
Python
|
apache-2.0
| 57,530
|
from panda3d.core import *
from panda3d.direct import *
from NametagConstants import *
from Nametag3d import *
from Nametag2d import *
class NametagGroup:
CCNormal = CCNormal
CCNoChat = CCNoChat
CCNonPlayer = CCNonPlayer
CCSuit = CCSuit
CCToonBuilding = CCToonBuilding
CCSuitBuilding = CCSuitBuilding
CCHouseBuilding = CCHouseBuilding
CCSpeedChat = CCSpeedChat
CCFreeChat = CCFreeChat
CHAT_TIMEOUT_MAX = 12.0
CHAT_TIMEOUT_MIN = 4.0
CHAT_TIMEOUT_PROP = 0.5
def __init__(self):
self.nametag2d = Nametag2d()
self.nametag3d = Nametag3d()
self.icon = PandaNode('icon')
self.chatTimeoutTask = None
self.font = None
self.speechFont = None
self.name = ''
self.displayName = ''
self.wordWrap = None
self.qtColor = VBase4(1,1,1,1)
self.colorCode = CCNormal
self.avatar = None
self.active = True
self.chatPages = []
self.chatPage = 0
self.chatFlags = 0
self.objectCode = None
self.manager = None
self.nametags = []
self.addNametag(self.nametag2d)
self.addNametag(self.nametag3d)
self.visible3d = True # Is a 3D nametag visible, or do we need a 2D popup?
self.tickTask = taskMgr.add(self.__tickTask, self.getUniqueId(), sort=45)
self.stompTask = None
self.stompText = None
self.stompFlags = 0
def destroy(self):
taskMgr.remove(self.tickTask)
if self.manager is not None:
self.unmanage(self.manager)
for nametag in list(self.nametags):
self.removeNametag(nametag)
if self.stompTask:
self.stompTask.remove()
def getNametag2d(self):
return self.nametag2d
def getNametag3d(self):
return self.nametag3d
def getNameIcon(self):
return self.icon
def getNumChatPages(self):
if not self.chatFlags & (CFSpeech|CFThought):
return 0
return len(self.chatPages)
def setPageNumber(self, page):
self.chatPage = page
self.updateTags()
def getChatStomp(self):
return bool(self.stompTask)
def getChat(self):
if self.chatPage >= len(self.chatPages):
return ''
else:
return self.chatPages[self.chatPage]
def getStompText(self):
return self.stompText
def getStompDelay(self):
return 0.2
def getUniqueId(self):
return 'Nametag-%d' % id(self)
def hasButton(self):
return bool(self.getButtons())
def getButtons(self):
if self.getNumChatPages() < 2:
# Either only one page or no pages displayed. This means no button,
# unless the game code specifically requests one.
if self.chatFlags & CFQuitButton:
return NametagGlobals.quitButtons
elif self.chatFlags & CFPageButton:
return NametagGlobals.pageButtons
else:
return None
elif self.chatPage == self.getNumChatPages()-1:
# Last page of a multiple-page chat. This calls for a quit button,
# unless the game says otherwise.
if not self.chatFlags & CFNoQuitButton:
return NametagGlobals.quitButtons
else:
return None
else:
# Non-last page of a multiple-page chat. This calls for a page
# button, but only if the game requests it:
if self.chatFlags & CFPageButton:
return NametagGlobals.pageButtons
else:
return None
def setActive(self, active):
self.active = active
def isActive(self):
return self.active
def setAvatar(self, avatar):
self.avatar = avatar
def setFont(self, font):
self.font = font
self.updateTags()
def setSpeechFont(self, font):
self.speechFont = font
self.updateTags()
def setWordwrap(self, wrap):
self.wordWrap = wrap
self.updateTags()
def setColorCode(self, cc):
self.colorCode = cc
self.updateTags()
def setName(self, name):
self.name = name
self.updateTags()
def setDisplayName(self, name):
self.displayName = name
self.updateTags()
def setQtColor(self, color):
self.qtColor = color
self.updateTags()
def setChat(self, chatString, chatFlags):
if not self.chatFlags&CFSpeech:
# We aren't already displaying some chat. Therefore, we don't have
# to stomp.
self._setChat(chatString, chatFlags)
else:
# Stomp!
self.clearChat()
self.stompText = chatString
self.stompFlags = chatFlags
self.stompTask = taskMgr.doMethodLater(self.getStompDelay(), self.__updateStomp,
'ChatStomp-' + self.getUniqueId())
def _setChat(self, chatString, chatFlags):
if chatString:
self.chatPages = chatString.split('\x07')
self.chatFlags = chatFlags
else:
self.chatPages = []
self.chatFlags = 0
self.setPageNumber(0) # Calls updateTags() for us.
self._stopChatTimeout()
if chatFlags&CFTimeout:
self._startChatTimeout()
def __updateStomp(self, task):
self._setChat(self.stompText, self.stompFlags)
self.stompTask = None
def setContents(self, contents):
# This function is a little unique: it is meant to override contents on
# EXISTING nametags only:
for tag in self.nametags:
tag.setContents(contents)
def setObjectCode(self, objectCode):
self.objectCode = objectCode
def getObjectCode(self):
return self.objectCode
def _startChatTimeout(self):
length = len(self.getChat())
timeout = min(max(length*self.CHAT_TIMEOUT_PROP, self.CHAT_TIMEOUT_MIN), self.CHAT_TIMEOUT_MAX)
self.chatTimeoutTask = taskMgr.doMethodLater(timeout, self.__doChatTimeout,
'ChatTimeout-' + self.getUniqueId())
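# Worked example of the timeout above (illustrative only): a 20-character
# chat line gives min(max(20 * 0.5, 4.0), 12.0) = 10.0 seconds, while very
# short lines clamp to 4.0 seconds and very long lines to 12.0 seconds.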
def __doChatTimeout(self, task):
self._setChat('', 0)
return task.done
def _stopChatTimeout(self):
if self.chatTimeoutTask:
taskMgr.remove(self.chatTimeoutTask)
def clearShadow(self):
pass
def clearChat(self):
self._setChat('', 0)
if self.stompTask:
self.stompTask.remove()
def updateNametag(self, tag):
tag.font = self.font
tag.speechFont = self.speechFont
tag.name = self.name
tag.wordWrap = self.wordWrap or DEFAULT_WORDWRAPS[self.colorCode]
tag.displayName = self.displayName or self.name
tag.qtColor = self.qtColor
tag.colorCode = self.colorCode
tag.chatString = self.getChat()
tag.buttons = self.getButtons()
tag.chatFlags = self.chatFlags
tag.avatar = self.avatar
tag.icon = self.icon
tag.update()
def __testVisible3D(self):
# We must determine whether a 3D nametag is visible, since this
# affects the visibility state of 2D nametags.
# We iterate over all of our nametags until we find a visible 3D one:
for nametag in self.nametags:
if not isinstance(nametag, Nametag3d):
continue # It's not in the 3D system, disqualified.
if nametag.isOnScreen():
return True
# If we got here, none of the tags were a match...
return False
def __tickTask(self, task):
for nametag in self.nametags:
nametag.tick()
if (NametagGlobals.masterNametagsActive and self.active) or self.hasButton():
nametag.setClickRegionEvent(self.getUniqueId())
else:
nametag.setClickRegionEvent(None)
if NametagGlobals.onscreenChatForced and self.chatFlags & CFSpeech:
# Because we're *forcing* chat onscreen, we skip the visible3d test
# and go ahead and display it anyway.
visible3d = False
elif not NametagGlobals.masterArrowsOn and not self.chatFlags:
# We're forcing margins offscreen; therefore, we should pretend
# that the 3D nametag is always visible.
visible3d = True
else:
visible3d = self.__testVisible3D()
if visible3d ^ self.visible3d:
self.visible3d = visible3d
for nametag in self.nametags:
if isinstance(nametag, MarginPopup):
nametag.setVisible(not visible3d)
return task.cont
def updateTags(self):
for nametag in self.nametags:
self.updateNametag(nametag)
def addNametag(self, nametag):
self.nametags.append(nametag)
self.updateNametag(nametag)
if self.manager is not None and isinstance(nametag, MarginPopup):
nametag.manage(self.manager)
def removeNametag(self, nametag):
self.nametags.remove(nametag)
if self.manager is not None and isinstance(nametag, MarginPopup):
nametag.unmanage(self.manager)
nametag.destroy()
def manage(self, manager):
self.manager = manager
for tag in self.nametags:
if isinstance(tag, MarginPopup):
tag.manage(manager)
def unmanage(self, manager):
self.manager = None
for tag in self.nametags:
if isinstance(tag, MarginPopup):
tag.unmanage(manager)
tag.destroy()
|
silly-wacky-3-town-toon/SOURCE-COD
|
otp/nametag/NametagGroup.py
|
Python
|
apache-2.0
| 9,754
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python2 python3
"""Generator and discriminator for a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.progressive_gan import layers
class ResolutionSchedule(object):
"""Image resolution upscaling schedule."""
def __init__(self, start_resolutions=(4, 4), scale_base=2, num_resolutions=4):
"""Initializer.
Args:
start_resolutions: A tuple of integers in HxW format for the start image
resolutions. Defaults to (4, 4).
scale_base: An integer base multiplier for the resolution. Defaults to 2.
num_resolutions: An integer number of progressive resolutions (including
`start_resolutions`). Defaults to 4.
"""
self._start_resolutions = start_resolutions
self._scale_base = scale_base
self._num_resolutions = num_resolutions
@property
def start_resolutions(self):
return tuple(self._start_resolutions)
@property
def scale_base(self):
return self._scale_base
@property
def num_resolutions(self):
return self._num_resolutions
@property
def final_resolutions(self):
"""Returns the final resolutions."""
return tuple([
r * self._scale_base**(self._num_resolutions - 1)
for r in self._start_resolutions
])
def scale_factor(self, block_id):
"""Returns the scale factor for network block `block_id`."""
if block_id < 1 or block_id > self._num_resolutions:
raise ValueError('`block_id` must be in [1, {}]'.format(
self._num_resolutions))
return self._scale_base**(self._num_resolutions - block_id)
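# A quick worked example of the schedule above (illustrative only, using the
# defaults): ResolutionSchedule(start_resolutions=(4, 4), scale_base=2,
# num_resolutions=4) has final_resolutions == (32, 32), since 4 * 2**(4 - 1)
# = 32, and scale_factor(block_id) for block_id = 1..4 yields 8, 4, 2, 1.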
def block_name(block_id):
"""Returns the scope name for the network block `block_id`."""
return 'progressive_gan_block_{}'.format(block_id)
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks):
"""Returns the minimum total number of images.
Computes the minimum total number of images required to reach the desired
`resolution`.
Args:
stable_stage_num_images: Number of images in the stable stage.
transition_stage_num_images: Number of images in the transition stage.
num_blocks: Number of network blocks.
Returns:
An integer of the minimum total number of images.
"""
return (num_blocks * stable_stage_num_images +
(num_blocks - 1) * transition_stage_num_images)
def compute_progress(current_image_id, stable_stage_num_images,
transition_stage_num_images, num_blocks):
"""Computes the training progress.
The training alternates between stable stages and transition stages.
The `progress` value indicates the training progress, i.e. the training is at
- a stable stage p if progress = p
- a transition stage between p and p + 1 if progress = p + fraction
where p = 0, 1, 2, ...
Note the max value of progress is `num_blocks` - 1.
In terms of LOD (of the original implementation):
progress = `num_blocks` - 1 - LOD
Args:
current_image_id: A scalar integer `Tensor` of the current image id, counted
from 0.
stable_stage_num_images: An integer representing the number of images in
each stable stage.
transition_stage_num_images: An integer representing the number of images in
each transition stage.
num_blocks: Number of network blocks.
Returns:
A scalar float `Tensor` of the training progress.
"""
# Note when current_image_id >= min_total_num_images - 1 (which means we
# are already at the highest resolution), we want to keep progress constant.
# Therefore, cap current_image_id here.
capped_current_image_id = tf.minimum(
current_image_id,
min_total_num_images(stable_stage_num_images, transition_stage_num_images,
num_blocks) - 1)
stage_num_images = stable_stage_num_images + transition_stage_num_images
progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)
progress_fraction = tf.maximum(
0.0,
tf.cast(
tf.math.mod(capped_current_image_id, stage_num_images) -
stable_stage_num_images,
dtype=tf.float32) /
tf.cast(transition_stage_num_images, dtype=tf.float32))
return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction
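# Worked example of compute_progress (illustrative only): with
# stable_stage_num_images=100, transition_stage_num_images=100 and
# num_blocks=3, image id 150 falls halfway through the first transition
# stage, so progress = 0 + (150 % 200 - 100) / 100 = 0.5, while image id 250
# lies inside the second stable stage, so progress = 1.0.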
def _generator_alpha(block_id, progress):
"""Returns the block output parameter for the generator network.
The generator has N blocks with `block_id` = 1,2,...,N. Each block
block_id outputs a fake data output(block_id). The generator output is a
linear combination of all block outputs, i.e.
SUM_block_id(output(block_id) * alpha(block_id, progress)) where
alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
guarantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.
With a fixed block_id, the plot of alpha(block_id, progress) against progress
is a 'triangle' with its peak at (block_id - 1, 1).
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block output parameter.
"""
return tf.maximum(0.0,
tf.minimum(progress - (block_id - 2), block_id - progress))
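# Example of the 'triangle' shape (illustrative only): for block_id = 2 the
# alpha is max(0, min(progress, 2 - progress)), i.e. 0 at progress = 0,
# 1 at progress = 1 and 0 again for progress >= 2; during a transition the
# alphas of the two adjacent blocks always sum to 1.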
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays at 1
when progress <= block_id - 1, then decays linearly to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
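# Example (illustrative only): for block_id = 2 the alpha is 1.0 while
# progress <= 1, decays linearly from 1.0 to 0.0 as progress goes from 1 to 2,
# and stays at 0.0 afterwards, i.e. clip_by_value(2 - progress, 0.0, 1.0).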
def blend_images(x, progress, resolution_schedule, num_blocks):
"""Blends images of different resolutions according to `progress`.
When training `progress` is at a stable stage for resolution r, returns
image `x` downscaled to resolution r and then upscaled to `final_resolutions`,
call it x'(r).
Otherwise when training `progress` is at a transition stage from resolution
r to 2r, returns a linear combination of x'(r) and x'(2r).
Args:
x: An image `Tensor` of NHWC format with resolution `final_resolutions`.
progress: A scalar float `Tensor` of training progress.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks.
Returns:
An image `Tensor` which is a blend of images of different resolutions.
"""
x_blend = []
for block_id in range(1, num_blocks + 1):
alpha = _generator_alpha(block_id, progress)
scale = resolution_schedule.scale_factor(block_id)
x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))
return tf.add_n(x_blend)
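# Worked example (illustrative only, with the default 4x4/scale-2 schedule):
# at progress = 0.5, _generator_alpha gives 0.5 for both block 1 (4x4) and
# block 2 (8x8), so the blend is an equal mix of x downscaled to 4x4 and to
# 8x8, each upscaled back to final_resolutions before summing.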
def num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):
"""Computes number of filters of block `block_id`."""
return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))
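# Example with the defaults (illustrative only): num_filters(1) =
# int(min(4096 / 2**1, 256)) = 256 and num_filters(5) = int(min(4096 / 2**5,
# 256)) = 128, so early blocks sit at the 256 cap and later, higher-resolution
# blocks get fewer filters.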
def generator(z,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
colors=3,
to_rgb_activation=None,
scope='progressive_gan_generator',
reuse=None):
"""Generator network for the progressive GAN model.
Args:
z: A `Tensor` of latent vector. The first dimension must be batch size.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution_schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
colors: Number of output color channels. Defaults to 3.
to_rgb_activation: Activation function applied when output rgb.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
start_h, start_w = resolution_schedule.start_resolutions
final_h, final_w = resolution_schedule.final_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
he_initializer_slope=0.0,
scope=scope)
def _to_rgb(x):
return layers.custom_conv2d(
x=x,
filters=colors,
kernel_size=1,
padding='SAME',
activation=to_rgb_activation,
scope='to_rgb')
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
with tf.name_scope('input'):
x = tf.layers.flatten(z)
end_points['latent_vector'] = x
with tf.variable_scope(block_name(1)):
x = tf.expand_dims(tf.expand_dims(x, 1), 1)
x = layers.pixel_norm(x)
# Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
# with zeros for the next conv.
x = tf.pad(
tensor=x,
paddings=[[0] * 2, [start_h - 1] * 2, [start_w - 1] * 2, [0] * 2])
# The output is start_h x start_w x num_filters_fn(1).
x = _conv2d('conv0', x, (start_h, start_w), num_filters_fn(1), 'VALID')
x = _conv2d('conv1', x, kernel_size, num_filters_fn(1))
lods = [x]
for block_id in range(2, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
x = layers.upscale(x, resolution_schedule.scale_base)
x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id))
lods.append(x)
outputs = []
for block_id in range(1, num_blocks + 1):
with tf.variable_scope(block_name(block_id)):
lod = _to_rgb(lods[block_id - 1])
scale = resolution_schedule.scale_factor(block_id)
lod = layers.upscale(lod, scale)
end_points['upscaled_rgb_{}'.format(block_id)] = lod
# alpha_i is used to replace lod_select. Note sum(alpha_i) is
# guaranteed to be 1.
alpha = _generator_alpha(block_id, progress)
end_points['alpha_{}'.format(block_id)] = alpha
outputs.append(lod * alpha)
predictions = tf.add_n(outputs)
batch_size = tf.compat.dimension_value(z.shape[0])
predictions.set_shape([batch_size, final_h, final_w, colors])
end_points['predictions'] = predictions
return predictions, end_points
def discriminator(x,
progress,
num_filters_fn,
resolution_schedule,
num_blocks=None,
kernel_size=3,
scope='progressive_gan_discriminator',
reuse=None):
"""Discriminator network for the progressive GAN model.
Args:
x: A `Tensor` of NHWC format representing images of size `resolution`.
progress: A scalar float `Tensor` of training progress.
num_filters_fn: A function that maps `block_id` to # of filters for the
block.
resolution_schedule: An object of `ResolutionSchedule`.
num_blocks: An integer of number of blocks. None means maximum number of
blocks, i.e. `resolution_schedule.num_resolutions`. Defaults to None.
kernel_size: An integer of convolution kernel size.
scope: A string or variable scope.
reuse: Whether to reuse `scope`. Defaults to None which means to inherit the
reuse option of the parent scope.
Returns:
A `Tensor` of model output and a dictionary of model end points.
"""
if num_blocks is None:
num_blocks = resolution_schedule.num_resolutions
def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
return layers.custom_conv2d(
x=x,
filters=filters,
kernel_size=kernel_size,
padding=padding,
activation=tf.nn.leaky_relu,
he_initializer_slope=0.0,
scope=scope)
def _from_rgb(x, block_id):
return _conv2d('from_rgb', x, 1, num_filters_fn(block_id))
end_points = {}
with tf.variable_scope(scope, reuse=reuse):
x0 = x
end_points['rgb'] = x0
lods = []
for block_id in range(num_blocks, 0, -1):
with tf.variable_scope(block_name(block_id)):
scale = resolution_schedule.scale_factor(block_id)
lod = layers.downscale(x0, scale)
end_points['downscaled_rgb_{}'.format(block_id)] = lod
lod = _from_rgb(lod, block_id)
# alpha_i is used to replace lod_select.
alpha = _discriminator_alpha(block_id, progress)
end_points['alpha_{}'.format(block_id)] = alpha
lods.append((lod, alpha))
lods_iter = iter(lods)
x, _ = next(lods_iter)
for block_id in range(num_blocks, 1, -1):
with tf.variable_scope(block_name(block_id)):
x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id - 1))
x = layers.downscale(x, resolution_schedule.scale_base)
lod, alpha = next(lods_iter)
x = alpha * lod + (1.0 - alpha) * x
with tf.variable_scope(block_name(1)):
x = layers.scalar_concat(x, layers.minibatch_mean_stddev(x))
x = _conv2d('conv0', x, kernel_size, num_filters_fn(1))
x = _conv2d('conv1', x, resolution_schedule.start_resolutions,
num_filters_fn(0), 'VALID')
end_points['last_conv'] = x
logits = layers.custom_dense(x=x, units=1, scope='logits')
end_points['logits'] = logits
return logits, end_points
|
tensorflow/gan
|
tensorflow_gan/examples/progressive_gan/networks.py
|
Python
|
apache-2.0
| 15,202
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all active creative wrappers.
To create creative wrappers, run create_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeWrapperService.getCreativeWrappersByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201405')
# Create statement object to only select active creative wrappers.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
query = 'WHERE status = :status'
statement = dfp.FilterStatement(query, values)
# Get creative wrappers by statement.
while True:
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for creative_wrapper in response['results']:
print ('Creative wrapper with ID \'%s\' applying to label \'%s\' was '
'found.' % (creative_wrapper['id'], creative_wrapper['labelId']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
dietrichc/streamline-ppc-reports
|
examples/dfp/v201405/creative_wrapper_service/get_active_creative_wrappers.py
|
Python
|
apache-2.0
| 2,368
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAnnotations
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_DatasetService_ListAnnotations_sync]
from google.cloud import aiplatform_v1
def sample_list_annotations():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListAnnotationsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ListAnnotations_sync]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py
|
Python
|
apache-2.0
| 1,539
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
from botocore.exceptions import ClientError
import mock
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.aws import shape_validate
from c7n.resources.ebs import (
CopyInstanceTags,
EncryptInstanceVolumes,
CopySnapshot,
Delete,
ErrorHandler,
SnapshotQueryParser as QueryParser
)
from .common import BaseTest
class SnapshotQueryParse(BaseTest):
def test_query(self):
qfilters = [
{'Name': 'tag:Name', 'Values': ['Snapshot1']},
{'Name': 'status', 'Values': ['completed']}]
self.assertEqual(qfilters, QueryParser.parse(qfilters))
def test_invalid_query(self):
self.assertRaises(
PolicyValidationError, QueryParser.parse, {})
self.assertRaises(
PolicyValidationError, QueryParser.parse, [None])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [{'X': 1}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': 'completed'}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': ['Completed']}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'snapshot-id', 'Values': [1]}])
class SnapshotErrorHandler(BaseTest):
def test_tag_error(self):
snaps = [{'SnapshotId': 'aa'}]
error_response = {
"Error": {
"Message": "The snapshot 'aa' does not exist.",
"Code": "InvalidSnapshot.NotFound",
}
}
client = mock.MagicMock()
client.create_tags.side_effect = ClientError(error_response, 'CreateTags')
p = self.load_policy({
"name": "snap-copy",
"resource": "ebs-snapshot",
'actions': [{'type': 'tag', 'tags': {'bar': 'foo'}}]})
tagger = p.resource_manager.actions[0]
tagger.process_resource_set(client, snaps, [{'Key': 'bar', 'Value': 'foo'}])
client.create_tags.assert_called_once()
def test_remove_snapshot(self):
snaps = [{'SnapshotId': 'a'}, {'SnapshotId': 'b'}, {'SnapshotId': 'c'}]
t1 = list(snaps)
ErrorHandler.remove_snapshot('c', t1)
self.assertEqual([t['SnapshotId'] for t in t1], ['a', 'b'])
ErrorHandler.remove_snapshot('d', snaps)
self.assertEqual(len(snaps), 3)
def test_get_bad_snapshot_malformed(self):
operation_name = "DescribeSnapshots"
error_response = {
"Error": {
"Message": 'Invalid id: "snap-malformedsnap"',
"Code": "InvalidSnapshotID.Malformed",
}
}
e = ClientError(error_response, operation_name)
snap = ErrorHandler.extract_bad_snapshot(e)
self.assertEqual(snap, "snap-malformedsnap")
def test_get_bad_snapshot_notfound(self):
operation_name = "DescribeSnapshots"
error_response = {
"Error": {
"Message": "The snapshot 'snap-notfound' does not exist.",
"Code": "InvalidSnapshot.NotFound",
}
}
e = ClientError(error_response, operation_name)
snap = ErrorHandler.extract_bad_snapshot(e)
self.assertEqual(snap, "snap-notfound")
def test_get_bad_volume_malformed(self):
operation_name = "DescribeVolumes"
error_response = {
"Error": {
"Message": 'Invalid id: "vol-malformedvolume"',
"Code": "InvalidVolumeID.Malformed",
}
}
e = ClientError(error_response, operation_name)
vol = ErrorHandler.extract_bad_volume(e)
self.assertEqual(vol, "vol-malformedvolume")
def test_get_bad_volume_notfound(self):
operation_name = "DescribeVolumes"
error_response = {
"Error": {
"Message": "The volume 'vol-notfound' does not exist.",
"Code": "InvalidVolume.NotFound",
}
}
e = ClientError(error_response, operation_name)
vol = ErrorHandler.extract_bad_volume(e)
self.assertEqual(vol, "vol-notfound")
def test_snapshot_copy_related_tags_missing_volumes(self):
factory = self.replay_flight_data(
"test_ebs_snapshot_copy_related_tags_missing_volumes")
p = self.load_policy(
{
"name": "copy-related-tags",
"resource": "aws.ebs-snapshot",
"filters": [{"tag:Test": "Test"}],
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"tags": "*"
}
]
},
session_factory=factory
)
try:
resources = p.run()
except ClientError:
# it should filter missing volume and not throw an error
self.fail("This should have been handled in ErrorHandler.extract_bad_volume")
self.assertEqual(len(resources), 1)
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
# this should not filter missing volume and will throw an error
msg = e.response["Error"]["Message"]
err = e.response["Error"]["Code"]
self.assertEqual(err, "InvalidVolume.NotFound")
self.assertEqual(msg, f"The volume '{resources[0]['VolumeId']}' does not exist.")
class SnapshotAccessTest(BaseTest):
def test_snapshot_access(self):
# pre conditions, 2 snapshots one shared to a separate account, and one
# shared publicly. 2 non matching volumes, one not shared, one shared
# explicitly to its own account.
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_cross_account")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": ["cross-account"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
{r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in resources},
{"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]},
)
class SnapshotDetachTest(BaseTest):
def test_volume_detach(self):
factory = self.replay_flight_data('test_ebs_detach')
p = self.load_policy(
{
'name': 'volume-detach',
'resource': 'ebs',
'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}],
'actions': [
{
'type': 'detach'
}
]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client('ec2')
volumelist = []
volumelist.append(resources[0]['VolumeId'])
response = client.describe_volumes(VolumeIds=volumelist)
for resp in response['Volumes']:
for attachment in resp['Attachments']:
self.assertTrue(attachment['State'] == "detached" or
attachment['State'] == "detaching")
class SnapshotCopyTest(BaseTest):
def test_snapshot_copy(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
self.change_environment(AWS_DEFAULT_REGION="us-west-2")
factory = self.replay_flight_data("test_ebs_snapshot_copy")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": [{"tag:ASV": "RoadKill"}],
"actions": [
{
"type": "copy",
"target_region": "us-east-1",
"target_key": "82645407-2faa-4d93-be71-7d6a8d59a5fc",
}
],
},
config=dict(region="us-west-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client("ec2")
tags = client.describe_tags(
Filters=[
{"Name": "resource-id", "Values": [resources[0]["c7n:CopiedSnapshot"]]}
]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in tags}
self.assertEqual(tags["ASV"], "RoadKill")
class SnapshotAmiSnapshotTest(BaseTest):
def test_snapshot_ami_snapshot_filter(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
# DEFAULT_REGION needs to be set to west for recording
factory = self.replay_flight_data("test_ebs_ami_snapshot_filter")
# first case should return only resources that are ami snapshots
p = self.load_policy(
{
"name": "ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
# second case should return resources that are NOT ami snapshots
policy = self.load_policy(
{
"name": "non-ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": True}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotUnusedTest(BaseTest):
def test_snapshot_unused(self):
factory = self.replay_flight_data("test_ebs_snapshot_unused")
p = self.load_policy(
{
"name": "snap-unused",
"resource": "ebs-snapshot",
"filters": [{"type": "unused", "value": True}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy(
{
"name": "snap-used",
"resource": "ebs-snapshot",
"filters": [{"type": "unused", "value": False}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotTrimTest(BaseTest):
def test_snapshot_trim(self):
factory = self.replay_flight_data("test_ebs_snapshot_delete")
p = self.load_policy(
{
"name": "snapshot-trim",
"resource": "ebs-snapshot",
"filters": [{"tag:InstanceId": "not-null"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class AttachedInstanceTest(BaseTest):
def test_ebs_instance_filter(self):
factory = self.replay_flight_data("test_ebs_instance_filter")
p = self.load_policy(
{
"name": "attached-instance-test",
"resource": "ebs",
"filters": [
{"type": "instance", "key": "tag:Name", "value": "CompiledLambda"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class ResizeTest(BaseTest):
def test_resize_action(self):
factory = self.replay_flight_data("test_ebs_modifyable_action")
client = factory().client("ec2")
# Change a volume from 32 GB gp2 and 100 IOPS (size-based) to
# 64 GB and 500 IOPS.
vol_id = "vol-0073dcd216489ea1b"
p = self.load_policy(
{
"name": "resizable",
"resource": "ebs",
"filters": ["modifyable", {"VolumeId": vol_id}],
"actions": [
{
"type": "modify",
"volume-type": "io1",
"size-percent": 200,
"iops-percent": 500,
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(resources[0]["Iops"], 100)
self.assertEqual(resources[0]["Size"], 32)
vol = client.describe_volumes(VolumeIds=[vol_id])["Volumes"][0]
self.assertEqual(vol["Iops"], 500)
self.assertEqual(vol["Size"], 64)
def test_resize_filter(self):
# precondition, 6 volumes, 4 not modifyable.
factory = self.replay_flight_data("test_ebs_modifyable_filter")
output = self.capture_logging("custodian.filters", level=logging.DEBUG)
p = self.load_policy(
{"name": "resizable", "resource": "ebs", "filters": ["modifyable"]},
session_factory=factory,
)
resources = p.run()
self.assertEqual(
{r["VolumeId"] for r in resources},
{"vol-0073dcd216489ea1b", "vol-0e4cba7adc4764f79"},
)
self.assertEqual(
output.getvalue().strip(),
("filtered 4 of 6 volumes due to [('instance-type', 2), "
"('vol-mutation', 1), ('vol-type', 1)]")
)
class CopyInstanceTagsTest(BaseTest):
def test_copy_instance_tags(self):
# More a functional/coverage test than a unit test.
self.patch(CopyInstanceTags, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_copy_instance_tags")
volume_id = "vol-2b047792"
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-instance-tags",
"resource": "ebs",
"actions": [{"type": "copy-instance-tags", "tags": ["Name"]}],
},
config={"region": "us-west-2"},
session_factory=factory,
)
policy.run()
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["Name"], "CompileLambda")
class VolumePostFindingTest(BaseTest):
def test_volume_post_finding(self):
factory = self.replay_flight_data('test_ebs_snapshot')
p = self.load_policy({
'name': 'vol-finding',
'resource': 'aws.ebs',
'actions': [{
'type': 'post-finding',
'types': [
'Software and Configuration Checks/OrgStandard/abc-123']}]},
session_factory=factory)
resources = p.resource_manager.resources()
rfinding = p.resource_manager.actions[0].format_resource(
resources[0])
self.maxDiff = None
self.assertEqual(
rfinding,
{'Details': {
'AwsEc2Volume': {
'Attachments': [{'AttachTime': '2017-03-28T14:55:28+00:00',
'DeleteOnTermination': True,
'InstanceId': 'i-0a0b51bcf11a8cdfb',
'Status': 'attached'}],
'CreateTime': '2017-03-28T14:55:28.486000+00:00',
'Size': 8,
'SnapshotId': 'snap-037f1f9e6c8ea4d65'}},
'Id': 'arn:aws:ec2:us-east-1:644160558196:volume/vol-01adbb6a4f175941d',
'Partition': 'aws',
'Region': 'us-east-1',
'Type': 'AwsEc2Volume'})
shape_validate(
rfinding['Details']['AwsEc2Volume'],
'AwsEc2VolumeDetails', 'securityhub')
class VolumeSnapshotTest(BaseTest):
def test_volume_snapshot(self):
factory = self.replay_flight_data("test_ebs_snapshot")
policy = self.load_policy(
{
"name": "test-ebs-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-01adbb6a4f175941d"}],
"actions": ["snapshot"],
},
session_factory=factory,
)
policy.run()
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-01adbb6a4f175941d"]}]
)
self.assertEqual(len(snapshot_data["Snapshots"]), 1)
def test_volume_snapshot_copy_tags(self):
factory = self.replay_flight_data("test_ebs_snapshot_copy_tags")
policy = self.load_policy(
{
"name": "ebs-test-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-0252f61378ede9d01"}],
"actions": [{"type": "snapshot", "copy-tags": ['Name', 'Stage']}]
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-0252f61378ede9d01"]}]
)
rtags = {t['Key']: t['Value'] for t in resources[0]['Tags']}
rtags.pop('DoNotCopy')
rtags['custodian_snapshot'] = ''
for s in snapshot_data['Snapshots']:
self.assertEqual(rtags, {t['Key']: t['Value'] for t in s['Tags']})
def test_volume_snapshot_copy_volume_tags(self):
factory = self.replay_flight_data("test_ebs_snapshot_copy_volume_tags")
policy = self.load_policy(
{
"name": "ebs-test-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-0252f61378ede9d01"}],
"actions": [{"type": "snapshot",
"copy-volume-tags": False,
"tags": {'test-tag': 'custodian'}}]
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-0252f61378ede9d01"]}]
)
for s in snapshot_data['Snapshots']:
self.assertEqual({'test-tag': 'custodian'}, {t['Key']: t['Value'] for t in s['Tags']})
class VolumeDeleteTest(BaseTest):
def test_volume_delete_force(self):
self.patch(Delete, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_force_delete")
policy = self.load_policy(
{
"name": "test-ebs",
"resource": "ebs",
"filters": [{"VolumeId": "vol-d0790258"}],
"actions": [{"type": "delete", "force": True}],
},
session_factory=factory,
)
resources = policy.run()
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "InvalidVolume.NotFound")
else:
self.fail("Volume still exists")
class EncryptExtantVolumesTest(BaseTest):
def test_encrypt_volumes(self):
self.patch(EncryptInstanceVolumes, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_encrypt_volumes")
policy = self.load_policy(
{
"name": "ebs-remediate-attached",
"resource": "ebs",
"filters": [
{"Encrypted": False}, {"VolumeId": "vol-0f53c81b92b4ecfce"}
],
"actions": [
{
"type": "encrypt-instance-volumes",
"delay": 0.001,
"key": "alias/encryptebs",
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
volumes = session_factory().client("ec2").describe_volumes(
Filters=[
{
"Name": "attachment.instance-id",
"Values": [r["Attachments"][0]["InstanceId"]],
}
]
)
for v in volumes["Volumes"]:
self.assertTrue(v["Attachments"][0]["DeleteOnTermination"])
self.assertTrue(v["Encrypted"])
if "Tags" in v:
self.assertNotIn(
"maid-crypt-remediation", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-origin-volume", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-instance-device", [i["Key"] for i in v["Tags"]]
)
class TestKmsAlias(BaseTest):
def test_ebs_kms_alias(self):
session_factory = self.replay_flight_data("test_ebs_aws_managed_kms_keys")
p = self.load_policy(
{
"name": "ebs-aws-managed-kms-keys-filters",
"resource": "ebs",
"filters": [
{
"type": "kms-alias",
"key": "AliasName",
"value": "^(alias/aws/)",
"op": "regex",
}
],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-14a3cd9d")
class EbsFaultToleranceTest(BaseTest):
def test_ebs_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-fault-tolerant",
"resource": "ebs",
"filters": ["fault-tolerant"],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-c5eaa459")
def test_ebs_non_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_non_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-non-fault-tolerant",
"resource": "ebs",
"filters": [{"type": "fault-tolerant", "tolerant": False}],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-abdb8d37")
class PiopsMetricsFilterTest(BaseTest):
def test_ebs_metrics_percent_filter(self):
session = self.replay_flight_data("test_ebs_metrics_percent_filter")
policy = self.load_policy(
{
"name": "ebs-unused-piops",
"resource": "ebs",
"filters": [
{
"type": "metrics",
"name": "VolumeConsumedReadWriteOps",
"op": "lt",
"value": 50,
"statistics": "Maximum",
"days": 1,
"percent-attr": "Iops",
}
],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
class HealthEventsFilterTest(BaseTest):
def test_ebs_health_events_filter(self):
session_factory = self.replay_flight_data("test_ebs_health_events_filter")
policy = self.load_policy(
{
"name": "ebs-health-events-filter",
"resource": "ebs",
"filters": [{"type": "health-event", "types": ["AWS_EBS_VOLUME_LOST"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
        for r in resources:
            self.assertTrue(
                ("c7n:HealthEvent" in r) and
                all("Description" in e for e in r["c7n:HealthEvent"])
            )
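
# Illustrative sketch (not part of the upstream test suite): the arithmetic that
# ResizeTest above relies on, assuming the "modify" action simply scales the
# current Size and Iops by the given percentages. The helper name is made up.
def _expected_modify_result(size, iops, size_percent, iops_percent):
    """Return the (size, iops) a percentage-based modify should produce."""
    return int(size * size_percent / 100.0), int(iops * iops_percent / 100.0)

# Matches the ResizeTest assertions: 32 GB / 100 IOPS with size-percent=200 and
# iops-percent=500 becomes 64 GB / 500 IOPS.
assert _expected_modify_result(32, 100, 200, 500) == (64, 500)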
| capitalone/cloud-custodian | tests/test_ebs.py | Python | apache-2.0 | 25,361 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import amqp as rpc_amqp
from heat.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"name": link_name,
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["link"]["x-declare"].update(link_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
def connect(self, session):
"""Declare the reciever on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
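
# Illustrative sketch (not part of the upstream module): roughly what the
# "<node_name> ; <json options>" address string assembled in
# ConsumerBase.__init__ looks like. The exchange, topic and link names are
# made-up examples.
def _example_consumer_address():
    addr_opts = {
        "create": "always",
        "node": {"type": "topic",
                 "x-declare": {"durable": True, "auto-delete": True}},
        "link": {"name": "example-link", "durable": True,
                 "x-declare": {"durable": False, "auto-delete": True,
                               "exclusive": False}},
    }
    # The node_opts / link_opts passed by concrete consumers are merged into
    # the respective "x-declare" sections before serialization.
    return "%s ; %s" % ("example-exchange/example-topic",
                        jsonutils.dumps(addr_opts))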
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
super(DirectConsumer, self).__init__(
session, callback,
"%s/%s" % (msg_id, msg_id),
{"type": "direct"},
msg_id,
{
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
})
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(
session, callback,
"%s/%s" % (exchange_name, topic),
{}, name or topic,
{
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
})
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
super(FanoutConsumer, self).__init__(
session, callback,
"%s_fanout" % topic,
{"durable": False, "type": "fanout"},
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
def reconnect(self, session):
topic = self.get_node_name().rpartition('_fanout')[0]
params = {
'session': session,
'topic': topic,
'callback': self.callback,
}
self.__init__(conf=self.conf, **params)
super(FanoutConsumer, self).reconnect(session)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
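
# Illustrative aside (not part of the upstream module): Publisher.send() falls
# back to _pack_json_msg() when Qpid cannot encode the content as amqp/map,
# for example a dict holding a string longer than 65535 characters. The payload
# below is a made-up example of such a message body.
def _example_oversized_payload():
    return {"data": "x" * 70000}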
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "Direct"})
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic))
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""init a 'fanout' publisher.
"""
super(FanoutPublisher, self).__init__(
session,
"%s_fanout" % topic, {"type": "fanout"})
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic),
{"durable": True})
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
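
# Illustrative sketch (not part of the upstream module): Connection.reconnect()
# doubles its sleep between failed attempts, capped at 60 seconds, i.e.
# 1, 2, 4, 8, 16, 32, 60, 60, ... seconds.
def _example_backoff_delays(attempts):
    delays = []
    delay = 1
    for _ in range(attempts):
        delays.append(delay)
        delay = min(2 * delay, 60)
    return delays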
| lakshmi-kannan/matra | openstack/common/rpc/impl_qpid.py | Python | apache-2.0 | 26,196 |
########################################################################
# amara/xpath/locationpaths/predicates.py
"""
A parsed token that represents a predicate list.
"""
from __future__ import absolute_import
from itertools import count, izip
from amara.xpath import datatypes
from amara.xpath.expressions.basics import literal, variable_reference
from amara.xpath.expressions.booleans import equality_expr, relational_expr
from amara.xpath.functions import position_function
from ._nodetests import positionfilter
from ._paths import pathiter
__all__ = ['predicates', 'predicate']
class predicates(tuple):
def __init__(self, *args):
self.select = pathiter(pred.select for pred in self).select
return
def filter(self, nodes, context, reverse):
if self:
state = context.node, context.position, context.size
for predicate in self:
nodes = datatypes.nodeset(predicate.select(context, nodes))
context.node, context.position, context.size = state
else:
nodes = datatypes.nodeset(nodes)
if reverse:
nodes.reverse()
return nodes
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
for pred in self:
pred.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return u''.join(map(unicode, self))
#FIXME: should this derive from boolean_expression?
class predicate:
def __init__(self, expression):
self._expr = expression
self._provide_context_size = False #See http://trac.xml3k.org/ticket/62
#FIXME: There are probably many code paths which need self._provide_context_size set
# Check for just "Number"
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
# FIXME: add warning that expression will not select anything
self.select = izip()
return
# Check for "position() = Expr"
elif isinstance(expression, equality_expr) and expression._op == '=':
if isinstance(expression._left, position_function):
expression = expression._right
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = izip()
else:
#FIXME: This will kick in the non-lazy behavior too broadly, e.g. in the case of [position = 1+1]
#See: http://trac.xml3k.org/ticket/62
self._provide_context_size = True
self._expr = expression
self.select = self._number
return
elif isinstance(expression._right, position_function):
expression = expression._left
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = izip()
else:
self._expr = expression
self.select = self._number
return
# Check for "position() [>,>=] Expr" or "Expr [<,<=] position()"
# FIXME - do full slice-type notation
elif isinstance(expression, relational_expr):
op = expression._op
if (isinstance(expression._left, position_function) and
isinstance(expression._right, (literal, variable_reference))
and op in ('>', '>=')):
self._start = expression._right
self._position = (op == '>')
self.select = self._slice
return
elif (isinstance(expression._left, (literal, variable_reference))
                  and isinstance(expression._right, position_function)
and op in ('<', '<=')):
self._start = expression._left
self._position = (op == '<')
self.select = self._slice
return
if issubclass(expression.return_type, datatypes.number):
self.select = self._number
elif expression.return_type is not datatypes.xpathobject:
assert issubclass(expression.return_type, datatypes.xpathobject)
self.select = self._boolean
return
def _slice(self, context, nodes):
start = self._start.evaluate_as_number(context)
position = self._position
if position > start:
return nodes
position += 1
nodes = iter(nodes)
for node in nodes:
if position > start:
break
position += 1
return nodes
def _number(self, context, nodes):
expr = self._expr
position = 1
if self._provide_context_size:
nodes = list(nodes)
context.size = len(nodes)
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_number(context) == position:
yield node
position += 1
return
def _boolean(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_boolean(context):
yield node
position += 1
return
def select(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
result = expr.evaluate(context)
if isinstance(result, datatypes.number):
# This must be separate to prevent falling into
# the boolean check.
if result == position:
yield node
elif result:
yield node
position += 1
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expr.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return u'[%s]' % self._expr
@property
def children(self):
'Child of the parse tree of a predicate is its expression'
return (self._expr,)
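
# Illustrative aside (not part of the upstream module): the bare-number shortcut
# in predicate.__init__ treats an XPath predicate such as "[3]" like
# "[position() = 3]". Below is a plain-Python rendering of what
# positionfilter(index) is assumed to do: yield only the index-th node,
# counting from 1.
def _example_position_filter(nodes, index):
    position = 1
    for node in nodes:
        if position == index:
            yield node
            return
        position += 1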
| zepheira/amara | lib/xpath/locationpaths/predicates.py | Python | apache-2.0 | 7,392 |
##
# @author Brandon Michael
# Roll the dice based on the user's input. Track double rolls and display
# the double totals.
# import the random library
import random
# Set the start and end values to match a six-sided die
start = 1
end = 6
# Set the running total for doubles found
totalDoubles = 0
# Get the number of times we need to roll the dice
rolls = int(input("Enter the number of dice rolls: "))
# Loop through the number of rolls
for num in range(0, rolls, 1):
# Capture the rolls to check for doubles
roll_1 = random.randint(start, end)
roll_2 = random.randint(start, end)
# Check if rolls equal each other, and track the double count
if roll_1 == roll_2:
totalDoubles = totalDoubles + 1
print(roll_1, roll_2, "Double !")
else:
print(roll_1, roll_2)
# Print the results
print(totalDoubles, "double(s) rolled.")
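# Illustrative aside (not part of the original exercise): with two fair
# six-sided dice the chance of a double is 1/6, so for large inputs the printed
# total should land near rolls / 6.
print("Expected about", rolls // 6, "double(s) over", rolls, "roll(s).")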
| bwmichael/jccc-cis142-python | old/roll-the-dice.py | Python | apache-2.0 | 871 |
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# CreateSamples
#
class CreateSamples(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "CreateSamples" # TODO make this more human readable by adding spaces
self.parent.categories = ["Examples"]
self.parent.dependencies = []
self.parent.contributors = ["John Doe (AnyWare Corp.)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
    This example module creates sample label maps and volumes (with a placeholder for sample models), optionally copying the geometry of a reference volume.
"""
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# CreateSamplesWidget
#
class CreateSamplesWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
generalParametersCollapsibleButton = ctk.ctkCollapsibleButton()
generalParametersCollapsibleButton.text = "General parameters"
self.layout.addWidget(generalParametersCollapsibleButton)
# Layout within the dummy collapsible button
hlayout = qt.QHBoxLayout(generalParametersCollapsibleButton)
self.label=qt.QLabel("Volume Name:")
hlayout.addWidget(self.label)
self.volumeNameLine=qt.QLineEdit()
hlayout.addWidget(self.volumeNameLine)
self.volumeNameLine.connect('textChanged(QString)', self.onLabelChanged)
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Sample From Nothing"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# Sample Label map Button
#
self.labelButton = qt.QPushButton("Create Sample Label Map")
self.labelButton.toolTip = "Create sample label map."
self.labelButton.enabled = True
parametersFormLayout.addRow(self.labelButton)
#
# Sample Volume Button
#
self.volumeButton = qt.QPushButton("Create Sample Volume")
self.volumeButton.toolTip = "Create sample volume."
self.volumeButton.enabled = True
parametersFormLayout.addRow(self.volumeButton)
#
# Sample model Button
#
self.modelButton = qt.QPushButton("Create Sample Model")
self.modelButton.toolTip = "Create sample Model."
self.modelButton.enabled = True
parametersFormLayout.addRow(self.modelButton)
# connections
self.labelButton.connect('clicked(bool)', self.onLabelButton)
self.volumeButton.connect('clicked(bool)', self.onVolumeButton)
self.modelButton.connect('clicked(bool)', self.onModelButton)
parametersCollapsibleButton2 = ctk.ctkCollapsibleButton()
parametersCollapsibleButton2.text = "Sample From example"
self.layout.addWidget(parametersCollapsibleButton2)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton2)
#
# input volume selector
#
self.inputSelector = slicer.qMRMLNodeComboBox()
self.inputSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
# Keep the following line as an example
#self.inputSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", 0 )
self.inputSelector.selectNodeUponCreation = True
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = False
self.inputSelector.noneEnabled = True
self.inputSelector.showHidden = False
self.inputSelector.showChildNodeTypes = False
self.inputSelector.setMRMLScene( slicer.mrmlScene )
self.inputSelector.setToolTip( "reference image." )
parametersFormLayout.addRow("Reference Volume: ", self.inputSelector)
self.inputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSampleFromReferenceSelect)
#
# Sample From reference Button
#
self.referenceButton = qt.QPushButton("Create Sample Model from a reference")
self.referenceButton.toolTip = "Create sample Model from a reference."
parametersFormLayout.addRow(self.referenceButton)
self.referenceButton.connect('clicked(bool)', self.onReferenceButton)
# Add vertical spacer
self.layout.addStretch(1)
# Refresh Apply button state
self.onLabelChanged(self.volumeNameLine.text)
def ButtonsClickable(self, value):
self.labelButton.setEnabled(value)
self.volumeButton.setEnabled(value)
self.modelButton.setEnabled(value)
self.onSampleFromReferenceSelect()
def cleanup(self):
pass
def onLabelChanged(self,myString):
if not myString=='':
self.ButtonsClickable(True)
else:
self.ButtonsClickable(False)
def onSampleFromReferenceSelect(self):
self.referenceButton.enabled = self.inputSelector.currentNode() and self.volumeNameLine.text != ''
def onLabelButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text, labelmap=True)
def onVolumeButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text)
def onModelButton(self):
logic = CreateSamplesLogic()
logic.createModel()
def onReferenceButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text, labelmap=True, reference=self.inputSelector.currentNode())
#
# CreateSamplesLogic
#
class CreateSamplesLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setVolumeAsBackgroundImage(self, node):
count = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')
for n in xrange(count):
compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLSliceCompositeNode')
compNode.SetBackgroundVolumeID(node.GetID())
return True
# Create sample labelmap with same geometry as input volume
def createVolume(self , volumeName, labelmap=False, reference=None):
if volumeName == '':
raise Exception('The name of the output volume cannot be empty')
value = 1
sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
imageData = vtk.vtkImageData()
if reference == None:
mySpacing = (0.5,0.6,0.5)
myOrigin = (20,50,50)
# Do NOT set the spacing and the origin of imageData (vtkImageData)
# The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
imageData.SetDimensions(30,5,15)
imageData.AllocateScalars(vtk.VTK_DOUBLE, 1)
sampleVolumeNode.SetSpacing(mySpacing[0],mySpacing[1],mySpacing[2])
sampleVolumeNode.SetOrigin(myOrigin[0],myOrigin[1],myOrigin[2])
else:
sampleVolumeNode.Copy(reference)
imageData.DeepCopy(reference.GetImageData())
sampleVolumeNode.SetName(volumeName)
sampleVolumeNode.SetAndObserveImageData(imageData)
extent = imageData.GetExtent()
for x in xrange(extent[0], extent[1]+1):
for y in xrange(extent[2], extent[3]+1):
for z in xrange(extent[4], extent[5]+1):
if (x >= (extent[1]/4) and x <= (extent[1]/4) * 3) and (y >= (extent[3]/4) and y <= (extent[3]/4) * 3) and (z >= (extent[5]/4) and z <= (extent[5]/4) * 3):
imageData.SetScalarComponentFromDouble(x,y,z,0,value)
else:
imageData.SetScalarComponentFromDouble(x,y,z,0,0)
# Display labelmap
if labelmap:
sampleVolumeNode.SetLabelMap(1)
labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
colorNode = slicer.util.getNode('GenericAnatomyColors')
labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
labelmapVolumeDisplayNode.VisibilityOn()
sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
else:
volumeDisplayNode = slicer.vtkMRMLScalarVolumeDisplayNode()
slicer.mrmlScene.AddNode(volumeDisplayNode)
colorNode = slicer.util.getNode('Grey')
volumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
volumeDisplayNode.VisibilityOn()
sampleVolumeNode.SetAndObserveDisplayNodeID(volumeDisplayNode.GetID())
self.setVolumeAsBackgroundImage(sampleVolumeNode)
return True
def createModel(self):
print "model"
class CreateSamplesTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
| fbudin69500/SlicerPythonPlayground | test/CreateSamples/CreateSamples.py | Python | apache-2.0 | 9,951 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from edctf.api.models import challengeboard, category, challenge
from edctf.api.serializers import challengeboard_serializer, category_serializer, challenge_serializer
class challengeboard_view(APIView):
"""
Manages challengeboard requests
"""
permission_classes = (IsAuthenticated,)
def get(self, request, id=None, format=None):
"""
Gets all challengeboards or gets one challengeboard via
challengeboards/:id.
"""
# If challengeboard id was requested, return that challengeboard
# else return list of all challengeboards in the database.
if id:
# Retrieve and serialize the requested challengeboard data.
challengeboards = challengeboard.objects.filter(id=id)
challengeboards_serializer = challengeboard_serializer(challengeboards, many=True, context={'request': request})
# Retrieve and serialize the categories in the challengeboard.
categories = category.objects.filter(challengeboard=challengeboards.first())
categories_serializer = category_serializer(categories, many=True, context={'request': request})
# Retrieve and serialize the challenges in each category.
challenges = []
for cat in categories:
challenges += challenge.objects.filter(category=cat)
challenges_serializer = challenge_serializer(challenges, many=True, context={'request': request})
# Return the serialized data.
return Response({
'challengeboards': challengeboards_serializer.data,
'categories': categories_serializer.data,
'challenges': challenges_serializer.data,
})
else:
# Retrieve and serialize the requested challengeboard data.
challengeboards = challengeboard.objects.all()
serializer = challengeboard_serializer(challengeboards, many=True, context={'request': request})
# Return the serialized data.
return Response({
'challengeboards': serializer.data,
})
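
# Illustrative sketch (not part of the upstream view): the rough shape of the
# JSON returned for a detail request above. The field contents depend on the
# project's serializers, so the values here are placeholders.
EXAMPLE_DETAIL_RESPONSE = {
    'challengeboards': [{'id': 1}],
    'categories': [{'id': 1, 'challengeboard': 1}],
    'challenges': [{'id': 1, 'category': 1}],
}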
| IAryan/edCTF | edctf/api/views/challengeboard.py | Python | apache-2.0 | 2,095 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
# +++your code here+++
count = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
count += 1
return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  # Split the words into those that start with 'x' and the rest,
  # sort each group, then put the 'x' group first.
  x_words = sorted(w for w in words if w.startswith('x'))
  other_words = sorted(w for w in words if not w.startswith('x'))
  return x_words + other_words
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
  # Sort by the last element in each tuple using a key function.
  return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| missyjcat/pythonexercises | basic/list1.py | Python | apache-2.0 | 3,070 |
"""The tests for SleepIQ binary sensor platform."""
import unittest
from unittest.mock import MagicMock
import requests_mock
from homeassistant.setup import setup_component
from homeassistant.components.binary_sensor import sleepiq
from tests.components.test_sleepiq import mock_responses
from tests.common import get_test_home_assistant
class TestSleepIQBinarySensorSetup(unittest.TestCase):
"""Tests the SleepIQ Binary Sensor platform."""
DEVICES = []
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.username = 'foo'
self.password = 'bar'
self.config = {
'username': self.username,
'password': self.password,
}
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup(self, mock):
"""Test for successfully setting up the SleepIQ platform."""
mock_responses(mock)
setup_component(self.hass, 'sleepiq', {
'sleepiq': self.config})
sleepiq.setup_platform(self.hass,
self.config,
self.add_entities,
MagicMock())
self.assertEqual(2, len(self.DEVICES))
left_side = self.DEVICES[1]
self.assertEqual('SleepNumber ILE Test1 Is In Bed', left_side.name)
self.assertEqual('on', left_side.state)
right_side = self.DEVICES[0]
self.assertEqual('SleepNumber ILE Test2 Is In Bed', right_side.name)
self.assertEqual('off', right_side.state)
| persandstrom/home-assistant | tests/components/binary_sensor/test_sleepiq.py | Python | apache-2.0 | 1,835 |
from __future__ import print_function
from cms.sitemaps import CMSSitemap
from django.conf.urls import * # NOQA
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.conf import settings
from ImmOrbit.api import router
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {'cmspages': CMSSitemap}}),
url(r'^select2/', include('django_select2.urls')),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', # NOQA
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
) + staticfiles_urlpatterns() + urlpatterns # NOQA
| RedMap/Django-ImmOrbit | example/src/Demo/urls.py | Python | apache-2.0 | 1,077 |
import numpy as np
import pytest
import re
from typing import Text, Dict, Any, Callable, List, Optional, Union
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.storage.resource import Resource
from rasa.nlu.constants import (
DENSE_FEATURIZABLE_ATTRIBUTES,
MESSAGE_ATTRIBUTES,
TOKENS_NAMES,
)
from rasa.nlu.tokenizers.spacy_tokenizer import POS_TAG_KEY
from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import (
LexicalSyntacticFeaturizer,
FEATURES,
)
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.constants import FEATURE_TYPE_SEQUENCE, TEXT
from rasa.shared.exceptions import InvalidConfigException
from rasa.nlu.tokenizers.tokenizer import Token
@pytest.fixture
def resource_lexical_syntactic_featurizer() -> Resource:
return Resource("LexicalSyntacticFeaturizer")
@pytest.fixture
def create_lexical_syntactic_featurizer(
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource_lexical_syntactic_featurizer: Resource,
) -> Callable[[Dict[Text, Any]], LexicalSyntacticFeaturizer]:
def inner(config: Dict[Text, Any]):
return LexicalSyntacticFeaturizer.create(
config={**LexicalSyntacticFeaturizer.get_default_config(), **config,},
model_storage=default_model_storage,
execution_context=default_execution_context,
resource=resource_lexical_syntactic_featurizer,
)
return inner
@pytest.mark.parametrize(
"sentence,part_of_speech,feature_config,expected_features",
[
# simple example 1
(
"hello goodbye hello",
None,
[["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],],
[
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0],
],
),
# simple example 2
(
"a 1",
None,
[["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],],
[
[0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
],
),
# larger window size
(
"hello 123 hello 123 hello",
None,
[["upper"], ["digit"], ["low"], ["digit"]],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
# Note:
# 1. we just describe the features for first token here
# 2. "123".islower() == "123".isupper() == False, which is why we end
# up with 7 features
),
# with part of speech
(
"The sun is shining",
["DET", "NOUN", "AUX", "VERB"],
[["pos", "pos2"]],
[
[0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
],
),
],
)
def test_feature_computation(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
sentence: Text,
part_of_speech: Optional[List[Text]],
feature_config: List[List[Text]],
expected_features: List[Union[int, List[int]]],
):
featurizer = create_lexical_syntactic_featurizer(
{"alias": "lsf", "features": feature_config}
)
# build the message
tokens = [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
# ... and add part of speech tags (str) to tokens (if specified)
if part_of_speech:
assert len(tokens) == len(part_of_speech)
for token, pos in zip(tokens, part_of_speech):
token.data = {POS_TAG_KEY: pos}
message = Message(data={TOKENS_NAMES[TEXT]: tokens})
# train
featurizer.train(TrainingData([message]))
assert not message.features
# process
featurizer.process([message])
assert len(message.features) == 1
feature = message.features[0]
assert feature.attribute == TEXT
assert feature.is_sparse()
assert feature.type == FEATURE_TYPE_SEQUENCE
assert feature.features.shape[0] == len(tokens)
if isinstance(expected_features[0], List):
assert len(expected_features) == feature.features.shape[0]
# we specified the full matrix
assert np.all(feature.features.todense() == expected_features)
else:
assert len(expected_features) == feature.features.shape[1]
# just check features for the first token
assert np.all(feature.features.todense()[0] == expected_features)
def test_features_for_messages_with_missing_part_of_speech_tags(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
):
# build the message and do NOT add part of speech information
sentence = "hello goodbye hello"
message_data = {
TOKENS_NAMES[TEXT]: [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
}
message = Message(data=message_data)
# train and process
featurizer = create_lexical_syntactic_featurizer(
{"alias": "lsf", "features": [["BOS", "pos"]]}
)
featurizer.train(TrainingData([message]))
featurizer.process([message])
feature = message.features[0]
assert feature.features.shape[1] == 3 # BOS = True/False, pos = None
def test_only_featurizes_text_attribute(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
):
# build a message with tokens for lots of attributes
sentence = "hello goodbye hello"
tokens = [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
message_data = {}
for attribute in MESSAGE_ATTRIBUTES + DENSE_FEATURIZABLE_ATTRIBUTES:
message_data[attribute] = sentence
message_data[TOKENS_NAMES[attribute]] = tokens
message = Message(data=message_data)
# train and process
featurizer = create_lexical_syntactic_featurizer(
{"alias": "lsf", "features": [["BOS"]]}
)
featurizer.train(TrainingData([message]))
featurizer.process([message])
assert len(message.features) == 1
assert message.features[0].attribute == TEXT
def test_process_multiple_messages(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
):
# build a message with tokens for lots of attributes
multiple_messages = []
for sentence in ["hello", "hello there"]:
tokens = [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
multiple_messages.append(Message(data={TOKENS_NAMES[TEXT]: tokens}))
# train and process
featurizer = create_lexical_syntactic_featurizer(
{"alias": "lsf", "features": [["prefix2"]]}
)
featurizer.train(TrainingData(multiple_messages))
featurizer.process(multiple_messages)
for message in multiple_messages:
assert len(message.features) == 1
assert message.features[0].attribute == TEXT
    # we know both texts were used for training if more than one feature has been
    # extracted, e.g. for the first message, from which only the prefix "he" can
    # be extracted
assert multiple_messages[0].features[0].features.shape[-1] > 1
@pytest.mark.parametrize("feature_config", [(["pos", "BOS"],)])
def test_create_train_load_and_process(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource_lexical_syntactic_featurizer: Resource,
feature_config: List[Text],
):
config = {"alias": "lsf", "features": feature_config}
featurizer = create_lexical_syntactic_featurizer(config)
sentence = "Hello how are you"
tokens = [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
message = Message(data={TOKENS_NAMES[TEXT]: tokens})
featurizer.train(TrainingData([message]))
loaded_featurizer = LexicalSyntacticFeaturizer.load(
        config={**LexicalSyntacticFeaturizer.get_default_config(), **config},
model_storage=default_model_storage,
execution_context=default_execution_context,
resource=resource_lexical_syntactic_featurizer,
)
assert loaded_featurizer._feature_to_idx_dict == featurizer._feature_to_idx_dict
@pytest.mark.parametrize(
"config,raises",
[
# do not raise
({}, False),
({**LexicalSyntacticFeaturizer.get_default_config()}, False),
({FEATURES: [["suffix2"]]}, False),
(
{
"bla": "obviously an unknown extra feature",
"faeturizer": "typos are also unknown features",
},
False,
),
# raise
({FEATURES: ["pos", "suffix2"]}, True),
({FEATURES: ["suffix1234"]}, True),
],
)
def test_validate_config(config: Dict[Text, Any], raises: bool):
if not raises:
LexicalSyntacticFeaturizer.validate_config(config)
else:
with pytest.raises(InvalidConfigException):
LexicalSyntacticFeaturizer.validate_config(config)
@pytest.mark.parametrize(
"sentence, feature_config, expected_features",
[("The sun is shining", [["pos", "pos2"]], np.ones(shape=(4, 2)),),],
)
def test_warn_if_part_of_speech_features_cannot_be_computed(
create_lexical_syntactic_featurizer: Callable[
[Dict[Text, Any]], LexicalSyntacticFeaturizer
],
sentence: Text,
    feature_config: List[List[Text]],
expected_features: np.ndarray,
):
featurizer = create_lexical_syntactic_featurizer(
{"alias": "lsf", "features": feature_config}
)
# build the message - with tokens but *no* part-of-speech tags
tokens = [
Token(text=match[0], start=match.start())
for match in re.finditer(r"\w+", sentence)
]
message = Message(data={TOKENS_NAMES[TEXT]: tokens})
# train
with pytest.warns(
UserWarning,
match="Expected training data to include tokens with part-of-speech tags",
):
featurizer.train(TrainingData([message]))
assert not message.features
# process
with pytest.warns(None) as records:
featurizer.process([message])
assert len(records) == 0
assert len(message.features) == 1
feature = message.features[0]
assert np.all(feature.features.todense() == expected_features)
|
RasaHQ/rasa_nlu
|
tests/nlu/featurizers/test_lexical_syntactic_featurizer.py
|
Python
|
apache-2.0
| 11,174
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('todo', '0006_auto_20160530_1210'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='category',
field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='todo.Category'),
preserve_default=False,
),
]
|
Azarn/mytodo
|
todo/migrations/0007_auto_20160530_1233.py
|
Python
|
apache-2.0
| 593
|
from nose.tools import eq_
from pepdata.amino_acid_alphabet import (
canonical_amino_acids,
canonical_amino_acid_letters,
extended_amino_acids,
extended_amino_acid_letters,
)
def test_canonical_amino_acids():
assert len(canonical_amino_acids) == 20
def test_canonical_amino_acids_letters():
assert len(canonical_amino_acid_letters) == 20
assert "X" not in canonical_amino_acid_letters
expected_letters = [aa.letter for aa in canonical_amino_acids]
eq_(expected_letters, canonical_amino_acid_letters)
def test_extended_amino_acids():
assert len(extended_amino_acids) > 20
def test_extended_amino_acids_letters():
assert len(extended_amino_acid_letters) > 20
assert "X" in extended_amino_acid_letters
assert "J" in extended_amino_acid_letters
expected_letters = [aa.letter for aa in extended_amino_acids]
eq_(expected_letters, extended_amino_acid_letters)
|
hammerlab/pepdata
|
test/test_amino_acids.py
|
Python
|
apache-2.0
| 918
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Google Analytics cookie event formatters."""
import unittest
from plaso.formatters import ganalytics
from tests.formatters import test_lib
class AnalyticsUtmaCookieFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the UTMA Google Analytics cookie event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = ganalytics.AnalyticsUtmaCookieFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = ganalytics.AnalyticsUtmaCookieFormatter()
expected_attribute_names = [
u'url', u'cookie_name', u'sessions', u'domain_hash', u'visitor_id']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class AnalyticsUtmbCookieFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the UTMB Google Analytics cookie event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = ganalytics.AnalyticsUtmbCookieFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = ganalytics.AnalyticsUtmbCookieFormatter()
expected_attribute_names = [
u'url', u'cookie_name', u'pages_viewed', u'domain_hash']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class AnalyticsUtmtCookieFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the UTMT Google Analytics cookie event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = ganalytics.AnalyticsUtmtCookieFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = ganalytics.AnalyticsUtmtCookieFormatter()
expected_attribute_names = [u'url', u'cookie_name']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class AnalyticsUtmzCookieFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the UTMZ Google Analytics cookie event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = ganalytics.AnalyticsUtmzCookieFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = ganalytics.AnalyticsUtmzCookieFormatter()
expected_attribute_names = [
u'url', u'cookie_name', u'sessions', u'domain_hash', u'sources',
u'utmcsr', u'utmccn', u'utmcmd', u'utmctr', u'utmcct']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
|
dc3-plaso/plaso
|
tests/formatters/ganalytics.py
|
Python
|
apache-2.0
| 3,320
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.core.util import event_pb2
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy():
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
return tpu_lib.TPUStrategyV2(resolver)
def computation_with_string_ops(x):
output = string_ops.string_format("1{}", x)
return string_ops.string_to_number(output)
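# For example, with x == 0 the formatted string is "10", so this helper returns
# 10.0; the outside-compilation tests below rely on that value.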
def _events_from_logdir(test_case, logdir):
"""Reads summary events from log directory."""
test_case.assertTrue(gfile.Exists(logdir))
files = gfile.ListDirectory(logdir)
test_case.assertLen(files, 1)
records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
class TpuOutsideCompilationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(TpuOutsideCompilationTest, self).setUp()
config.set_soft_device_placement(False)
def testHostNoInput(self):
strategy = get_tpu_strategy()
def outside_fn():
logging_ops.print_v2("Outside compiled")
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOnly(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn, x2)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOutput(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output = tpu.outside_compilation(outside_fn, x2)
return output
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
def testHostMultipleInputs(self):
strategy = get_tpu_strategy()
val0 = np.arange(6).reshape((2, 3)).astype(np.float32)
val1 = np.arange(6).reshape((3, 2)).astype(np.float32)
def outside_fn(arg0, arg1):
tmp = array_ops.reshape(arg1, array_ops.shape(arg0))
ret0 = arg0 + tmp
ret1 = math_ops.matmul(arg0, arg1)
ret2 = array_ops.concat([arg0, tmp], 0)
return ret0, ret1, ret2
@def_function.function
def train_step():
def tpu_fn(x, y):
a = x + 7.0
b = y * 2.0
c, d, e = tpu.outside_compilation(outside_fn, a, b)
return (math_ops.reduce_max(c) + math_ops.reduce_min(d) +
math_ops.reduce_sum(e))
return strategy.run(tpu_fn, args=(val0, val1))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(213., shape=(strategy.num_replicas_in_sync)))
def testMultipleClusters(self):
strategy = get_tpu_strategy()
def outside_fn1(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
def outside_fn2(x):
logging_ops.print_v2("Outside compiled", x)
return x - 18.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output1 = tpu.outside_compilation(outside_fn1, x2)
x3 = output1 + 3.0
output2 = tpu.outside_compilation(outside_fn2, x3)
return output2
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(21., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testOutsideCompilationControlFlowIf(self, take_true_branch):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
input_value = 51.0 if take_true_branch else 25.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
if x < 50.0:
return tpu.outside_compilation(outside_fn, x2)
else:
return x2
return strategy.run(tpu_fn, args=(input_value,))
output_value = 36.0
if take_true_branch:
output_value = 56.0
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationControlFlowWhile(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
while x2 < 50.0:
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationHostControlFlow(self):
"""Tests that control flow on host for outside_compilation works."""
strategy = get_tpu_strategy()
def outside_fn(x):
n = 0
while n < 4:
x = x + 6.0
n = n + 1
return x
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testSummaryInCond(self, take_true_branch):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step(take_true_branch):
def computation(x):
x = x + 1.0
if x < 5.0:
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step(take_true_branch)),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testSummaryInWhile(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
n = 0
while n < 3:
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
n = n + 1
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(31., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationAtHeadAndTail(self):
"""Tests that outside_compilation at head/tail of TPU computation works."""
strategy = get_tpu_strategy()
def host_computation(x):
return x * 2.0
@def_function.function
def train_step():
def computation(x):
w = tpu.outside_compilation(host_computation, x)
y = w + 1.0
z = tpu.outside_compilation(host_computation, y)
return z + 5.0
return strategy.run(computation, args=(2.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(15., shape=(strategy.num_replicas_in_sync)))
def testGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
return d
return strategy.run(computation, args=(2.0, 3.0))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(8748., shape=(strategy.num_replicas_in_sync)))
def testGradientOfGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients of gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
e = gradients_impl.gradients(
[d], [x], colocate_gradients_with_ops=True)[0]
return e
return strategy.run(computation, args=(2.0, 3.0))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(2916., shape=(strategy.num_replicas_in_sync)))
class OutsideCompilationOnUnsupportedOpTest(test.TestCase,
parameterized.TestCase):
def setUp(self):
super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
config.set_soft_device_placement(True)
def testStringOpWithManualOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return tpu.outside_compilation(computation_with_string_ops, x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testStringOpWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return computation_with_string_ops(x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
@parameterized.parameters((True), (False))
def testSummaryControlFlowIfWithAutoOutsideCompilation(
self, take_true_branch):
strategy = get_tpu_strategy()
@def_function.function
def step():
def computation(x):
x = x + 1.0
if x < 5:
scalar_summary_v2.scalar("x", x, step=0)
x = x * 2.0
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
if take_true_branch:
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
#
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "cond/x")
@test_util.disable_mlir_bridge(
"TODO(b/168493455): Reenable this test once deadlock resolved."
)
def testAutoOutsideCompilationWithFunctionalNodes(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(a, b):
def fn(a, b):
fn1 = lambda: computation_with_string_ops(a * 100)
fn2 = lambda: computation_with_string_ops(a)
pred = math_ops.greater_equal(a, b)
result = array_ops.identity(
control_flow_ops.cond(pred, fn1, fn2),
name="uncompilable_control_flow")
return result
return strategy.run(fn, args=(a, b))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0.0, -1.0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testRandomOpsWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
def computation():
return random_ops.random_normal(shape=[1, 2, 3])
return strategy.run(computation, args=())
self.assertAllEqual(
strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])
if __name__ == "__main__":
test.main()
|
karllessard/tensorflow
|
tensorflow/python/tpu/tpu_outside_compilation_test.py
|
Python
|
apache-2.0
| 18,249
|
# coding=utf-8
__author__ = 'litao'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
import thread
import time
class QSBK:
def __init__(self):
self.pageIndex = 1
self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
self.headers = { 'User-Agent' : self.user_agent }
self.stories = []
self.enable = False
def getPage(self,pageIndex):
try:
url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
request = urllib2.Request(url,headers = self.headers)
response = urllib2.urlopen(request)
pageCode = response.read().decode('utf-8')
return pageCode
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"连接糗事百科失败,错误原因",e.reason
return None
    # Given the HTML of one page, return the list of jokes on that page that contain no images
def getPageItems(self,pageIndex):
pageCode = self.getPage(pageIndex)
if not pageCode:
print "页面加载失败...."
return None
pattern = re.compile('<div.*?author">.*?<a.*?<img.*?>(.*?)</a>.*?<div.*?'+
'content">(.*?)<!--(.*?)-->.*?</div>(.*?)<div class="stats.*?class="number">(.*?)</i>',re.S)
items = re.findall(pattern,pageCode)
        # List used to store the jokes of this page
pageStories = []
        # Iterate over the information matched by the regex
for item in items:
            # Check whether the item contains an image
haveImg = re.search("img",item[3])
            # If there is no image, add it to the list
if not haveImg:
replaceBR = re.compile('<br/>')
text = re.sub(replaceBR,"\n",item[1])
                # item[0] is the author, item[1] the content, item[2] the publish time, item[4] the number of likes
pageStories.append([item[0].strip(),text.strip(),item[2].strip(),item[4].strip()])
return pageStories
    # Load a page, extract its content and append it to the list
def loadPage(self):
        # If fewer than two unread pages are left, load a new one
if self.enable == True:
if len(self.stories) < 2:
                # Fetch a new page
pageStories = self.getPageItems(self.pageIndex)
                # Store the jokes of that page in the global list
if pageStories:
self.stories.append(pageStories)
                    # After fetching, move the page index forward so the next load reads the next page
self.pageIndex += 1
    # Call this method to print one joke each time the user presses Enter
def getOneStory(self,pageStories,page):
        # Iterate over the jokes of one page
for story in pageStories:
            # Wait for user input
input = raw_input()
            # Every time Enter is pressed, check whether a new page should be loaded
self.loadPage()
            # If the user types Q, end the program
if input == "Q":
self.enable = False
return
print u"第%d页\t发布人:%s\t发布时间:%s\t赞:%s\n%s" %(page,story[0],story[2],story[3],story[1])
#开始方法
def start(self):
print u"正在读取糗事百科,按回车查看新段子,Q退出"
#使变量为True,程序可以正常运行
self.enable = True
        # Load one page of content first
self.loadPage()
        # Local variable tracking which page is currently being read
nowPage = 0
while self.enable:
if len(self.stories)>0:
                # Take one page of jokes from the global list
pageStories = self.stories[0]
                # Increment the current page counter
nowPage += 1
                # Remove the first element from the global list since it has been consumed
del self.stories[0]
                # Print the jokes of that page
self.getOneStory(pageStories,nowPage)
spider = QSBK()
spider.start()
|
nfsli926/stock
|
python/com/nfs/helloworld.py
|
Python
|
apache-2.0
| 3,988
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions for outputting as l2t_csv.
Author description at: http://code.google.com/p/log2timeline/wiki/l2t_csv
"""
import logging
import re
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import output
from plaso.lib import timelib
from plaso.lib import utils
from plaso.output import helper
class L2tcsv(output.FileLogOutputFormatter):
"""CSV format used by log2timeline, with 17 fixed fields."""
FORMAT_ATTRIBUTE_RE = re.compile('{([^}]+)}')
def Start(self):
"""Returns a header for the output."""
# Build a hostname and username dict objects.
self._hostnames = {}
if self.store:
self._hostnames = helper.BuildHostDict(self.store)
self._preprocesses = {}
for info in self.store.GetStorageInformation():
if hasattr(info, 'store_range'):
for store_number in range(
info.store_range[0], info.store_range[1] + 1):
self._preprocesses[store_number] = info
self.filehandle.WriteLine(
u'date,time,timezone,MACB,source,sourcetype,type,user,host,short,desc,'
u'version,filename,inode,notes,format,extra\n')
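  # The header above defines 17 columns; EventBody() below emits each row as a
  # 17-element tuple in the same column order.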
def WriteEvent(self, event_object):
"""Write a single event."""
try:
self.EventBody(event_object)
except errors.NoFormatterFound:
logging.error(u'Unable to output line, no formatter found.')
logging.error(event_object)
def EventBody(self, event_object):
"""Formats data as l2t_csv and writes to the filehandle from OutputFormater.
Args:
event_object: The event object (EventObject).
Raises:
errors.NoFormatterFound: If no formatter for that event is found.
"""
if not hasattr(event_object, 'timestamp'):
return
event_formatter = eventdata.EventFormatterManager.GetFormatter(event_object)
if not event_formatter:
raise errors.NoFormatterFound(
u'Unable to find event formatter for: {0:s}.'.format(
event_object.DATA_TYPE))
msg, msg_short = event_formatter.GetMessages(event_object)
source_short, source_long = event_formatter.GetSources(event_object)
date_use = timelib.Timestamp.CopyToDatetime(
event_object.timestamp, self.zone)
extras = []
format_variables = self.FORMAT_ATTRIBUTE_RE.findall(
event_formatter.format_string)
for key in event_object.GetAttributes():
if key in utils.RESERVED_VARIABLES or key in format_variables:
continue
# Force a string conversion since some of the extra attributes
# can be numbers or bools.
value = getattr(event_object, key)
extras.append(u'{0:s}: {1!s} '.format(key, value))
extra = ' '.join(extras)
inode = getattr(event_object, 'inode', '-')
if inode == '-':
if hasattr(event_object, 'pathspec') and hasattr(
event_object.pathspec, 'image_inode'):
inode = event_object.pathspec.image_inode
hostname = getattr(event_object, 'hostname', u'')
# TODO: move this into a base output class.
username = getattr(event_object, 'username', u'-')
if self.store:
if not hostname:
hostname = self._hostnames.get(event_object.store_number, u'-')
pre_obj = self._preprocesses.get(event_object.store_number)
if pre_obj:
check_user = pre_obj.GetUsernameById(username)
if check_user != '-':
username = check_user
row = ('{0:02d}/{1:02d}/{2:04d}'.format(
date_use.month, date_use.day, date_use.year),
'{0:02d}:{1:02d}:{2:02d}'.format(
date_use.hour, date_use.minute, date_use.second),
self.zone,
helper.GetLegacy(event_object),
source_short,
source_long,
getattr(event_object, 'timestamp_desc', u'-'),
username,
hostname,
msg_short,
msg,
'2',
getattr(event_object, 'display_name', u'-'),
inode,
getattr(event_object, 'notes', u'-'), # Notes field placeholder.
getattr(event_object, 'parser', u'-'),
extra.replace('\n', u'-').replace('\r', u''))
out_write = u'{0:s}\n'.format(
u','.join(unicode(x).replace(',', u' ') for x in row))
self.filehandle.WriteLine(out_write)
|
iwm911/plaso
|
plaso/output/l2t_csv.py
|
Python
|
apache-2.0
| 4,971
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from blinker import signal, Namespace, NamedSignal
from yosaipy2.core.event.abcs import EventBus
from typing import Dict
from functools import wraps
class BlinkerEventBus(EventBus):
def __init__(self):
        # type: () -> None
self.AUTO_TOPIC = "blinker_eventbus_auto_topic"
        self._signals = {}  # type: Dict[str, NamedSignal]
def send_message(self, topic_name, **kwargs):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
sig.send(None, **kwargs)
def subscribe(self, func, topic_name):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
callback = self._adapter(func, topic_name)
sig.connect(callback)
def unsubscribe(self, listener, topic_name):
pass
@staticmethod
def _adapter(func, topic_name):
@wraps(func)
def callback(sender, **kwargs):
func(topic=topic_name, **kwargs)
return callback
def isSubscribed(self, listener, topic_name):
if topic_name not in self._signals:
return False
return True
event_bus = BlinkerEventBus()
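# A minimal usage sketch (not part of the original module; the topic name and
# handler below are illustrative only):
#
#     def on_user_event(topic, **kwargs):
#         print(topic, kwargs)
#
#     event_bus.subscribe(on_user_event, "user_event")
#     event_bus.send_message("user_event", user_id=42)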
|
jellybean4/yosaipy2
|
yosaipy2/core/event/event_bus.py
|
Python
|
apache-2.0
| 1,390
|
"""This test checks that Optuna is functional.
It also checks that it is usable with a separate scheduler.
"""
import time
import ray
from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.optuna import OptunaSearch
def evaluation_fn(step, width, height):
return (0.1 + width * step / 100)**(-1) + height * 0.1
def easy_objective(config):
# Hyperparameters
width, height = config["width"], config["height"]
for step in range(config["steps"]):
# Iterative training function - can be any arbitrary training procedure
intermediate_score = evaluation_fn(step, width, height)
        # Feed the score back to Tune.
tune.report(iterations=step, mean_loss=intermediate_score)
time.sleep(0.1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init(configure_logging=False)
tune_kwargs = {
"num_samples": 10 if args.smoke_test else 100,
"config": {
"steps": 100,
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100),
# This is an ignored parameter.
"activation": tune.choice(["relu", "tanh"])
}
}
algo = OptunaSearch()
algo = ConcurrencyLimiter(algo, max_concurrent=4)
scheduler = AsyncHyperBandScheduler()
tune.run(
easy_objective,
metric="mean_loss",
mode="min",
search_alg=algo,
scheduler=scheduler,
**tune_kwargs)
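    # For a quick local check this script can be run with the --smoke-test flag
    # defined above, which drops num_samples from 100 to 10.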
|
robertnishihara/ray
|
python/ray/tune/examples/optuna_example.py
|
Python
|
apache-2.0
| 1,721
|
'''
SI 618 - HW 4: Map-Reduce Part 2
Uniqname: gracfu
'''
from mrjob.job import MRJob
from mrjob.step import MRStep
import re
WORD_RE = re.compile(r"\b[\w']+\b")
class MRMostUsedWord(MRJob):
def mapper_get_words(self, _, line):
for word in WORD_RE.findall(line):
yield (word.lower(), 1)
def combiner_count_words(self, word, counts):
yield (word, sum(counts))
def reducer_count_words(self, word, counts):
yield None, (sum(counts), word)
def reducer_find_max_words(self, _, word_count_pairs):
yield max(word_count_pairs)
def steps(self):
return [
MRStep(mapper = self.mapper_get_words,
combiner = self.combiner_count_words,
reducer = self.reducer_count_words),
MRStep(reducer = self.reducer_find_max_words)
]
if __name__ == '__main__':
MRMostUsedWord.run()
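    # Typical local invocation: python map_reduce_part2.py input.txt, where
    # input.txt is any text file; MRJob runs the two steps declared in steps()
    # and prints the (count, word) pair for the most frequent word.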
|
gracfu/618_map_reduce
|
map_reduce_part2.py
|
Python
|
apache-2.0
| 802
|
#!/usr/bin/env python
"""do_release.py
Usage:
do_release.py [--force] [CALICO_DOCKER_VERSION CALICO_VERSION LIBCALICO_VERSION LIBNETWORK_VERSION]
Options:
-h --help Show this screen.
"""
import subprocess
import utils
import re
from docopt import docopt
from utils import print_paragraph as para
from utils import print_user_actions as actions
from utils import print_bullet as bullet
from utils import print_next as next
from utils import print_warning as warning
# The candidate version replacement performs most of the required version
# replacements, but replaces build artifact URLs with a dynamic URL that
# can return an artifact for an arbitrary branch. This is replaced with the
# GitHub release artifact just before the release is actually cut.
CANDIDATE_VERSION_REPLACE = [
(re.compile(r'__version__\s*=\s*".*"'),
'__version__ = "{version-no-v}"'),
(re.compile(r'\*\*release\*\*'),
'{version}'),
(re.compile('http://www\.projectcalico\.org/latest/calicoctl'),
'http://www.projectcalico.org/builds/calicoctl?circleci-branch={version}-candidate'),
(re.compile(r'git\+https://github\.com/projectcalico/calico\.git'),
'git+https://github.com/projectcalico/calico.git@{calico-version}'),
(re.compile(r'git\+https://github\.com/projectcalico/libcalico\.git'),
'git+https://github.com/projectcalico/libcalico.git@{libcalico-version}'),
(re.compile(r'calico_docker_ver\s*=\s*"latest"'),
'calico_docker_ver = "{version}"'),
(re.compile('calico_node_ver\s*=\s*"latest"'),
'calico_node_ver = "{version}"'),
(re.compile('calico/node:latest'),
'calico/node:{version}'),
(re.compile('calico/node-libnetwork:latest'),
'calico/node-libnetwork:{libnetwork-version}'),
(re.compile('calico_libnetwork_ver\s*=\s*"latest"'),
'calico_libnetwork_ver = "{libnetwork-version}"')
]
# The final version replace handles migrating the dynamic (temporary) URLs to
# point to the Git archives.
FINAL_VERSION_REPLACE = [
(re.compile('http://www\.projectcalico\.org/latest/calicoctl\?circleci\-branch=.*\-candidate'),
'https://github.com/projectcalico/calico-docker/releases/download/{version}/calicoctl'),
]
# Version replacement for the master branch. We just need to update the
# python version string and the comments.
MASTER_VERSION_REPLACE = [
(re.compile(r'__version__\s*=\s*".*"'),
'__version__ = "{version-no-v}-dev"'),
(re.compile(r'https://github\.com/projectcalico/calico\-docker/blob/.*/README\.md'),
'https://github.com/projectcalico/calico-docker/blob/{version}/README.md')
]
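# Each of the three tables above is a list of (compiled pattern, replacement
# template) pairs; placeholders such as {version}, {version-no-v} and
# {calico-version} are filled in from release_data["versions"] when
# utils.update_files() is called in the steps below.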
# Load the globally required release data.
release_data = utils.load_release_data()
# ============== Define the release steps. ===============
def start_release():
"""
Start the release process, asking user for version information.
:return:
"""
para("Your git repository should be checked out to the correct revision "
"that you want to cut a release with. This is usually the HEAD of "
"the master branch.")
utils.check_or_exit("Are you currently on the correct revision")
old_version = utils.get_calicoctl_version()
para("Current version is: %s" % old_version)
new_version = arguments["CALICO_DOCKER_VERSION"]
if not new_version:
while True:
new_version = raw_input("New calicoctl version?: ")
release_type = utils.check_version_increment(old_version, new_version)
if release_type:
para("Release type: %s" % release_type)
break
calico_version = arguments["CALICO_VERSION"]
libcalico_version = arguments["LIBCALICO_VERSION"]
libnetwork_version = arguments["LIBNETWORK_VERSION"]
if not (calico_version and libcalico_version and libnetwork_version):
para("To pin the calico libraries used by calico-docker, please specify "
"the name of the requested versions as they appear in the GitHub "
"releases.")
calico_version = \
utils.get_github_library_version("calico (felix)",
"https://github.com/projectcalico/calico")
libcalico_version = \
utils.get_github_library_version("libcalico",
"https://github.com/projectcalico/libcalico")
libnetwork_version = \
utils.get_github_library_version("libnetwork-plugin",
"https://github.com/projectcalico/libnetwork-plugin")
release_data["versions"] = {"version": new_version,
"version-no-v": new_version[1:],
"calico-version": calico_version,
"libcalico-version": libcalico_version,
"libnetwork-version": libnetwork_version}
bullet("Creating a candidate release branch called "
"'%s-candidate'." % new_version)
if arguments['--force']:
subprocess.call("git branch -D %s-candidate" % new_version, shell=True)
subprocess.call("git checkout -b %s-candidate" % new_version, shell=True)
# Update the code tree
utils.update_files(CANDIDATE_VERSION_REPLACE, release_data["versions"])
new_version = release_data["versions"]["version"]
para("The codebase has been updated to reference the release candidate "
"artifacts.")
bullet("Adding, committing and pushing the updated files to "
"origin/%s-candidate" % new_version)
subprocess.call("git add --all", shell=True)
subprocess.call('git commit -m "Update version strings for release '
'candidate %s"' % new_version, shell=True)
if arguments['--force']:
subprocess.call("git push -f origin %s-candidate" % new_version, shell=True)
else:
subprocess.call("git push origin %s-candidate" % new_version, shell=True)
actions()
bullet("Create a DockerHub release called '%s'" % new_version)
bullet("Monitor the semaphore, CircleCI and Docker builds for this branch "
"until all have successfully completed. Fix any issues with the "
"build.")
bullet("Run through a subset of the demonstrations. When running the "
"vagrant instructions, make sure you are using the candidate "
"branch (e.g. git checkout %s-candidate):" % new_version)
bullet("Ubuntu libnetwork", level=1)
bullet("CoreOS default networking", level=1)
para("Follow the URL below to view the correct demonstration instructions "
"for this release candidate.")
bullet("https://github.com/projectcalico/calico-docker/tree/%s-candidate" % new_version)
next("Once you have completed the testing, re-run the script.")
def cut_release():
"""
The candidate branch has been tested, so cut the actual release.
"""
utils.check_or_exit("Have you successfully tested your release candidate")
# Update the code tree once more to set the final GitHub URLs
utils.update_files(FINAL_VERSION_REPLACE, release_data["versions"])
new_version = release_data["versions"]["version"]
para("The codebase has been updated to reference the GitHub release "
"artifacts.")
actions()
bullet("Add, commit and push the updated files to "
"origin/%s-candidate" % new_version)
bullet("git add --all", level=1)
bullet('git commit -m "Update version strings for release '
'%s"' % new_version, level=1)
bullet("git push origin %s-candidate" % new_version, level=1)
bullet("[ideally squash the two commits into one]", level=1)
bullet("Monitor the semaphore, CircleCI and Docker builds for this branch "
"until all have successfully completed. Fix any issues with the "
"build.")
bullet("Create a Pull Request and review the changes")
bullet("Create a GitHub release called '%s'" % new_version)
para("Attach the calicoctl binary to the release. It can be downloaded "
"from the following URL:")
bullet("http://www.projectcalico.org/builds/calicoctl?circleci-branch=%s-candidate" % new_version)
para("Once the release has been created on GitHub, perform a final test "
"of the release:")
bullet("Run through a subset of the demonstrations. When running the "
"vagrant instructions, make sure you are using the tagged release "
"(e.g. git checkout tags/%s):" % new_version)
bullet("CoreOS libnetwork", level=1)
bullet("Ubuntu default networking", level=1)
next("Once you have completed the testing, re-run the script.")
def change_to_master():
"""
    Version has been released and tested.
"""
utils.check_or_exit("Have you successfully tested the release")
new_version = release_data["versions"]["version"]
para("The release is now complete. We now need to update the master "
"branch and do some general branch and build tidyup.")
actions()
bullet("Delete the DockerHub build for this release")
bullet("Checkout the master branch, and ensure it is up to date")
bullet("git checkout master", level=1)
bullet("git pull origin master", level=1)
bullet("Delete the origin/%s-candidate branch" % new_version)
bullet("git branch -D %s-candidate" % new_version, level=1)
bullet("git push origin :%s-candidate" % new_version, level=1)
next("Once complete, re-run the script.")
def update_master():
"""
Master branch is now checked out and needs updating.
"""
utils.check_or_exit("Is your git repository now on master")
# Update the master files.
utils.update_files(MASTER_VERSION_REPLACE, release_data["versions"],
is_release=False)
new_version = release_data["versions"]["version"]
para("The master codebase has now been updated to reference the latest "
"release.")
actions()
bullet("Self review the latest changes to master")
bullet("Push the changes to origin/master")
bullet("git add --all", level=1)
bullet('git commit -m "Update docs to version %s"' % new_version, level=1)
bullet("git push origin master", level=1)
bullet("Verify builds are working")
next("Once complete, re-run the script")
def complete():
"""
Show complete message
"""
utils.check_or_exit("Have you pushed the version update to master?")
warning("Release process is now complete.")
RELEASE_STEPS = [
start_release,
cut_release,
change_to_master,
update_master,
complete
]
def do_steps():
"""
Do the next step in the release process.
"""
step = release_data.get("step-number", 0)
RELEASE_STEPS[step]()
    step = step + 1
if step == len(RELEASE_STEPS):
release_data.clear()
else:
release_data["step-number"] = step
utils.save_release_data(release_data)
if __name__ == "__main__":
arguments = docopt(__doc__)
do_steps()
|
alexhersh/calico-docker
|
release-scripts/do_release.py
|
Python
|
apache-2.0
| 11,007
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroup_servicegroupentitymonbindings_binding(base_resource) :
""" Binding class showing the servicegroupentitymonbindings that can be bound to servicegroup.
"""
def __init__(self) :
self._servicegroupentname2 = ""
self._monitor_name = ""
self._monitor_state = ""
self._passive = False
self._monitortotalprobes = 0
self._monitortotalfailedprobes = 0
self._monitorcurrentfailedprobes = 0
self._lastresponse = ""
self._servicegroupname = ""
self._port = 0
self._weight = 0
self._customserverid = ""
self._serverid = 0
self._state = ""
self._hashid = 0
self.___count = 0
@property
def servicegroupname(self) :
"""Name of the service group.<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
"""Name of the service group.<br/>Minimum length = 1
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def servicegroupentname2(self) :
try :
return self._servicegroupentname2
except Exception as e:
raise e
@servicegroupentname2.setter
def servicegroupentname2(self, servicegroupentname2) :
try :
self._servicegroupentname2 = servicegroupentname2
except Exception as e:
raise e
@property
def port(self) :
"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535
"""
try :
self._port = port
except Exception as e:
raise e
@property
def state(self) :
"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def hashid(self) :
"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1.
"""
try :
return self._hashid
except Exception as e:
raise e
@hashid.setter
def hashid(self, hashid) :
"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1
"""
try :
self._hashid = hashid
except Exception as e:
raise e
@property
def serverid(self) :
"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
return self._serverid
except Exception as e:
raise e
@serverid.setter
def serverid(self, serverid) :
"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
self._serverid = serverid
except Exception as e:
raise e
@property
def customserverid(self) :
"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None".
"""
try :
return self._customserverid
except Exception as e:
raise e
@customserverid.setter
def customserverid(self, customserverid) :
"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None"
"""
try :
self._customserverid = customserverid
except Exception as e:
raise e
@property
def weight(self) :
""".<br/>Default value: 1<br/>Minimum value = 1<br/>Maximum value = 100.
"""
try :
return self._weight
except Exception as e:
raise e
@weight.setter
def weight(self, weight) :
""".<br/>Default value: 1<br/>Minimum value = 1<br/>Maximum value = 100
"""
try :
self._weight = weight
except Exception as e:
raise e
@property
def monitor_name(self) :
"""Monitor name.
"""
try :
return self._monitor_name
except Exception as e:
raise e
@monitor_name.setter
def monitor_name(self, monitor_name) :
"""Monitor name.
"""
try :
self._monitor_name = monitor_name
except Exception as e:
raise e
@property
def passive(self) :
"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
"""
try :
return self._passive
except Exception as e:
raise e
@passive.setter
def passive(self, passive) :
"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
"""
try :
self._passive = passive
except Exception as e:
raise e
@property
def lastresponse(self) :
"""The string form of monstatcode.
"""
try :
return self._lastresponse
except Exception as e:
raise e
@property
def monitor_state(self) :
"""The running state of the monitor on this service.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
"""
try :
return self._monitor_state
except Exception as e:
raise e
@property
def monitortotalfailedprobes(self) :
"""Total number of failed probes.
"""
try :
return self._monitortotalfailedprobes
except Exception as e:
raise e
@property
def monitorcurrentfailedprobes(self) :
"""Total number of currently failed probes.
"""
try :
return self._monitorcurrentfailedprobes
except Exception as e:
raise e
@property
def monitortotalprobes(self) :
"""Total number of probes sent to monitor this service.
"""
try :
return self._monitortotalprobes
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(servicegroup_servicegroupentitymonbindings_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.servicegroup_servicegroupentitymonbindings_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.servicegroupname) :
return str(self.servicegroupname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, servicegroupname) :
""" Use this API to fetch servicegroup_servicegroupentitymonbindings_binding resources.
"""
try :
obj = servicegroup_servicegroupentitymonbindings_binding()
obj.servicegroupname = servicegroupname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, servicegroupname, filter_) :
""" Use this API to fetch filtered set of servicegroup_servicegroupentitymonbindings_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = servicegroup_servicegroupentitymonbindings_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, servicegroupname) :
""" Use this API to count servicegroup_servicegroupentitymonbindings_binding resources configued on NetScaler.
"""
try :
obj = servicegroup_servicegroupentitymonbindings_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, servicegroupname, filter_) :
""" Use this API to count the filtered set of servicegroup_servicegroupentitymonbindings_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = servicegroup_servicegroupentitymonbindings_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Monitor_state:
UP = "UP"
DOWN = "DOWN"
UNKNOWN = "UNKNOWN"
BUSY = "BUSY"
OUT_OF_SERVICE = "OUT OF SERVICE"
GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
NS_EMPTY_STR = "NS_EMPTY_STR"
Unknown = "Unknown"
DISABLED = "DISABLED"
class Monstate:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class servicegroup_servicegroupentitymonbindings_binding_response(base_response) :
def __init__(self, length=1) :
self.servicegroup_servicegroupentitymonbindings_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.servicegroup_servicegroupentitymonbindings_binding = [servicegroup_servicegroupentitymonbindings_binding() for _ in range(length)]
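# Usage sketch (assumes an authenticated nitro_service instance named "client"
# and a service group named "sg_web"; both are illustrative values):
#
#     bindings = servicegroup_servicegroupentitymonbindings_binding.get(client, "sg_web")
#     total = servicegroup_servicegroupentitymonbindings_binding.count(client, "sg_web")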
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/basic/servicegroup_servicegroupentitymonbindings_binding.py
|
Python
|
apache-2.0
| 10,748
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo.config import cfg
from sahara import conductor as c
from sahara import context
from sahara.utils.openstack import keystone
conductor = c.API
CONF = cfg.CONF
def create_trust(cluster):
client = keystone.client()
ctx = context.current()
trustee_id = keystone.client_for_admin().user_id
trust = client.trusts.create(trustor_user=client.user_id,
trustee_user=trustee_id,
impersonation=True,
role_names=ctx.roles,
project=client.tenant_id)
conductor.cluster_update(ctx,
cluster,
{'trust_id': trust.id})
def use_os_admin_auth_token(cluster):
if cluster.trust_id:
ctx = context.current()
ctx.username = CONF.keystone_authtoken.admin_user
ctx.tenant_id = cluster.tenant_id
client = keystone.client_for_trusts(cluster.trust_id)
ctx.token = client.auth_token
ctx.service_catalog = json.dumps(
client.service_catalog.catalog['catalog'])
def delete_trust(cluster):
if cluster.trust_id:
keystone_client = keystone.client_for_trusts(cluster.trust_id)
keystone_client.trusts.delete(cluster.trust_id)
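# Lifecycle of the trust handled above: create_trust() stores the trust id on
# the cluster, use_os_admin_auth_token() swaps the current context's token for
# one obtained through that trust, and delete_trust() revokes it again.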
|
tellesnobrega/storm_plugin
|
sahara/service/trusts.py
|
Python
|
apache-2.0
| 1,897
|
#coding=utf-8
'''
Created on 2013-07-13
@author: huiyugeng
'''
import types
import ndb
import regex
'''
Load the parsing map
'''
def load_map(map_file):
map_struct = ndb.load(map_file)
map_list = ndb.execute(map_struct, 'select: map')
return map_list
'''
Rule matching and field mapping
'''
def reflect(map_list, line):
value_list = []
map_index = {}
if line == None:
return None
line = line.strip()
for map_item in map_list:
rule_type = map_item.get('type').strip() if map_item.has_key('type') else ''
if rule_type == '':
rule_type = 'regex'
pattern = map_item.get('pattern') if map_item.has_key('pattern') else ''
if rule_type == 'regex':
if regex.check_line(pattern, line):
value_list = regex.get_line(pattern, line)
map_index = __build_map_index(map_item)
break
elif rule_type == 'split':
match = map_item.get('match') if map_item.has_key('match') else ''
if __is_match(match, line):
value_list = line.split(pattern)
map_index = __build_map_index(map_item)
break
map_value = {}
index = 1
for value in value_list:
key = map_index.get(str(index))
if key != None:
map_value[key] = value
index = index + 1
return map_value
def __build_map_index(map_item):
map_index = {}
map_index_list = map_item.get('item') if map_item.has_key('item') else []
for _map_index in map_index_list:
try:
if _map_index.has_key('index') and _map_index.has_key('key'):
map_index[_map_index.get('index')] = _map_index.get('key')
except:
pass
return map_index
def __is_match(match, line):
result = False
if regex.check_line('(\S+)\((\S+)\)', match):
fun, value = regex.get_line('(\S+)\((\S+)\)', match)
if fun == 'startswith':
result = line.startswith(value)
elif fun == 'endswith':
result = line.endswith(value)
elif fun == 'in':
result = value in line
return result
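# Hedged illustration (not part of the original module): shows the expected
# shape of a 'split'-type map entry and how its 'item' indices map split
# fields to keys, mirroring the split branch of reflect(). The field names
# below are hypothetical.
def _example_split_mapping():
    line = 'INFO|2013-07-13|disk usage at 80%'
    map_item = {
        'type': 'split',
        'match': 'startswith(INFO)',
        'pattern': '|',
        'item': [
            {'index': '1', 'key': 'level'},
            {'index': '2', 'key': 'date'},
            {'index': '3', 'key': 'message'},
        ],
    }
    values = line.split(map_item['pattern'])
    index_to_key = dict((i['index'], i['key']) for i in map_item['item'])
    return dict((index_to_key[str(n + 1)], v) for n, v in enumerate(values))
# _example_split_mapping() == {'level': 'INFO', 'date': '2013-07-13',
#                              'message': 'disk usage at 80%'}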
|
fengtaijun/py_text
|
src/regex/regex_map.py
|
Python
|
apache-2.0
| 2,376
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import os
import sys
import types
from functions_framework.exceptions import (
InvalidConfigurationException,
InvalidTargetTypeException,
MissingTargetException,
)
DEFAULT_SOURCE = os.path.realpath("./main.py")
FUNCTION_SIGNATURE_TYPE = "FUNCTION_SIGNATURE_TYPE"
HTTP_SIGNATURE_TYPE = "http"
CLOUDEVENT_SIGNATURE_TYPE = "cloudevent"
BACKGROUNDEVENT_SIGNATURE_TYPE = "event"
# REGISTRY_MAP stores the registered functions.
# Keys are user function names, values are user function signature types.
REGISTRY_MAP = {}
def get_user_function(source, source_module, target):
"""Returns user function, raises exception for invalid function."""
# Extract the target function from the source file
if not hasattr(source_module, target):
raise MissingTargetException(
"File {source} is expected to contain a function named {target}".format(
source=source, target=target
)
)
function = getattr(source_module, target)
# Check that it is a function
if not isinstance(function, types.FunctionType):
raise InvalidTargetTypeException(
"The function defined in file {source} as {target} needs to be of "
"type function. Got: invalid type {target_type}".format(
source=source, target=target, target_type=type(function)
)
)
return function
def load_function_module(source):
"""Load user function source file."""
# 1. Extract the module name from the source path
realpath = os.path.realpath(source)
directory, filename = os.path.split(realpath)
name, extension = os.path.splitext(filename)
# 2. Create a new module
spec = importlib.util.spec_from_file_location(
name, realpath, submodule_search_locations=[directory]
)
source_module = importlib.util.module_from_spec(spec)
# 3. Add the directory of the source to sys.path to allow the function to
# load modules relative to its location
sys.path.append(directory)
# 4. Add the module to sys.modules
sys.modules[name] = source_module
return source_module, spec
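# Hedged usage sketch (not part of the upstream module): ties the helpers in
# this file together; the caller is expected to execute the module spec
# before looking up the target function.
def _example_load_and_get_function(source=None, target=None):
    source = get_function_source(source)
    source_module, spec = load_function_module(source)
    spec.loader.exec_module(source_module)
    return get_user_function(source, source_module, get_function_target(target))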
def get_function_source(source):
"""Get the configured function source."""
source = source or os.environ.get("FUNCTION_SOURCE", DEFAULT_SOURCE)
    # Python 3.5: os.path.exists does not support PosixPath
source = str(source)
return source
def get_function_target(target):
"""Get the configured function target."""
target = target or os.environ.get("FUNCTION_TARGET", "")
# Set the environment variable if it wasn't already
os.environ["FUNCTION_TARGET"] = target
if not target:
raise InvalidConfigurationException(
"Target is not specified (FUNCTION_TARGET environment variable not set)"
)
return target
def get_func_signature_type(func_name: str, signature_type: str) -> str:
"""Get user function's signature type.
Signature type is searched in the following order:
1. Decorator user used to register their function
2. --signature-type flag
3. environment variable FUNCTION_SIGNATURE_TYPE
If none of the above is set, signature type defaults to be "http".
"""
registered_type = REGISTRY_MAP[func_name] if func_name in REGISTRY_MAP else ""
sig_type = (
registered_type
or signature_type
or os.environ.get(FUNCTION_SIGNATURE_TYPE, HTTP_SIGNATURE_TYPE)
)
# Set the environment variable if it wasn't already
os.environ[FUNCTION_SIGNATURE_TYPE] = sig_type
# Update signature type for legacy GCF Python 3.7
if os.environ.get("ENTRY_POINT"):
os.environ["FUNCTION_TRIGGER_TYPE"] = sig_type
return sig_type
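# Hedged illustration (not part of the upstream module): mirrors the
# precedence documented in get_func_signature_type -- decorator registry
# first, then the --signature-type flag, then FUNCTION_SIGNATURE_TYPE,
# finally the "http" default.
def _example_signature_type_precedence(registered, flag, env_value):
    return registered or flag or env_value or HTTP_SIGNATURE_TYPE
# e.g. _example_signature_type_precedence("", "cloudevent", "event") == "cloudevent"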
|
GoogleCloudPlatform/functions-framework-python
|
src/functions_framework/_function_registry.py
|
Python
|
apache-2.0
| 4,301
|
# Adapted from: https://github.com/sussexwearlab/DeepConvLSTM
__author__ = 'fjordonez, gchevalier'
from signal_filtering import filter_opportunity_datasets_accelerometers
import os
import zipfile
import argparse
import numpy as np
import cPickle as cp
from io import BytesIO
from pandas import Series
# Hardcoded number of sensor channels employed in the OPPORTUNITY challenge
NB_SENSOR_CHANNELS = 113
NB_SENSOR_CHANNELS_WITH_FILTERING = 149 # =77 gyros +36*2 accelerometer channels
# Hardcoded names of the files defining the OPPORTUNITY challenge data. As named in the original data.
OPPORTUNITY_DATA_FILES_TRAIN = [
'OpportunityUCIDataset/dataset/S1-Drill.dat',
'OpportunityUCIDataset/dataset/S1-ADL1.dat',
'OpportunityUCIDataset/dataset/S1-ADL2.dat',
'OpportunityUCIDataset/dataset/S1-ADL3.dat',
'OpportunityUCIDataset/dataset/S1-ADL4.dat',
'OpportunityUCIDataset/dataset/S1-ADL5.dat',
'OpportunityUCIDataset/dataset/S2-Drill.dat',
'OpportunityUCIDataset/dataset/S2-ADL1.dat',
'OpportunityUCIDataset/dataset/S2-ADL2.dat',
'OpportunityUCIDataset/dataset/S2-ADL3.dat',
'OpportunityUCIDataset/dataset/S3-Drill.dat',
'OpportunityUCIDataset/dataset/S3-ADL1.dat',
'OpportunityUCIDataset/dataset/S3-ADL2.dat',
'OpportunityUCIDataset/dataset/S3-ADL3.dat'
]
OPPORTUNITY_DATA_FILES_TEST = [
'OpportunityUCIDataset/dataset/S2-ADL4.dat',
'OpportunityUCIDataset/dataset/S2-ADL5.dat',
'OpportunityUCIDataset/dataset/S3-ADL4.dat',
'OpportunityUCIDataset/dataset/S3-ADL5.dat'
]
def select_columns_opp(data):
"""Selection of the 113 columns employed in the OPPORTUNITY challenge
:param data: numpy integer matrix
Sensor data (all features)
:return: tuple((numpy integer 2D matrix, numpy integer 1D matrix))
(Selection of features (N, f), feature_is_accelerometer (f,) one-hot)
"""
    # In terms of column_names.txt's ranges: excluded-included (here 0-indexed)
features_delete = np.arange(46, 50)
features_delete = np.concatenate([features_delete, np.arange(59, 63)])
features_delete = np.concatenate([features_delete, np.arange(72, 76)])
features_delete = np.concatenate([features_delete, np.arange(85, 89)])
features_delete = np.concatenate([features_delete, np.arange(98, 102)])
features_delete = np.concatenate([features_delete, np.arange(134, 243)])
features_delete = np.concatenate([features_delete, np.arange(244, 249)])
    # In terms of column_names.txt's ranges: excluded-included
features_acc = np.arange(1, 37)
features_acc = np.concatenate([features_acc, np.arange(134, 194)])
features_acc = np.concatenate([features_acc, np.arange(207, 231)])
# One-hot for everything that is an accelerometer
is_accelerometer = np.zeros([243])
is_accelerometer[features_acc] = 1
# Deleting some signals to keep only the 113 of the challenge
data = np.delete(data, features_delete, 1)
is_accelerometer = np.delete(is_accelerometer, features_delete, 0)
# Shape `(N, f), (f, )`
# where N is number of timesteps and f is 113 features, one-hot
return data, is_accelerometer
def normalize(x):
"""Normalizes all sensor channels by mean substraction,
dividing by the standard deviation and by 2.
:param x: numpy integer matrix
Sensor data
:return:
Normalized sensor data
"""
x = np.array(x, dtype=np.float32)
m = np.mean(x, axis=0)
x -= m
std = np.std(x, axis=0)
std += 0.000001
x /= (std * 2) # 2 is for having smaller values
return x
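# Hedged worked example (not part of the original module): for a single
# channel with values [0, 2, 4], the mean (2.0) is subtracted and the result
# is divided by twice the standard deviation (~1.633), giving approximately
# [-0.61, 0.0, 0.61].
def _normalize_worked_example():
    return normalize(np.array([[0.0], [2.0], [4.0]]))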
def split_data_into_time_gyros_accelerometers(data, is_accelerometer):
# Assuming index 0 of features is reserved for time.
# Splitting data into gyros, accelerometers and time:
is_accelerometer = np.array(is_accelerometer*2-1, dtype=np.int32)
# is_accelerometer's zeros have been replaced by -1. 1's are untouched.
plane = np.arange(len(is_accelerometer)) * is_accelerometer
delete_gyros = [-e for e in plane if e <= 0]
delete_accms = [ e for e in plane if e >= 0]
time = data[:,0]
gyros = np.delete(data, delete_accms, 1)
accms = np.delete(data, delete_gyros, 1)
return time, gyros, accms
def divide_x_y(data, label, filter_accelerometers):
"""Segments each sample into (time+features) and (label)
:param data: numpy integer matrix
Sensor data
:param label: string, ['gestures' (default), 'locomotion']
Type of activities to be recognized
:return: numpy integer matrix, numpy integer array
Features encapsulated into a matrix and labels as an array
"""
if filter_accelerometers:
data_x = data[:, :114]
else:
data_x = data[:,1:114]
# Choose labels type for y
if label not in ['locomotion', 'gestures']:
raise RuntimeError("Invalid label: '%s'" % label)
if label == 'locomotion':
data_y = data[:, 114] # Locomotion label
elif label == 'gestures':
data_y = data[:, 115] # Gestures label
return data_x, data_y
def adjust_idx_labels(data_y, label):
"""Transforms original labels into the range [0, nb_labels-1]
:param data_y: numpy integer array
Sensor labels
:param label: string, ['gestures' (default), 'locomotion']
Type of activities to be recognized
:return: numpy integer array
Modified sensor labels
"""
if label == 'locomotion': # Labels for locomotion are adjusted
data_y[data_y == 4] = 3
data_y[data_y == 5] = 4
elif label == 'gestures': # Labels for gestures are adjusted
data_y[data_y == 406516] = 1
data_y[data_y == 406517] = 2
data_y[data_y == 404516] = 3
data_y[data_y == 404517] = 4
data_y[data_y == 406520] = 5
data_y[data_y == 404520] = 6
data_y[data_y == 406505] = 7
data_y[data_y == 404505] = 8
data_y[data_y == 406519] = 9
data_y[data_y == 404519] = 10
data_y[data_y == 406511] = 11
data_y[data_y == 404511] = 12
data_y[data_y == 406508] = 13
data_y[data_y == 404508] = 14
data_y[data_y == 408512] = 15
data_y[data_y == 407521] = 16
data_y[data_y == 405506] = 17
return data_y
def check_data(data_set):
"""Try to access to the file and checks if dataset is in the data directory
In case the file is not found try to download it from original location
:param data_set:
Path with original OPPORTUNITY zip file
:return:
"""
print 'Checking dataset {0}'.format(data_set)
data_dir, data_file = os.path.split(data_set)
# When a directory is not provided, check if dataset is in the data directory
if data_dir == "" and not os.path.isfile(data_set):
new_path = os.path.join(os.path.split(__file__)[0], "data", data_set)
if os.path.isfile(new_path) or data_file == 'OpportunityUCIDataset.zip':
data_set = new_path
# When dataset not found, try to download it from UCI repository
if (not os.path.isfile(data_set)) and data_file == 'OpportunityUCIDataset.zip':
print '... dataset path {0} not found'.format(data_set)
import urllib
origin = (
'https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip'
)
if not os.path.exists(data_dir):
print '... creating directory {0}'.format(data_dir)
os.makedirs(data_dir)
print '... downloading data from {0}'.format(origin)
urllib.urlretrieve(origin, data_set)
return data_dir
def process_dataset_file(data, label, filter_accelerometers):
"""Function defined as a pipeline to process individual OPPORTUNITY files
:param data: numpy integer matrix
Matrix containing data samples (rows) for every sensor channel (column)
:param label: string, ['gestures' (default), 'locomotion']
Type of activities to be recognized
    :return: numpy integer matrix, numpy integer array
Processed sensor data, segmented into features (x) and labels (y)
"""
# Select correct columns
data, is_accelerometer = select_columns_opp(data)
    # Columns are segmented into features and labels
data_x, data_y = divide_x_y(data, label, filter_accelerometers)
data_y = adjust_idx_labels(data_y, label)
data_y = data_y.astype(int)
    # Perform linear interpolation (i.e., fill in NaN values)
data_x = np.array([Series(i).interpolate() for i in data_x.T]).T
# Remaining missing data are converted to zero
data_x[np.isnan(data_x)] = 0
# All sensor channels are normalized
data_x = normalize(data_x)
if filter_accelerometers:
        # The accelerometer channels are low-pass filtered to separate the
        # gravity component and remove noise; the time column is discarded.
_, x_gyros, x_accms = split_data_into_time_gyros_accelerometers(
data_x, is_accelerometer
)
print "gyros' shape: {}".format(x_gyros.shape)
print "old accelerometers' shape: {}".format(x_accms.shape)
x_accms = normalize(filter_opportunity_datasets_accelerometers(x_accms))
print "new accelerometers' shape: {}".format(x_accms.shape)
        # Put the gyro and filtered accelerometer features back together
data_x = np.hstack([x_gyros, x_accms])
print "new total shape: {}".format(data_x.shape)
return data_x, data_y
def load_data_files(zipped_dataset, label, data_files, filter_accelerometers=False):
"""Loads specified data files' features (x) and labels (y)
:param zipped_dataset: ZipFile
OPPORTUNITY zip file to read from
:param label: string, ['gestures' (default), 'locomotion']
        Type of activities to be recognized. The OPPORTUNITY dataset includes several annotations,
        supporting recognition of modes of locomotion/postures and of sporadic gestures.
:param data_files: list of strings
Data files to load.
    :return: numpy integer matrix, numpy integer array
Loaded sensor data, segmented into features (x) and labels (y)
"""
nb_sensors = NB_SENSOR_CHANNELS_WITH_FILTERING if filter_accelerometers else NB_SENSOR_CHANNELS
data_x = np.empty((0, nb_sensors))
data_y = np.empty((0))
for filename in data_files:
try:
data = np.loadtxt(BytesIO(zipped_dataset.read(filename)))
print '... file {0}'.format(filename)
x, y = process_dataset_file(data, label, filter_accelerometers)
data_x = np.vstack((data_x, x))
data_y = np.concatenate([data_y, y])
print "Data's shape yet: "
print data_x.shape
except KeyError:
print 'ERROR: Did not find {0} in zip file'.format(filename)
return data_x, data_y
def generate_data(dataset, target_filename, label):
"""Function to read the OPPORTUNITY challenge raw data and process all sensor channels
:param dataset: string
Path with original OPPORTUNITY zip file
:param target_filename: string
Processed file
:param label: string, ['gestures' (default), 'locomotion']
        Type of activities to be recognized. The OPPORTUNITY dataset includes several annotations,
        supporting recognition of modes of locomotion/postures and of sporadic gestures.
"""
data_dir = check_data(dataset)
zf = zipfile.ZipFile(dataset)
print '\nProcessing train dataset files...\n'
X_train, y_train = load_data_files(zf, label, OPPORTUNITY_DATA_FILES_TRAIN)
print '\nProcessing test dataset files...\n'
X_test, y_test = load_data_files(zf, label, OPPORTUNITY_DATA_FILES_TEST)
print "Final datasets with size: | train {0} | test {1} | ".format(X_train.shape, X_test.shape)
obj = [(X_train, y_train), (X_test, y_test)]
f = file(os.path.join(data_dir, target_filename), 'wb')
cp.dump(obj, f, protocol=cp.HIGHEST_PROTOCOL)
f.close()
def get_args():
    '''This function parses and returns the arguments passed in'''
parser = argparse.ArgumentParser(
description='Preprocess OPPORTUNITY dataset')
# Add arguments
parser.add_argument(
'-i', '--input', type=str, help='OPPORTUNITY zip file', required=True)
parser.add_argument(
'-o', '--output', type=str, help='Processed data file', required=True)
parser.add_argument(
'-t', '--task', type=str.lower, help='Type of activities to be recognized', default="gestures", choices = ["gestures", "locomotion"], required=False)
# Array for all arguments passed to script
args = parser.parse_args()
# Assign args to variables
dataset = args.input
target_filename = args.output
label = args.task
# Return all variable values
return dataset, target_filename, label
if __name__ == '__main__':
OpportunityUCIDataset_zip, output, l = get_args();
generate_data(OpportunityUCIDataset_zip, output, l)
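# Hedged usage note (not part of the original script), based on the argparse
# flags defined in get_args(); the output filename below is hypothetical:
#   python preprocess_data.py -i OpportunityUCIDataset.zip -o oppChallenge_gestures.data -t gestures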
|
guillaume-chevalier/HAR-stacked-residual-bidir-LSTMs
|
data/preprocess_data.py
|
Python
|
apache-2.0
| 13,571
|
################################################################################
# Name : GDB Wrapper
# Author : Jesse Schwartzentruber & Tyson Smith
#
# Copyright 2014 BlackBerry Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import distutils.spawn
import os
import platform
import re
import signal
import tempfile
import time
from . import _common
CLOSE_FDS = True
if platform.system() in ["Linux", "Darwin"]:
TOOL_GDB = distutils.spawn.find_executable('gdb', os.pathsep.join([os.environ['PATH'], _common.PATH_DBG]))
if platform.system() == "Linux":
TOOL_GDB_NTO = os.path.join(_common.PATH_DBG, "linux_x64-gdb-ntoarm")
TOOL_KDSRV = os.path.join(_common.PATH_DBG, "linux_x64-kdserver")
else:
TOOL_GDB_NTO = None
TOOL_KDSRV = None
elif platform.system() == "QNX":
TOOL_GDB = {"x86": os.path.join(_common.PATH_DBG, "ntox86-gdb"),
"armle": os.path.join(_common.PATH_DBG, "ntoarm-gdb"),
}[platform.processor()]
TOOL_GDB_NTO = TOOL_GDB
TOOL_KDSRV = None
assert os.access(TOOL_GDB, os.X_OK), "%s is not executable" % TOOL_GDB
elif platform.system() == "Windows":
TOOL_GDB = distutils.spawn.find_executable('gdb.exe', os.pathsep.join([os.environ['PATH'], _common.PATH_DBG]))
TOOL_GDB_NTO = os.path.join(_common.PATH_DBG, "gdb-ntoarm.exe")
TOOL_KDSRV = os.path.join(_common.PATH_DBG, "kdserver.exe")
CLOSE_FDS = False
GDB_CMDS = os.path.join(os.path.abspath(os.path.dirname(__file__)), "cmds.gdb")
# child sometimes doesn't die on SIGTERM in QNX
# wait this length of time before sending another SIGTERM, and finally SIGKILL
SLAY_TIMEOUT = 10
def _send_signal(signal, *args):
for pid in args:
if pid:
os.kill(pid, signal)
break
def _trim_disassembly(stdout):
if not stdout:
return stdout
start_loc = stdout.find("Dump of assembler code")
end_loc = stdout.find("End of assembler dump.", start_loc)
if start_loc == -1 or end_loc == -1:
return "%s\nError trimming assembler dump. start_loc = %d, end_loc = %d" % (stdout,
start_loc,
end_loc)
try:
a, b = stdout[start_loc:end_loc].split("\n=>")
except ValueError:
return "%s\nError trimming assembler dump. Could not find '=>'" % (stdout)
a = a.splitlines()
start_loc += len(a.pop(0))
return "%s\n%s\n=>%s\n%s" % (stdout[:start_loc],
"\n".join(a[-15:]),
"\n".join(b.splitlines()[:15]),
stdout[end_loc:])
def _gdb_cmd(target_exe, solib_search=None, run=True):
return [TOOL_GDB, "-nx", "-x", GDB_CMDS] + \
[i for sl in [("-ex", x) for x in
_gdb_cmd_gen(run=run, target=target_exe, solib_search=solib_search)] for i in sl] + \
["-return-child-result", "-batch", "--args"]
def run_with_gdb(target_cmd, symbols=None, solib_search=None, env=None, callback=None,
callback_args=None, timeout=_common.DEFAULT_TIMEOUT, memory_limit=None,
idle_limit=None):
"""
This function is similar to the :func:`run` function above,
except the target is executed under control of the GNU Debugger.
Symbols may be specified manually, otherwise they are expected
to be findable by GDB (usually included in the target itself).
:func:`run_with_gdb` returns a :class:`~alf.FuzzResult` instance.
If no crash was detected, the :attr:`~alf.FuzzResult.classification`
member of the :class:`~alf.FuzzResult` will be
:data:`~alf.debug.NOT_AN_EXCEPTION`.
Classifications: :data:`~alf.debug.NOT_AN_EXCEPTION`,
:data:`~alf.debug.TIMEOUT`, :data:`~alf.debug.UNKNOWN`.
Availability: Unix, Windows.
"""
classification = None
cpid = None
if platform.system() == "Windows":
_common._set_gflags(target_cmd[0])
if platform.system() == "QNX":
if not os.path.isfile("libc.so.3"):
if not os.path.isfile("/root/symbols/x86/lib/libc.so.3.sym"):
raise RuntimeError("Cannot find /root/symbols/x86/lib/libc.so.3.sym")
os.symlink("/root/symbols/x86/lib/libc.so.3.sym", "libc.so.3")
fd, temp_fn = tempfile.mkstemp(prefix="gdb", suffix=".log", dir=".")
os.close(fd)
nul = open(os.devnull, "w+")
try:
with open(temp_fn, "w+") as f:
if env is None:
env = dict(os.environ)
env["LIBC_FATAL_STDERR_"] = "1"
p = _common.subprocess.Popen(_gdb_cmd(target_cmd[0], solib_search) + target_cmd,
close_fds=CLOSE_FDS, stdout=f, stderr=f, stdin=nul,
creationflags=_common.POPEN_FLAGS, env=env)
try:
with open(temp_fn) as fr:
while p.poll() is None:
line = fr.readline()
m = re.match(r"^\*\s+1\s+Thread\s+\w+\s+\(LWP\s+(?P<pid>[0-9]+)\)", line)
if m is None:
m = re.match(r"^\*\s+1\s+(pid|process|Thread)\s+(?P<pid>[0-9]+)", line)
if m:
cpid = int(m.group("pid"))
break
cb_res = _common._call_callback(callback, callback_args, p.pid)
if cb_res == _common.CB_ERROR:
raise RuntimeError("callback() returned error")
target_mon = _common.TargetMonitor(cpid, idle_limit=idle_limit,
memory_limit=memory_limit, time_limit=timeout)
while p.poll() is None:
if target_mon.check_memory():
classification = _common.EXCESS_MEMORY_USAGE
break
if target_mon.check_idle():
break
if target_mon.check_timeout():
classification = _common.TIMEOUT
break
time.sleep(0.01)
finally:
while p.poll() is None:
try:
if platform.system() == "QNX":
attempt = -1
sigs = [signal.SIGTERM, signal.SIGKILL]
while p.poll() is None:
attempt += 1
assert attempt < len(sigs), "Failed to kill child process"
_send_signal(sigs[attempt], cpid, p.pid)
kill_time = _common.prof_timer()
while _common.prof_timer() - kill_time < SLAY_TIMEOUT:
if p.poll() is not None:
break
time.sleep(0.25)
elif platform.system() == "Windows":
_send_signal(signal.CTRL_BREAK_EVENT, cpid, p.pid)
else:
_send_signal(signal.SIGTERM, cpid, p.pid)
except OSError:
pass
exit_code = p.wait()
f.seek(0, os.SEEK_SET)
stdout = f.read()
finally:
_common.delete(temp_fn)
nul.close()
m = re.search(r"Traceback \(\D+\):.+Python command:", stdout, re.DOTALL)
if m:
tb = m.group(0)
tb = tb[:tb.rfind("\n")]
if not tb.endswith("No threads running"):
raise RuntimeError("GDB Python Failure\n\n%s" % tb)
else:
return _common.FuzzResult(_common.NOT_AN_EXCEPTION, stdout)
backtrace, debug_classification = _process_gdb_output(stdout)
if cb_res == _common.CB_HANG:
classification = _common.TIMEOUT
elif classification is None:
if cb_res == _common.CB_FAIL:
classification = _common.UNKNOWN
else:
classification = debug_classification
stdout = _trim_disassembly(stdout)
stdout = _common._limit_output_length(stdout)
return _common.FuzzResult(classification, stdout, backtrace, exit_code)
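# Hedged usage sketch (not part of the upstream module): the target command
# and timeout below are hypothetical.
def _example_run_with_gdb():
    result = run_with_gdb(["./target_app", "input.bin"], timeout=60)
    return result.classification, result.backtrace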
def _symbolize(target, output, tool, exp_opt):
fd, tmp_log = tempfile.mkstemp(prefix="%s_log" % tool, suffix=".txt", dir=".")
try:
os.write(fd, output)
finally:
os.close(fd)
try:
result = _common.run([TOOL_GDB, "-batch", "-nx",
"-ex", "set python print-stack full",
"-ex", "py import exploitable",
"-ex", "exploitable -m %s %s" % (exp_opt, tmp_log),
"-ex", "quit", target], timeout=180)
finally:
_common.delete(tmp_log)
if result.classification == _common.TIMEOUT:
raise RuntimeError("Timed out while processing %s output:\n%s" % (tool, output))
result.backtrace, result.classification = _process_gdb_output(result.text)
result.text = _common._limit_output_length(result.text)
if result.classification == _common.NOT_AN_EXCEPTION:
raise RuntimeError("Failed to process %s output:\n%s" % (tool, output))
return result
def symbolize_valgrind(target, valgrind_output):
"""
Creates a :class:`~alf.FuzzResult` with classification by analyzing the log
generated by Valgrind/Memcheck.
"""
return _symbolize(target, valgrind_output, "valgrind", "-vg")
def symbolize_asan(target, asan_output):
"""
Creates a :class:`~alf.FuzzResult` with classification by analyzing the log
generated by AddressSanitizer.
The result.text includes asan_output, but symbolized if possible.
"""
return _symbolize(target, asan_output, "asan", "-a")
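# Hedged usage sketch (not part of the upstream module): 'target' and
# 'raw_asan_log' are hypothetical; the latter is raw AddressSanitizer output
# captured from a separate run of the target.
def _example_symbolize_asan(target, raw_asan_log):
    return symbolize_asan(target, raw_asan_log).classification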
def _gdb_core_debug(symbols, ucore=None, kcore=None, remote=None, solib_search=None):
assert TOOL_GDB_NTO, "GDB targetting NTO not available for this platform"
if kcore:
assert TOOL_KDSRV, "kdserver not available for this platform"
assert len([x for x in [ucore, kcore, remote] if x is not None]) == 1, "Must specify exactly one core file"
with tempfile.TemporaryFile() as f:
gdb_cmd = [TOOL_GDB_NTO, "-nx", "-x", GDB_CMDS, symbols]
if ucore is not None:
gdb_cmd.append(ucore)
gdb = _common.subprocess.Popen(gdb_cmd, stdout=f, stderr=f, stdin=_common.subprocess.PIPE)
if kcore is not None:
gdb.stdin.write("target remote |%s %s\n" % (TOOL_KDSRV, kcore.replace("\\", "\\\\")))
elif remote is not None:
gdb.stdin.write("target remote %s\n" % remote)
core = ucore or kcore
for c in _gdb_cmd_gen(core=core, solib_search=solib_search, detach=not core):
gdb.stdin.write("%s\n" % c)
gdb_wait_st = _common.prof_timer()
while gdb.poll() is None and (_common.prof_timer() - gdb_wait_st) < 20:
time.sleep(0.1)
if gdb.poll() is None:
gdb.terminate()
gdb.wait()
f.seek(0)
gdb_out = f.read()
trim = gdb_out.find(r'$1 = "TRIM"')
if trim != -1:
gdb_out = "\n".join([l for l in gdb_out[:trim].splitlines()[:-1] if not l.startswith("#0")] +
gdb_out[trim:].splitlines()[1:] + [""])
bt, cls = _process_gdb_output(gdb_out)
gdb_out = _trim_disassembly(gdb_out)
return _common.FuzzResult(cls, gdb_out, bt)
def _gdb_cmd_gen(core=False, run=False, use_rcheck=False,
solib_search=None, target=None, detach=False, follow_child=False):
# static cmds, sequence definitions, or conditional cmds (if, while, etc.) must go in cmds.gdb
if follow_child:
yield "set follow-fork-mode child"
if run and use_rcheck:
yield "set environment LD_PRELOAD librcheck.so"
# Suppress prints from librcheck
yield "set environment MALLOC_FILE /dev/null"
        # Memory tracing on start. If memory tracing is disabled, errors can't report allocation/deallocation backtraces for the memory chunk involved in the error condition.
yield "set environment MALLOC_START_TRACING 0"
        # Start the control thread, which allows the IDE to send commands to the application (can't be used if the process forks).
yield "set environment MALLOC_CTHREAD 0"
# Check for out of bounds errors on every allocation/deallocation.
yield "set environment MALLOC_CKBOUNDS 0"
# Check strings and memory functions for errors.
yield "set environment MALLOC_CKACCESS 0"
# Check free and alloc functions for errors.
yield "set environment MALLOC_CKALLOC 0"
# Set error action behavior, 1-abort, 2 - exit (no core), 3 - dump core
yield "set environment MALLOC_ACTION 0"
# Enable dumping leaks on exit
yield "set environment MALLOC_DUMP_LEAKS 0" # TODO: This causes a trace back when mem leaks are caught
# Set to 0 to disable optimization. The default is 32
yield "set environment MALLOC_USE_CACHE 0"
# Check the allocator chain integrity on every allocation/deallocation (very expensive).
yield "set environment MALLOC_CKCHAIN 0"
if solib_search:
yield "set solib-search-path %s" % solib_search
if core:
# put in a trim marker, because GDB runs "backtrace 1 full" when loading a core file
yield "print \"TRIM\""
yield "info program"
yield "monitor kprintf"
elif run:
yield "set environment ASAN_OPTIONS abort_on_error=1 handle_segv=0 strict_memcmp=0 alloc_dealloc_mismatch=0 check_malloc_usable_size=0"
yield "start"
# need the pid to be able to kill it
yield "info threads"
# continue running
yield "continue"
yield "symbol-file"
if target is None:
raise RuntimeError("Missing target")
yield "symbol-file %s" % target
yield "sharedlibrary"
yield "info proc mappings" # Linux only?
yield "info meminfo" # QNX, does it work on core files?
yield "info threads"
# try to load symbols for any shared libs that were dynamically loaded
yield "shared"
# print library info so we know if symbols are missing
yield "info sharedlibrary"
yield "backtrace full"
yield "exploitable -m"
yield "info locals"
yield "info registers"
yield "disassemble"
if detach:
yield "detach"
if platform.system() == "Windows":
if core:
yield "quit $_exitcode"
else:
yield "init-if-undefined $_exitcode = -1"
# this doesn't work in the hang case
#yield "while $_exitcode == -1"
#yield "continue"
#yield "end"
yield "quit $_exitcode"
else:
yield "quit_with_code"
_RE_GDB_OUTPUT = re.compile(r"""(?x) # verbose
^(It\ stopped\ with|Program\ received)\ signal
\ (?P<signame>SIG[A-Z]+), |
^Program\ terminated\ with\ signal
\ (?P<signum>[0-9]+), |
^\s+(?P<mapstart>0x[A-Fa-f0-9]+)\s+
(?P<mapend>0x[A-Fa-f0-9]+)\s+
(?P<mapsize>0x[A-Fa-f0-9]+)\s+
(?P<mapoffset>0x[A-Fa-f0-9]+)\s+
(?P<mapimage>.*)$ |
^\#[0-9]+\s+(?P<addr1>0x[A-Fa-f0-9]+)?\s*
<(?P<image1>[A-Za-z0-9_\.-]+)!
(?P<symbol1>[A-Za-z0-9_:]+)(\([^\+]+\))?\+?
(?P<offset1>[0-9]+)?>\s+\( |
^\#[0-9]+\s+(?P<addr2>0x[A-Fa-f0-9]+)\s+\(\) |
^\#[0-9]+\s+(?P<addr3>0x[A-Fa-f0-9]+)?(\s+in)?\s+
(?P<symbol3>[A-Za-z0-9_:?]+)\s+\(.*?\)\s+
(from|at)\s+(?P<image3>[A-Za-z0-9_\./-]+):?
(?P<offset3>[0-9]+)?$ |
^\#[0-9]+\s+(?P<addr4>0x[A-Fa-f0-9]+)?(\s+in)?\s+
(?P<symbol4>[A-Za-z0-9_:?]+)""", re.MULTILINE)
def _process_gdb_output(stdout):
# try parsing for CERT exploitable output first
backtrace, classification, _ = _common.process_exploitable_output(stdout)
if classification != _common.NOT_AN_EXCEPTION or backtrace:
return (backtrace, classification)
# CERT exploitable failed...
classification = _common.NOT_AN_EXCEPTION
backtrace = []
maps = {}
for m in _RE_GDB_OUTPUT.finditer(stdout):
sig = None
if m.group("signame"):
sig = m.group("signame")
elif m.group("signum"):
sig = int(m.group("signum"))
elif m.group("symbol1"):
addr = m.group("addr1")
image = m.group("image1")
symbol = m.group("symbol1")
offset = m.group("offset1")
elif m.group("addr2"):
addr = m.group("addr2")
image = symbol = offset = None
elif m.group("symbol3"):
addr = m.group("addr3")
image = m.group("image3")
symbol = m.group("symbol3")
offset = m.group("offset3")
if symbol == "??":
symbol = "Unknown"
if image:
image = os.path.basename(image)
elif m.group("symbol4"):
addr = m.group("addr4")
symbol = m.group("symbol4")
image = offset = None
elif m.group("mapstart"):
maps[(int(m.group("mapstart"), 16), int(m.group("mapend"), 16))] = m.group("mapimage")
continue
if sig is not None:
if sig in [8, "SIGFPE"]:
classification = _common.PROBABLY_NOT_EXPLOITABLE
elif sig not in [2, "SIGINT"]:
classification = _common.UNKNOWN
else:
if addr is not None:
addr = int(addr, 16)
if offset is not None:
offset = int(offset)
backtrace.append((addr, image, symbol, offset))
real_bt = []
for (addr, image, symbol, offset) in backtrace:
if addr is not None:
# try to find a map matching this address
for (m_start, m_end), m_image in maps.items():
if (addr >= m_start) and (addr < m_end):
rel_addr = addr - m_start
#log.debug("got rel_addr of %s+0x%08X for symbol %s", m_image, rel_addr, symbol)
if image is None:
image = os.path.basename(m_image)
if offset is None:
offset = rel_addr
break
real_bt.append(_common.LSO((image, symbol, offset)))
return (real_bt, classification)
|
blackberry/ALF
|
alf/debug/_gdb.py
|
Python
|
apache-2.0
| 19,799
|
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
|
jiahaoliang/group-based-policy
|
setup.py
|
Python
|
apache-2.0
| 1,058
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Str, List, Instance
from traitsui.api import View, UItem, Item, TableEditor
from traitsui.table_column import ObjectColumn
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.enum_editor import myEnumEditor
class Conflict(HasTraits):
queue_name = Str
runspec = Instance('pychron.experiment.automated_run.spec.AutomatedRunSpec')
identifier = Str
position = Str
repository_identifier = Str
repository_ids = Str
available_ids = List
class ConflictResolver(HasTraits):
conflicts = List
available_ids = List
def apply(self):
for c in self.conflicts:
c.runspec.repository_identifier = c.repository_identifier
def add_conflicts(self, qname, cs):
for ai, exps in cs:
self.conflicts.append(Conflict(queue_name=qname,
runspec=ai,
position=ai.position,
repository_identifier=ai.repository_identifier,
identifier=ai.identifier,
repository_ids=','.join(exps),
available_ids=self.available_ids))
def traits_view(self):
cols = [ObjectColumn(name='queue_name', editable=False),
ObjectColumn(name='identifier', editable=False),
ObjectColumn(name='position', editable=False),
ObjectColumn(name='repository_identifier',
label='Assigned Repository',
tooltip='Repository assigned to this analysis in the Experiment Queue',
editor=myEnumEditor(name='available_ids')),
ObjectColumn(name='repository_ids',
label='Existing Repositories',
tooltip='Set of repositories that already contain this L#',
editable=False)]
v = okcancel_view(UItem('conflicts', editor=TableEditor(columns=cols)),
title='Resolve Repository Conflicts')
return v
if __name__ == '__main__':
def main():
from pychron.paths import paths
paths.build('_dev')
from pychron.core.helpers.logger_setup import logging_setup
from pychron.experiment.automated_run.spec import AutomatedRunSpec
logging_setup('dvcdb')
from pychron.dvc.dvc_database import DVCDatabase
from itertools import groupby
db = DVCDatabase(kind='mysql', host='localhost', username='root', name='pychronmeta', password='Argon')
db.connect()
identifiers = ['63290', '63291']
runs = [AutomatedRunSpec(identifier='63290', repository_identifier='Cather_McIntoshd')]
cr = ConflictResolver()
experiments = {}
cr.available_ids = db.get_repository_identifiers()
eas = db.get_associated_repositories(identifiers)
for idn, exps in groupby(eas, key=lambda x: x[1]):
experiments[idn] = [e[0] for e in exps]
conflicts = []
for ai in runs:
identifier = ai.identifier
es = experiments[identifier]
if ai.repository_identifier not in es:
conflicts.append((ai, es))
if conflicts:
cr.add_conflicts('Foo', conflicts)
if cr.conflicts:
info = cr.edit_traits(kind='livemodal')
if info.result:
cr.apply()
# for ci in runs:
# print ci.identifier, ci.experiment_identifier
from traits.api import Button
class Demo(HasTraits):
test = Button
def traits_view(self):
return View(Item('test'))
def _test_fired(self):
main()
d = Demo()
d.configure_traits()
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/experiment/conflict_resolver.py
|
Python
|
apache-2.0
| 4,802
|
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from options import *
from vi_mgr_common import *
from dos import *
from analytics_policy import *
from vip_autoscale import *
class VssPlacement(object):
# all schemas
num_subcores_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Number of sub-cores that comprise a CPU core. (Default: 4)"),
required=False,
update_allowed=True,
)
core_nonaffinity_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Degree of core non-affinity for VS placement. (Default: 2)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'num_subcores',
'core_nonaffinity',
)
# mapping of properties to their schemas
properties_schema = {
'num_subcores': num_subcores_schema,
'core_nonaffinity': core_nonaffinity_schema,
}
class VcenterClusters(object):
# all schemas
cluster_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
cluster_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=cluster_uuids_item_schema,
required=False,
update_allowed=True,
)
include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'cluster_uuids',
'include',
)
# mapping of properties to their schemas
properties_schema = {
'cluster_uuids': cluster_uuids_schema,
'include': include_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'cluster_uuids': 'vimgrclusterruntime',
}
class VcenterHosts(object):
# all schemas
host_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
host_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=host_uuids_item_schema,
required=False,
update_allowed=True,
)
include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'host_uuids',
'include',
)
# mapping of properties to their schemas
properties_schema = {
'host_uuids': host_uuids_schema,
'include': include_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'host_uuids': 'vimgrhostruntime',
}
class IptableRule(object):
# all schemas
src_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
dst_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
src_port_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
dst_port_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
proto_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PROTO_ALL', 'PROTO_ICMP', 'PROTO_TCP', 'PROTO_UDP']),
],
)
input_interface_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
output_interface_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
action_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ACCEPT', 'DNAT', 'DROP', 'MASQUERADE', 'REJECT']),
],
)
dnat_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddr.properties_schema,
required=False,
update_allowed=True,
)
tag_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'src_ip',
'dst_ip',
'src_port',
'dst_port',
'proto',
'input_interface',
'output_interface',
'action',
'dnat_ip',
'tag',
)
# mapping of properties to their schemas
properties_schema = {
'src_ip': src_ip_schema,
'dst_ip': dst_ip_schema,
'src_port': src_port_schema,
'dst_port': dst_port_schema,
'proto': proto_schema,
'input_interface': input_interface_schema,
'output_interface': output_interface_schema,
'action': action_schema,
'dnat_ip': dnat_ip_schema,
'tag': tag_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'src_ip': getattr(IpAddrPrefix, 'field_references', {}),
'dst_ip': getattr(IpAddrPrefix, 'field_references', {}),
'src_port': getattr(PortRange, 'field_references', {}),
'dst_port': getattr(PortRange, 'field_references', {}),
'dnat_ip': getattr(IpAddr, 'field_references', {}),
}
unique_keys = {
'src_ip': getattr(IpAddrPrefix, 'unique_keys', {}),
'dst_ip': getattr(IpAddrPrefix, 'unique_keys', {}),
'src_port': getattr(PortRange, 'unique_keys', {}),
'dst_port': getattr(PortRange, 'unique_keys', {}),
'dnat_ip': getattr(IpAddr, 'unique_keys', {}),
}
class IptableRuleSet(object):
# all schemas
table_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
chain_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
rules_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IptableRule.properties_schema,
required=True,
update_allowed=False,
)
rules_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=rules_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'table',
'chain',
'rules',
)
# mapping of properties to their schemas
properties_schema = {
'table': table_schema,
'chain': chain_schema,
'rules': rules_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'rules': getattr(IptableRule, 'field_references', {}),
}
unique_keys = {
'rules': getattr(IptableRule, 'unique_keys', {}),
}
class ServiceEngineGroup(AviResource):
resource_name = "serviceenginegroup"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
max_vs_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of Virtual Services that can be placed on a single Service Engine. East West Virtual Services are excluded from this limit. (Default: 10)"),
required=False,
update_allowed=True,
)
min_scaleout_per_vs_schema = properties.Schema(
properties.Schema.NUMBER,
_("Minimum number of active Service Engines for the Virtual Service. (Default: 1)"),
required=False,
update_allowed=True,
)
max_scaleout_per_vs_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of active Service Engines for the Virtual Service. (Default: 4)"),
required=False,
update_allowed=True,
)
max_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of Services Engines in this group. (Default: 10)"),
required=False,
update_allowed=True,
)
vcpus_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of vcpus for each of the Service Engine virtual machines. (Default: 1)"),
required=False,
update_allowed=True,
)
memory_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of memory for each of the Service Engine virtual machines. (Default: 2048)"),
required=False,
update_allowed=True,
)
disk_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of disk space for each of the Service Engine virtual machines. (Units: GB) (Default: 10)"),
required=False,
update_allowed=True,
)
max_cpu_usage_schema = properties.Schema(
properties.Schema.NUMBER,
_("When CPU usage on an SE exceeds this threshold, Virtual Services hosted on this SE may be rebalanced to other SEs to reduce load. A new SE may be created as part of this process. (Units: PERCENT) (Default: 80)"),
required=False,
update_allowed=True,
)
min_cpu_usage_schema = properties.Schema(
properties.Schema.NUMBER,
_("When CPU usage on an SE falls below the minimum threshold, Virtual Services hosted on the SE may be consolidated onto other underutilized SEs. After consolidation, unused Service Engines may then be eligible for deletion. (Units: PERCENT) (Default: 30)"),
required=False,
update_allowed=True,
)
se_deprovision_delay_schema = properties.Schema(
properties.Schema.NUMBER,
_("Duration to preserve unused Service Engine virtual machines before deleting them. If traffic to a Virtual Service were to spike up abruptly, this SE would still be available to be utilized again rather than creating a new SE. If this value is set to 0, Controller will never delete any SEs and administrator has to manually cleanup unused SEs. (Units: MIN) (Default: 120)"),
required=False,
update_allowed=True,
)
auto_rebalance_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("If set, Virtual Services will be automatically migrated when load on an SE is less than minimum or more than maximum thresholds. Only Alerts are generated when the auto_rebalance is not set. (Default: False)"),
required=False,
update_allowed=True,
)
se_name_prefix_schema = properties.Schema(
properties.Schema.STRING,
_("Prefix to use for virtual machine name of Service Engines."),
required=False,
update_allowed=True,
)
vs_host_redundancy_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Ensure primary and secondary Service Engines are deployed on different physical hosts. (Default: True)"),
required=False,
update_allowed=True,
)
vcenter_folder_schema = properties.Schema(
properties.Schema.STRING,
_("Folder to place all the Service Engine virtual machines in vCenter."),
required=False,
update_allowed=True,
)
vcenter_datastores_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterDatastore.properties_schema,
required=True,
update_allowed=False,
)
vcenter_datastores_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=vcenter_datastores_item_schema,
required=False,
update_allowed=True,
)
vcenter_datastores_include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
vcenter_datastore_mode_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: VCENTER_DATASTORE_ANY)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['VCENTER_DATASTORE_ANY', 'VCENTER_DATASTORE_LOCAL', 'VCENTER_DATASTORE_SHARED']),
],
)
vcenter_clusters_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterClusters.properties_schema,
required=False,
update_allowed=True,
)
vcenter_hosts_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterHosts.properties_schema,
required=False,
update_allowed=True,
)
openstack_availability_zone_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) "),
required=False,
update_allowed=True,
)
cpu_reserve_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
mem_reserve_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: True)"),
required=False,
update_allowed=True,
)
mgmt_network_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Management network to use for Avi Service Engines You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
mgmt_subnet_schema = properties.Schema(
properties.Schema.MAP,
_("Management subnet to use for Avi Service Engines"),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
ha_mode_schema = properties.Schema(
properties.Schema.STRING,
_("High Availability mode for all the Virtual Services using this Service Engine group. (Default: HA_MODE_SHARED)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HA_MODE_LEGACY_ACTIVE_STANDBY', 'HA_MODE_SHARED', 'HA_MODE_SHARED_PAIR']),
],
)
algo_schema = properties.Schema(
properties.Schema.STRING,
_("In compact placement, Virtual Services are placed on existing SEs until max_vs_per_se limit is reached. (Default: PLACEMENT_ALGO_PACKED)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PLACEMENT_ALGO_DISTRIBUTED', 'PLACEMENT_ALGO_PACKED']),
],
)
buffer_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Excess Service Engine capacity provisioned for HA failover (Default: 1)"),
required=False,
update_allowed=True,
)
active_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Service Engines in active/standby mode for HA failover (Default: False)"),
required=False,
update_allowed=True,
)
placement_mode_schema = properties.Schema(
properties.Schema.STRING,
_("If placement mode is 'Auto', Virtual Services are automatically placed on Service Engines. (Default: PLACEMENT_MODE_AUTO)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PLACEMENT_MODE_AUTO']),
],
)
openstack_mgmt_network_name_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Management network name"),
required=False,
update_allowed=True,
)
openstack_mgmt_network_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Management network UUID"),
required=False,
update_allowed=True,
)
instance_flavor_schema = properties.Schema(
properties.Schema.STRING,
_("Instance/Flavor type for SE instance"),
required=False,
update_allowed=True,
)
hypervisor_schema = properties.Schema(
properties.Schema.STRING,
_("Override default hypervisor"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['DEFAULT', 'KVM', 'VMWARE_ESX', 'VMWARE_VSAN', 'XEN']),
],
)
se_dos_profile_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=DosThresholdProfile.properties_schema,
required=False,
update_allowed=True,
)
auto_rebalance_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_("Frequency of rebalance, if 'Auto rebalance' is enabled (Units: SEC) (Default: 300)"),
required=False,
update_allowed=True,
)
aggressive_failure_detection_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable aggressive failover configuration for ha. (Default: False)"),
required=False,
update_allowed=True,
)
realtime_se_metrics_schema = properties.Schema(
properties.Schema.MAP,
_("Enable or disable real time SE metrics"),
schema=MetricsRealTimeUpdate.properties_schema,
required=False,
update_allowed=True,
)
vs_scaleout_timeout_schema = properties.Schema(
properties.Schema.NUMBER,
_("Time to wait for the scaled out SE to become ready before marking the scaleout done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
vs_scalein_timeout_schema = properties.Schema(
properties.Schema.NUMBER,
_("Time to wait for the scaled in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
hardwaresecuritymodulegroup_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
connection_memory_percentage_schema = properties.Schema(
properties.Schema.NUMBER,
_("Percentage of memory for connection state. This will come at the expense of memory used for HTTP in-memory cache. (Units: PERCENT) (Default: 50)"),
required=False,
update_allowed=True,
)
extra_config_multiplier_schema = properties.Schema(
properties.Schema.NUMBER,
_("Multiplier for extra config to support large VS/Pool config. (Default: 0.0)"),
required=False,
update_allowed=True,
)
vs_scalein_timeout_for_upgrade_schema = properties.Schema(
properties.Schema.NUMBER,
_("During SE upgrade, Time to wait for the scaled-in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
host_attribute_key_schema = properties.Schema(
properties.Schema.STRING,
_("Key of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_value. SEs can be configured differently including HA modes across different SE Groups. May also be used for isolation between different classes of VirtualServices. VirtualServices' SE Group may be specified via annotations/labels. A OpenShift/Kubernetes namespace maybe annotated with a matching SE Group label as openshift.io/node-selector: apptype=prod. When multiple SE Groups are used in a Cloud with host attributes specified,just a single SE Group can exist as a match-all SE Group without a host_attribute_key."),
required=False,
update_allowed=True,
)
host_attribute_value_schema = properties.Schema(
properties.Schema.STRING,
_("Value of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_key."),
required=False,
update_allowed=True,
)
log_disksz_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum disk capacity (in MB) to be allocated to an SE. This is exclusively used for debug and log data. (Units: MB) (Default: 10000)"),
required=False,
update_allowed=True,
)
os_reserved_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of extra memory to be reserved for use by the Operating System on a Service Engine. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
floating_intf_ip_item_schema = properties.Schema(
properties.Schema.MAP,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=IpAddr.properties_schema,
required=True,
update_allowed=False,
)
floating_intf_ip_schema = properties.Schema(
properties.Schema.LIST,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=floating_intf_ip_item_schema,
required=False,
update_allowed=True,
)
hm_on_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable active health monitoring from the standby SE for all placed virtual services. (Default: True)"),
required=False,
update_allowed=True,
)
per_app_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Per-app SE mode is designed for deploying dedicated load balancers per app (VS). In this mode, each SE is limited to a max of 2 VSs. vCPUs in per-app SEs count towards licensing usage at 25% rate. (Default: False)"),
required=False,
update_allowed=True,
)
enable_vmac_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use Virtual MAC address for interfaces on which floating interface IPs are placed (Default: False)"),
required=False,
update_allowed=True,
)
distribute_load_active_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use both the active and standby Service Engines for Virtual Service placement in the legacy active standby HA mode. (Default: False)"),
required=False,
update_allowed=True,
)
auto_redistribute_active_standby_load_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Redistribution of virtual services from the takeover SE to the replacement SE can cause momentary traffic loss. If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to REST API. (Default: False)"),
required=False,
update_allowed=True,
)
floating_intf_ip_se_2_item_schema = properties.Schema(
properties.Schema.MAP,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=IpAddr.properties_schema,
required=True,
update_allowed=False,
)
floating_intf_ip_se_2_schema = properties.Schema(
properties.Schema.LIST,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=floating_intf_ip_se_2_item_schema,
required=False,
update_allowed=True,
)
custom_tag_item_schema = properties.Schema(
properties.Schema.MAP,
_("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"),
schema=CustomTag.properties_schema,
required=True,
update_allowed=False,
)
custom_tag_schema = properties.Schema(
properties.Schema.LIST,
_("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"),
schema=custom_tag_item_schema,
required=False,
update_allowed=True,
)
dedicated_dispatcher_core_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Dedicate the core that handles packet receive/transmit from the network to just the dispatching function. Don't use it for TCP/IP and SSL functions. (Default: False)"),
required=False,
update_allowed=True,
)
cpu_socket_affinity_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Allocate all the CPU cores for the Service Engine Virtual Machines on the same CPU socket. Applicable only for vCenter Cloud. (Default: False)"),
required=False,
update_allowed=True,
)
num_flow_cores_sum_changes_to_ignore_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of changes in num flow cores sum to ignore. (Default: 8)"),
required=False,
update_allowed=True,
)
least_load_core_selection_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Select core with least load for new flow. (Default: True)"),
required=False,
update_allowed=True,
)
extra_shared_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Extra config memory to support large Geo DB configuration. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
se_tunnel_mode_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Determines if DSR from secondary SE is active or not: 0: Automatically determine based on hypervisor type. 1: Disable DSR unconditionally. ~[0,1]: Enable DSR unconditionally. (Default: 0)"),
required=False,
update_allowed=True,
)
openstack_availability_zones_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) "),
required=True,
update_allowed=False,
)
openstack_availability_zones_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) "),
schema=openstack_availability_zones_item_schema,
required=False,
update_allowed=True,
)
service_ip_subnets_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."),
schema=IpAddrPrefix.properties_schema,
required=True,
update_allowed=False,
)
service_ip_subnets_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."),
schema=service_ip_subnets_item_schema,
required=False,
update_allowed=True,
)
se_vs_hb_max_vs_in_pkt_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Maximum number of virtualservices for which heartbeat messages are aggregated in one packet. (Default: 256)"),
required=False,
update_allowed=True,
)
se_vs_hb_max_pkts_in_batch_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Maximum number of aggregated vs heartbeat packets to send in a batch. (Default: 8)"),
required=False,
update_allowed=True,
)
auto_rebalance_criteria_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."),
required=True,
update_allowed=False,
constraints=[
constraints.AllowedValues(['SE_AUTO_REBALANCE_CPS', 'SE_AUTO_REBALANCE_CPU', 'SE_AUTO_REBALANCE_MBPS', 'SE_AUTO_REBALANCE_OPEN_CONNS', 'SE_AUTO_REBALANCE_PPS']),
],
)
auto_rebalance_criteria_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."),
schema=auto_rebalance_criteria_item_schema,
required=False,
update_allowed=True,
)
cloud_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=False,
)
iptables_item_schema = properties.Schema(
properties.Schema.MAP,
_("Iptable Rules"),
schema=IptableRuleSet.properties_schema,
required=True,
update_allowed=False,
)
iptables_schema = properties.Schema(
properties.Schema.LIST,
_("Iptable Rules"),
schema=iptables_item_schema,
required=False,
update_allowed=True,
)
enable_routing_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable routing for this ServiceEngineGroup (Default: False)"),
required=False,
update_allowed=True,
)
advertise_backend_networks_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Advertise reach-ability of backend server networks via ADC through BGP for default gateway feature. (Default: False)"),
required=False,
update_allowed=True,
)
enable_vip_on_all_interfaces_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.1) Enable VIP on all interfaces of SE. (Default: True)"),
required=False,
update_allowed=True,
)
se_thread_multiplier_schema = properties.Schema(
properties.Schema.NUMBER,
_("Multiplier for SE threads based on vCPU. (Default: 1)"),
required=False,
update_allowed=True,
)
async_ssl_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("SSL handshakes will be handled by dedicated SSL Threads (Default: False)"),
required=False,
update_allowed=True,
)
async_ssl_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of Async SSL threads per se_dp (Default: 1)"),
required=False,
update_allowed=True,
)
se_udp_encap_ipc_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) Determines if SE-SE IPC messages are encapsulated in an UDP header: 0: Automatically determine based on hypervisor type. 1: Use UDP encap unconditionally. ~[0,1]: Don't use UDP encap. (Default: 0)"),
required=False,
update_allowed=True,
)
se_ipc_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) UDP Port for SE_DP IPC in Docker bridge mode. (Default: 1500)"),
required=False,
update_allowed=True,
)
se_remote_punt_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) UDP Port for punted packets in Docker bridge mode. (Default: 1501)"),
required=False,
update_allowed=True,
)
se_tunnel_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) UDP Port for tunneled packets from secondary to primary SE in Docker bridge mode. (Default: 1550)"),
required=False,
update_allowed=True,
)
custom_securitygroups_mgmt_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."),
required=True,
update_allowed=False,
)
custom_securitygroups_mgmt_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."),
schema=custom_securitygroups_mgmt_item_schema,
required=False,
update_allowed=True,
)
custom_securitygroups_data_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."),
required=True,
update_allowed=False,
)
custom_securitygroups_data_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."),
schema=custom_securitygroups_data_item_schema,
required=False,
update_allowed=True,
)
archive_shm_limit_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) Amount of SE memory in GB until which shared memory is collected in core archive. (Units: GB) (Default: 8)"),
required=False,
update_allowed=True,
)
significant_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
udf_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of UDF logs generated per second per core on this SE. UDF logs are generated due to the configured client log filters or the rules with logging enabled. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
non_significant_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of non-significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
ingress_access_mgmt_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.5) Program SE security group ingress rules to allow SSH/ICMP management access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']),
],
)
ingress_access_data_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.5) Program SE security group ingress rules to allow VIP data access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']),
],
)
se_sb_dedicated_core_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Sideband traffic will be handled by a dedicated core (Default: False)"),
required=False,
update_allowed=True,
)
se_probe_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.2) TCP port on SE where echo service will be run (Default: 7)"),
required=False,
update_allowed=True,
)
se_sb_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Number of Sideband threads per SE (Default: 1)"),
required=False,
update_allowed=True,
)
ignore_rtt_threshold_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.6,17.2.2) Ignore RTT samples if it is above threshold (Units: MILLISECONDS) (Default: 5000)"),
required=False,
update_allowed=True,
)
waf_mempool_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.3) Enable memory pool for WAF (Default: True)"),
required=False,
update_allowed=True,
)
waf_mempool_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.3) Memory pool size used for WAF (Units: KB) (Default: 64)"),
required=False,
update_allowed=True,
)
se_bandwidth_type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) Select the SE bandwidth for the bandwidth license."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SE_BANDWIDTH_10000M', 'SE_BANDWIDTH_1000M', 'SE_BANDWIDTH_200M', 'SE_BANDWIDTH_25M', 'SE_BANDWIDTH_UNLIMITED']),
],
)
license_type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) If no license type is specified then default license enforcement for the cloud type is chosen."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['LIC_BACKEND_SERVERS', 'LIC_CORES', 'LIC_HOSTS', 'LIC_SE_BANDWIDTH', 'LIC_SOCKETS']),
],
)
license_tier_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) Specifies the license tier which would be used. This field by default inherits the value from cloud."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ENTERPRISE_16', 'ENTERPRISE_18']),
],
)
allow_burst_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5) Allow SEs to be created using burst license"),
required=False,
update_allowed=True,
)
auto_rebalance_capacity_per_se_item_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."),
required=True,
update_allowed=False,
)
auto_rebalance_capacity_per_se_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."),
schema=auto_rebalance_capacity_per_se_item_schema,
required=False,
update_allowed=True,
)
host_gateway_monitor_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.4) Enable the host gateway monitor when service engine is deployed as docker container. Disabled by default. (Default: False)"),
required=False,
update_allowed=True,
)
vss_placement_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.5) Parameters to place Virtual Services on only a subset of the cores of an SE."),
schema=VssPlacement.properties_schema,
required=False,
update_allowed=True,
)
flow_table_new_syn_max_entries_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Maximum number of flow table entries that have not completed TCP three-way handshake yet (Default: 0)"),
required=False,
update_allowed=True,
)
minimum_required_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Required available config memory to apply any configuration (Units: PERCENT)"),
required=False,
update_allowed=True,
)
disable_csum_offloads_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.14, 17.2.5, 18.1.1) Stop using TCP/UDP and IP checksum offload features of NICs (Default: False)"),
required=False,
update_allowed=True,
)
disable_gro_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5, 18.1.1) Disable Generic Receive Offload (GRO) in DPDK poll-mode driver packet receive path. GRO is on by default on NICs that do not support LRO (Large Receive Offload) or do not gain performance boost from LRO. (Default: False)"),
required=False,
update_allowed=True,
)
disable_tso_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5, 18.1.1) Disable TCP Segmentation Offload (TSO) in DPDK poll-mode driver packet transmit path. TSO is on by default on NICs that support it. (Default: False)"),
required=False,
update_allowed=True,
)
enable_hsm_priming_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.7, 18.1.1) (This is a beta feature). Enable HSM key priming. If enabled, key handles on the hsm will be synced to SE before processing client connections. (Default: False)"),
required=False,
update_allowed=True,
)
service_ip6_subnets_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. Required for VS group placement."),
schema=IpAddrPrefix.properties_schema,
required=True,
update_allowed=False,
)
service_ip6_subnets_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. Required for VS group placement."),
schema=service_ip6_subnets_item_schema,
required=False,
update_allowed=True,
)
se_tracert_port_range_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.8) Traceroute port range"),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
distribute_queues_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.8) Distributes queue ownership among cores so multiple cores handle dispatcher duties. (Default: False)"),
required=False,
update_allowed=True,
)
additional_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Indicates the percent of config memory used for config updates. (Units: PERCENT)"),
required=False,
update_allowed=True,
)
vss_placement_enabled_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.1) If set, Virtual Services will be placed on only a subset of the cores of an SE. (Default: False)"),
required=False,
update_allowed=True,
)
enable_multi_lb_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.10, 18.1.2) Applicable only for Azure cloud with Basic SKU LB. If set, additional Azure LBs will be automatically created if resources in existing LB are exhausted. (Default: False)"),
required=False,
update_allowed=True,
)
n_log_streaming_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Number of threads to use for log streaming. (Default: 1)"),
required=False,
update_allowed=True,
)
free_list_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.10) Number of entries in the free list (Default: 1024)"),
required=False,
update_allowed=True,
)
max_rules_per_lb_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of rules per Azure LB. (Default: 150)"),
required=False,
update_allowed=True,
)
max_public_ips_per_lb_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of public IPs per Azure LB. (Default: 30)"),
required=False,
update_allowed=True,
)
waf_learning_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Amount of memory reserved on SE for WAF learning. This can be atmost 5% of SE memory. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
waf_learning_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Frequency with which SE publishes WAF learning. (Units: MIN) (Default: 10)"),
required=False,
update_allowed=True,
)
self_se_election_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.2) Enable SEs to elect a primary amongst themselves in the absence of a connectivity to controller. (Default: False)"),
required=False,
update_allowed=True,
)
vip_asg_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 18.1.2) When vip_asg is set, Vip configuration will be managed by Avi.User will be able to configure vip_asg or Vips individually at the time of create."),
schema=VipAutoscaleGroup.properties_schema,
required=False,
update_allowed=True,
)
minimum_connection_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Indicates the percent of memory reserved for connections. (Units: PERCENT) (Default: 20)"),
required=False,
update_allowed=True,
)
shm_minimum_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Minimum required shared memory to apply any configuration. (Units: MB) (Default: 4)"),
required=False,
update_allowed=True,
)
heap_minimum_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Minimum required heap memory to apply any configuration. (Units: MB) (Default: 8)"),
required=False,
update_allowed=True,
)
disable_se_memory_check_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.2) If set, disable the config memory check done in service engine. (Default: False)"),
required=False,
update_allowed=True,
)
memory_for_config_update_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Indicates the percent of memory reserved for config updates. (Units: PERCENT) (Default: 15)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'description',
'max_vs_per_se',
'min_scaleout_per_vs',
'max_scaleout_per_vs',
'max_se',
'vcpus_per_se',
'memory_per_se',
'disk_per_se',
'max_cpu_usage',
'min_cpu_usage',
'se_deprovision_delay',
'auto_rebalance',
'se_name_prefix',
'vs_host_redundancy',
'vcenter_folder',
'vcenter_datastores',
'vcenter_datastores_include',
'vcenter_datastore_mode',
'vcenter_clusters',
'vcenter_hosts',
'openstack_availability_zone',
'cpu_reserve',
'mem_reserve',
'mgmt_network_uuid',
'mgmt_subnet',
'ha_mode',
'algo',
'buffer_se',
'active_standby',
'placement_mode',
'openstack_mgmt_network_name',
'openstack_mgmt_network_uuid',
'instance_flavor',
'hypervisor',
'se_dos_profile',
'auto_rebalance_interval',
'aggressive_failure_detection',
'realtime_se_metrics',
'vs_scaleout_timeout',
'vs_scalein_timeout',
'hardwaresecuritymodulegroup_uuid',
'connection_memory_percentage',
'extra_config_multiplier',
'vs_scalein_timeout_for_upgrade',
'host_attribute_key',
'host_attribute_value',
'log_disksz',
'os_reserved_memory',
'floating_intf_ip',
'hm_on_standby',
'per_app',
'enable_vmac',
'distribute_load_active_standby',
'auto_redistribute_active_standby_load',
'floating_intf_ip_se_2',
'custom_tag',
'dedicated_dispatcher_core',
'cpu_socket_affinity',
'num_flow_cores_sum_changes_to_ignore',
'least_load_core_selection',
'extra_shared_config_memory',
'se_tunnel_mode',
'openstack_availability_zones',
'service_ip_subnets',
'se_vs_hb_max_vs_in_pkt',
'se_vs_hb_max_pkts_in_batch',
'auto_rebalance_criteria',
'cloud_uuid',
'iptables',
'enable_routing',
'advertise_backend_networks',
'enable_vip_on_all_interfaces',
'se_thread_multiplier',
'async_ssl',
'async_ssl_threads',
'se_udp_encap_ipc',
'se_ipc_udp_port',
'se_remote_punt_udp_port',
'se_tunnel_udp_port',
'custom_securitygroups_mgmt',
'custom_securitygroups_data',
'archive_shm_limit',
'significant_log_throttle',
'udf_log_throttle',
'non_significant_log_throttle',
'ingress_access_mgmt',
'ingress_access_data',
'se_sb_dedicated_core',
'se_probe_port',
'se_sb_threads',
'ignore_rtt_threshold',
'waf_mempool',
'waf_mempool_size',
'se_bandwidth_type',
'license_type',
'license_tier',
'allow_burst',
'auto_rebalance_capacity_per_se',
'host_gateway_monitor',
'vss_placement',
'flow_table_new_syn_max_entries',
'minimum_required_config_memory',
'disable_csum_offloads',
'disable_gro',
'disable_tso',
'enable_hsm_priming',
'service_ip6_subnets',
'se_tracert_port_range',
'distribute_queues',
'additional_config_memory',
'vss_placement_enabled',
'enable_multi_lb',
'n_log_streaming_threads',
'free_list_size',
'max_rules_per_lb',
'max_public_ips_per_lb',
'waf_learning_memory',
'waf_learning_interval',
'self_se_election',
'vip_asg',
'minimum_connection_memory',
'shm_minimum_config_memory',
'heap_minimum_config_memory',
'disable_se_memory_check',
'memory_for_config_update',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'description': description_schema,
'max_vs_per_se': max_vs_per_se_schema,
'min_scaleout_per_vs': min_scaleout_per_vs_schema,
'max_scaleout_per_vs': max_scaleout_per_vs_schema,
'max_se': max_se_schema,
'vcpus_per_se': vcpus_per_se_schema,
'memory_per_se': memory_per_se_schema,
'disk_per_se': disk_per_se_schema,
'max_cpu_usage': max_cpu_usage_schema,
'min_cpu_usage': min_cpu_usage_schema,
'se_deprovision_delay': se_deprovision_delay_schema,
'auto_rebalance': auto_rebalance_schema,
'se_name_prefix': se_name_prefix_schema,
'vs_host_redundancy': vs_host_redundancy_schema,
'vcenter_folder': vcenter_folder_schema,
'vcenter_datastores': vcenter_datastores_schema,
'vcenter_datastores_include': vcenter_datastores_include_schema,
'vcenter_datastore_mode': vcenter_datastore_mode_schema,
'vcenter_clusters': vcenter_clusters_schema,
'vcenter_hosts': vcenter_hosts_schema,
'openstack_availability_zone': openstack_availability_zone_schema,
'cpu_reserve': cpu_reserve_schema,
'mem_reserve': mem_reserve_schema,
'mgmt_network_uuid': mgmt_network_uuid_schema,
'mgmt_subnet': mgmt_subnet_schema,
'ha_mode': ha_mode_schema,
'algo': algo_schema,
'buffer_se': buffer_se_schema,
'active_standby': active_standby_schema,
'placement_mode': placement_mode_schema,
'openstack_mgmt_network_name': openstack_mgmt_network_name_schema,
'openstack_mgmt_network_uuid': openstack_mgmt_network_uuid_schema,
'instance_flavor': instance_flavor_schema,
'hypervisor': hypervisor_schema,
'se_dos_profile': se_dos_profile_schema,
'auto_rebalance_interval': auto_rebalance_interval_schema,
'aggressive_failure_detection': aggressive_failure_detection_schema,
'realtime_se_metrics': realtime_se_metrics_schema,
'vs_scaleout_timeout': vs_scaleout_timeout_schema,
'vs_scalein_timeout': vs_scalein_timeout_schema,
'hardwaresecuritymodulegroup_uuid': hardwaresecuritymodulegroup_uuid_schema,
'connection_memory_percentage': connection_memory_percentage_schema,
'extra_config_multiplier': extra_config_multiplier_schema,
'vs_scalein_timeout_for_upgrade': vs_scalein_timeout_for_upgrade_schema,
'host_attribute_key': host_attribute_key_schema,
'host_attribute_value': host_attribute_value_schema,
'log_disksz': log_disksz_schema,
'os_reserved_memory': os_reserved_memory_schema,
'floating_intf_ip': floating_intf_ip_schema,
'hm_on_standby': hm_on_standby_schema,
'per_app': per_app_schema,
'enable_vmac': enable_vmac_schema,
'distribute_load_active_standby': distribute_load_active_standby_schema,
'auto_redistribute_active_standby_load': auto_redistribute_active_standby_load_schema,
'floating_intf_ip_se_2': floating_intf_ip_se_2_schema,
'custom_tag': custom_tag_schema,
'dedicated_dispatcher_core': dedicated_dispatcher_core_schema,
'cpu_socket_affinity': cpu_socket_affinity_schema,
'num_flow_cores_sum_changes_to_ignore': num_flow_cores_sum_changes_to_ignore_schema,
'least_load_core_selection': least_load_core_selection_schema,
'extra_shared_config_memory': extra_shared_config_memory_schema,
'se_tunnel_mode': se_tunnel_mode_schema,
'openstack_availability_zones': openstack_availability_zones_schema,
'service_ip_subnets': service_ip_subnets_schema,
'se_vs_hb_max_vs_in_pkt': se_vs_hb_max_vs_in_pkt_schema,
'se_vs_hb_max_pkts_in_batch': se_vs_hb_max_pkts_in_batch_schema,
'auto_rebalance_criteria': auto_rebalance_criteria_schema,
'cloud_uuid': cloud_uuid_schema,
'iptables': iptables_schema,
'enable_routing': enable_routing_schema,
'advertise_backend_networks': advertise_backend_networks_schema,
'enable_vip_on_all_interfaces': enable_vip_on_all_interfaces_schema,
'se_thread_multiplier': se_thread_multiplier_schema,
'async_ssl': async_ssl_schema,
'async_ssl_threads': async_ssl_threads_schema,
'se_udp_encap_ipc': se_udp_encap_ipc_schema,
'se_ipc_udp_port': se_ipc_udp_port_schema,
'se_remote_punt_udp_port': se_remote_punt_udp_port_schema,
'se_tunnel_udp_port': se_tunnel_udp_port_schema,
'custom_securitygroups_mgmt': custom_securitygroups_mgmt_schema,
'custom_securitygroups_data': custom_securitygroups_data_schema,
'archive_shm_limit': archive_shm_limit_schema,
'significant_log_throttle': significant_log_throttle_schema,
'udf_log_throttle': udf_log_throttle_schema,
'non_significant_log_throttle': non_significant_log_throttle_schema,
'ingress_access_mgmt': ingress_access_mgmt_schema,
'ingress_access_data': ingress_access_data_schema,
'se_sb_dedicated_core': se_sb_dedicated_core_schema,
'se_probe_port': se_probe_port_schema,
'se_sb_threads': se_sb_threads_schema,
'ignore_rtt_threshold': ignore_rtt_threshold_schema,
'waf_mempool': waf_mempool_schema,
'waf_mempool_size': waf_mempool_size_schema,
'se_bandwidth_type': se_bandwidth_type_schema,
'license_type': license_type_schema,
'license_tier': license_tier_schema,
'allow_burst': allow_burst_schema,
'auto_rebalance_capacity_per_se': auto_rebalance_capacity_per_se_schema,
'host_gateway_monitor': host_gateway_monitor_schema,
'vss_placement': vss_placement_schema,
'flow_table_new_syn_max_entries': flow_table_new_syn_max_entries_schema,
'minimum_required_config_memory': minimum_required_config_memory_schema,
'disable_csum_offloads': disable_csum_offloads_schema,
'disable_gro': disable_gro_schema,
'disable_tso': disable_tso_schema,
'enable_hsm_priming': enable_hsm_priming_schema,
'service_ip6_subnets': service_ip6_subnets_schema,
'se_tracert_port_range': se_tracert_port_range_schema,
'distribute_queues': distribute_queues_schema,
'additional_config_memory': additional_config_memory_schema,
'vss_placement_enabled': vss_placement_enabled_schema,
'enable_multi_lb': enable_multi_lb_schema,
'n_log_streaming_threads': n_log_streaming_threads_schema,
'free_list_size': free_list_size_schema,
'max_rules_per_lb': max_rules_per_lb_schema,
'max_public_ips_per_lb': max_public_ips_per_lb_schema,
'waf_learning_memory': waf_learning_memory_schema,
'waf_learning_interval': waf_learning_interval_schema,
'self_se_election': self_se_election_schema,
'vip_asg': vip_asg_schema,
'minimum_connection_memory': minimum_connection_memory_schema,
'shm_minimum_config_memory': shm_minimum_config_memory_schema,
'heap_minimum_config_memory': heap_minimum_config_memory_schema,
'disable_se_memory_check': disable_se_memory_check_schema,
'memory_for_config_update': memory_for_config_update_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'iptables': getattr(IptableRuleSet, 'field_references', {}),
'floating_intf_ip_se_2': getattr(IpAddr, 'field_references', {}),
'hardwaresecuritymodulegroup_uuid': 'hardwaresecuritymodulegroup',
'vcenter_hosts': getattr(VcenterHosts, 'field_references', {}),
'custom_tag': getattr(CustomTag, 'field_references', {}),
'service_ip_subnets': getattr(IpAddrPrefix, 'field_references', {}),
'mgmt_network_uuid': 'network',
'vcenter_datastores': getattr(VcenterDatastore, 'field_references', {}),
'mgmt_subnet': getattr(IpAddrPrefix, 'field_references', {}),
'vip_asg': getattr(VipAutoscaleGroup, 'field_references', {}),
'service_ip6_subnets': getattr(IpAddrPrefix, 'field_references', {}),
'floating_intf_ip': getattr(IpAddr, 'field_references', {}),
'se_tracert_port_range': getattr(PortRange, 'field_references', {}),
'vcenter_clusters': getattr(VcenterClusters, 'field_references', {}),
'se_dos_profile': getattr(DosThresholdProfile, 'field_references', {}),
'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'field_references', {}),
'vss_placement': getattr(VssPlacement, 'field_references', {}),
}
unique_keys = {
'iptables': getattr(IptableRuleSet, 'unique_keys', {}),
'floating_intf_ip_se_2': getattr(IpAddr, 'unique_keys', {}),
'vcenter_hosts': getattr(VcenterHosts, 'unique_keys', {}),
'custom_tag': getattr(CustomTag, 'unique_keys', {}),
'service_ip_subnets': getattr(IpAddrPrefix, 'unique_keys', {}),
'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'unique_keys', {}),
'vcenter_datastores': getattr(VcenterDatastore, 'unique_keys', {}),
'mgmt_subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
'vip_asg': getattr(VipAutoscaleGroup, 'unique_keys', {}),
'service_ip6_subnets': getattr(IpAddrPrefix, 'unique_keys', {}),
'floating_intf_ip': getattr(IpAddr, 'unique_keys', {}),
'se_tracert_port_range': getattr(PortRange, 'unique_keys', {}),
'vcenter_clusters': getattr(VcenterClusters, 'unique_keys', {}),
'se_dos_profile': getattr(DosThresholdProfile, 'unique_keys', {}),
'vss_placement': getattr(VssPlacement, 'unique_keys', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::ServiceEngineGroup': ServiceEngineGroup,
}
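# ---------------------------------------------------------------------------
# Annotation (not part of the original source): the resource above follows the
# standard OpenStack Heat plugin pattern: one properties.Schema per field, a
# PROPERTIES tuple of property names, a properties_schema dict mapping names
# to schemas, and a resource_mapping() hook that registers the type with the
# Heat engine. The sketch below is a minimal, hypothetical resource written in
# the same style; ExampleResource, demo_flag, demo_mode and Avi::LBaaS::Example
# are illustrative names only, and the generic Heat base class and imports may
# differ from the helpers this project actually uses.
# ---------------------------------------------------------------------------
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
class ExampleResource(resource.Resource):
    # Optional boolean property that can be updated in place.
    demo_flag_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Illustrative boolean property. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    # Optional string property restricted to an enumerated set of values.
    demo_mode_schema = properties.Schema(
        properties.Schema.STRING,
        _("Illustrative enum property."),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['MODE_A', 'MODE_B']),
        ],
    )
    PROPERTIES = ('demo_flag', 'demo_mode')
    properties_schema = {
        'demo_flag': demo_flag_schema,
        'demo_mode': demo_mode_schema,
    }
def example_resource_mapping():
    # Same shape as resource_mapping() above, for the illustrative type name.
    return {
        'Avi::LBaaS::Example': ExampleResource,
    }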
|
avinetworks/avi-heat
|
avi/heat/resources/se_group.py
|
Python
|
apache-2.0
| 63,203
|
import tangelo
import pymongo
import bson.json_util
from ArborFileManagerAPI import ArborFileManager
api = ArborFileManager()
api.initDatabaseConnection()
@tangelo.restful
def get(*pargs, **query_args):
if len(pargs) == 0:
return tangelo.HTTPStatusCode(400, "Missing resource type")
resource_type = pargs[0]
allowed = ["project", "analysis","collection", "workflow"]
if resource_type == "project":
if len(pargs) == 1:
return api.getListOfProjectNames()
elif len(pargs) == 2:
project = pargs[1]
return api.getListOfTypesForProject(project)
elif len(pargs) == 3:
project = pargs[1]
datatype = pargs[2]
return api.getListOfDatasetsByProjectAndType(project, datatype)
elif len(pargs) == 4:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
coll = api.db[api.returnCollectionForObjectByName(project, datatype, dataset)]
return bson.json_util.dumps(list(coll.find()))
elif len(pargs) == 5:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
stringFormat = pargs[4]
string = api.getDatasetAsTextString(project, datatype, dataset, stringFormat)
return string
else:
return tangelo.HTTPStatusCode(400, "Bad request - got %d parameter(s), was expecting between 1 and 5")
elif resource_type == "analysis":
if len(pargs) == 1:
return api.getListOfAnalysisNames()
elif len(pargs) == 2:
analysis_name = pargs[1]
coll = api.db[api.returnCollectionForAnalysisByName(analysis_name)]
return bson.json_util.dumps(list(coll.find()))
elif len(pargs) == 3:
analysis_name = pargs[1]
coll = api.db[api.returnCollectionForAnalysisByName(analysis_name)]
return coll.find_one()["analysis"]["script"]
# add a collection option to return the database and collection name for an object in the
# Arbor treestore. This 'information hiding violation' of the treestore allows for low-level
# clients to connect and work directly with the mongo database, should it be needed. This level
# is used in the phylomap application.
elif resource_type == "collection":
if len(pargs) == 4:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
collname = api.returnCollectionForObjectByName(project, datatype, dataset)
dbname = api.getMongoDatabase()
dbhost = api.getMongoHost()
dbport = api.getMongoPort()
return bson.json_util.dumps({'host':dbhost,'port':dbport,'db': dbname,'collection': collname})
# if workflow is specified as the resource type, then list the workflows in a project or display the
# information about a particular workflow
elif resource_type == "workflow":
if len(pargs) == 2:
project = pargs[1]
return api.getListOfDatasetsByProjectAndType(project,"Workflow")
if len(pargs) == 3:
project = pargs[1]
workflowName = pargs[2]
print("REST: getting status of workflow:",workflowName)
return bson.json_util.dumps(api.getStatusOfWorkflow(workflowName,project))
else:
return tangelo.HTTPStatusCode(400, "Workflow resource requires 2 or 3 positional arguments")
else:
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: %s" % (resource_type, ", ".join(allowed)))
# Jan 2014 - added support for workflows as a datatype inside projects. new workflow-only named types are
# defined here to allow workflows to be created and run through the REST interface
#
@tangelo.restful
def put(resource, projname, datasetname=None, data=None, filename=None, filetype=None,
workflowName = None, stepName=None, stepType=None, inputStepName=None, outputStepName=None,
inPortName=None,outPortName=None,operation=None, parameterName=None, parameterValue=None,
parameterValueNumber=None,flowType=None,dataType=None, **kwargs):
if (resource != "project") and (resource != "workflow"):
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: project")
if resource == "project":
if datasetname is None:
api.newProject(projname)
else:
if filename is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'filename'")
if filetype is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'filetype'")
if data is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'data'")
if datasetname is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'datasetname'")
# user wants to upload a tree or a character matrix
if filetype == "newick" or filetype == "phyloxml":
api.newTreeInProjectFromString(datasetname, data, projname, filename, filetype)
if (filetype == "csv" and dataType is None) or (filetype == "csv" and dataType=='CharacterMatrix'):
api.newCharacterMatrixInProjectFromString(datasetname, data, projname, filename)
if filetype == "csv" and dataType=="Occurrences":
api.newOccurrencesInProjectFromString(datasetname, data, projname)
# workflow creation
# arborapi: /workflow/projname/workflowname - creates new empty workflow
# arborapi: /workflow/projname/workflowname//
if resource == "workflow":
# the user wants to create a new, empty workflow
if operation == "newWorkflow":
api.newWorkflowInProject(workflowName, projname)
if operation == "newWorkstepInWorkflow":
api.newWorkstepInWorkflow(workflowName, stepType, stepName, projname)
# allow user to add a parameter to a workstep or update the value of the parameter. There
# is currently a limitation that all values are strings, e.g. "2.4" instead of 2.4.
if operation == "updateWorkstepParameter":
# if a float argument is sent, use this as the value for the parameter, instead of the
            # string. A conversion is done to float to assure numeric values
            if parameterValueNumber is not None:
                print("found number filter value")
parameterValue = float(parameterValueNumber)
api.updateWorkstepParameter(workflowName, stepName, parameterName, parameterValue, projname)
if operation == "connectWorksteps":
#api.connectStepsInWorkflow(workflowName,outStepName,outPortName,inStepName,inPortName,projname)
api.connectStepsInWorkflow(workflowName,outputStepName,inputStepName,projname)
if operation == "executeWorkflow":
api.executeWorkflowInProject(workflowName,projname)
if operation == "updateWorkflowFromString":
print "received request to update workflow: ",workflowName
api.updateExistingWorkflowInProject(workflowName,data,projname)
return "OK"
@tangelo.restful
def post(*pargs, **kwargs):
return "projmgr.post()"
@tangelo.restful
def delete(resource, projname, datatype=None, dataset=None):
if resource != "project":
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: project")
# (This is expressing xor)
if (datatype is None) != (dataset is None):
return tangelo.HTTPStatusCode(400, "Bad arguments - 'datatype' and 'dataset' must both be specified if either one is specified")
if datatype is None:
api.deleteProjectNamed(projname)
else:
api.deleteDataset(projname, datatype, dataset)
return "OK"
|
arborworkflows/ProjectManager
|
tangelo/projmgr.py
|
Python
|
apache-2.0
| 7,996
|
try:
import unittest2 as unittest # Python2.6
except ImportError:
import unittest
from tests.functional import test_base
@unittest.skipIf(test_base.get_test_server_api() == 1,
"The tag API didn't work at v1 - see frontend issue #927")
class TestTags(test_base.TestBase):
testcase_name = "tag API"
def test_create_delete(self, tag_id="create_tag"):
"""
Create a tag then delete it.
This test is a little contrived, since the tag create/delete
endpoints are only intended for internal use.
"""
# Create a tag
self.assertTrue(self.client.tag.create(tag_id))
# Check that the tag doesn't exist (It has no photos, so it's invisible)
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Create a tag on one of the photos
self.photos[0].update(tagsAdd=tag_id)
# Check that the tag now exists
self.assertIn(tag_id, [t.id for t in self.client.tags.list()])
# Delete the tag
self.assertTrue(self.client.tag.delete(tag_id))
# Check that the tag is now gone
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Also remove the tag from the photo
self.photos[0].update(tagsRemove=tag_id)
# Create the tag again
self.photos[0].update(tagsAdd=tag_id)
self.assertIn(tag_id, [t.id for t in self.client.tags.list()])
# Delete using the tag object directly
tag = [t for t in self.client.tags.list() if t.id == tag_id][0]
self.assertTrue(tag.delete())
# Check that the tag is now gone
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Also remove the tag from the photo
self.photos[0].update(tagsRemove=tag_id)
# TODO: Un-skip and update this tests once there are tag fields
# that can be updated (the owner field cannot be updated).
@unittest.skip("Can't test the tag.update endpoint, "
"since there are no fields that can be updated")
def test_update(self):
""" Test that a tag can be updated """
# Update the tag using the Trovebox class, passing in the tag object
owner = "test1@trovebox.com"
ret_val = self.client.tag.update(self.tags[0], owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
# Update the tag using the Trovebox class, passing in the tag id
owner = "test2@trovebox.com"
ret_val = self.client.tag.update(self.TEST_TAG, owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
# Update the tag using the Tag object directly
owner = "test3@trovebox.com"
ret_val = self.tags[0].update(owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
def test_tag_with_spaces(self):
""" Run test_create_delete using a tag containing spaces """
self.test_create_delete("tag with spaces")
def test_tag_with_slashes(self):
""" Run test_create_delete using a tag containing slashes """
self.test_create_delete("tag/with/slashes")
# TODO: Un-skip this test once issue #919 is resolved -
# tags with double-slashes cannot be deleted
@unittest.skip("Tags with double-slashed cannot be deleted")
def test_tag_with_double_slashes(self):
""" Run test_create_delete using a tag containing double-slashes """
self.test_create_delete("tag//with//double//slashes")
|
photo/openphoto-python
|
tests/functional/test_tags.py
|
Python
|
apache-2.0
| 3,889
|
# -*- coding:utf-8 -*-
import scrapy
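# ---------------------------------------------------------------------------
# Annotation (not part of the original source): this file is only a stub that
# imports scrapy. A minimal spider skeleton in the usual scrapy style is
# sketched below; the spider name, start URL and CSS selector are placeholders
# rather than details of the lianjia project.
# ---------------------------------------------------------------------------
class DemoSpider(scrapy.Spider):
    name = "demo"
    start_urls = ["https://example.com/listings"]
    def parse(self, response):
        # Yield one item per listing title found on the page.
        for title in response.css("h2::text").getall():
            yield {"title": title}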
|
ds17/reptiles_gh
|
lianjia/lianjia.py
|
Python
|
apache-2.0
| 37
|
from setuptools import setup, find_packages
setup(name='netdisco',
version='0.9.2',
description='Discover devices on your local network',
url='https://github.com/home-assistant/netdisco',
author='Paulus Schoutsen',
author_email='Paulus@PaulusSchoutsen.nl',
license='Apache License 2.0',
install_requires=['netifaces>=0.10.0', 'requests>=2.0',
'zeroconf==0.17.6'],
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False)
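# ---------------------------------------------------------------------------
# Annotation (not part of the original source): a short consumer sketch for
# the package this setup.py installs, based on the upstream README; the
# NetworkDiscovery class and its scan/discover/get_info/stop methods are
# assumed to match the installed netdisco version.
# ---------------------------------------------------------------------------
from netdisco.discovery import NetworkDiscovery
netdis = NetworkDiscovery()
netdis.scan()                          # probe the local network (blocks briefly)
for device_type in netdis.discover():  # e.g. 'philips_hue', 'google_cast'
    print(device_type, netdis.get_info(device_type))
netdis.stop()                          # shut the listeners down cleanly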
|
brburns/netdisco
|
setup.py
|
Python
|
apache-2.0
| 513
|
#!/usr/bin/env python3
# coding: utf-8
#Initial test code for MiSynth Wave Generator
#Opens Wave Files And Cuts And Plays Them As The FPGA will
#Synth plays back 2048 samples at frequency of note
#Effective sample rate is 901,120Hz @ 440Hz
#CURRENTLY A DRAWING LOOP TO BE SOLVED, THANKS WX/PYTHON FOR YOUR
#COMPLETE LACK OF TRANSPARENCY
#ALWAYS USE TKINTER
import wave
import wx
import audiothread
import wavehandle
import sdisp
class MyFrame(wx.Frame):
def __init__(self, parent, title, wavehandle):
wx.Frame.__init__(self, parent, -1, title, size=(1024, 624))
self.wavehandle = wavehandle
self.scale = 8
self.shift = 0
self.drawcnt = 0
self.scope = [0]
# Create the menubar
menuBar = wx.MenuBar()
menu = wx.Menu()
menu.Append(wx.ID_OPEN, "Open\tAlt-O", "Open Wave")
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit")
        # bind the menu events
self.Bind(wx.EVT_MENU, self.OnOpenButton, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.OnQuitButton, id=wx.ID_EXIT)
menuBar.Append(menu, "&Actions")
self.SetMenuBar(menuBar)
self.wavepanel = WavePanel(self, self.getscale, self.setsector)
self.wavepanel.SetBackgroundColour(wx.Colour(32,55,91))
self.scopepanel = ScopePanel(self)
self.scopepanel.SetBackgroundColour(wx.Colour(20,25,20))
self.buttonpanel = wx.Panel(self, -1, pos=(0, 384), size=(1024, 40))
self.textpanel = sdisp.TextPanel(self)
self.timestamp = wx.StaticText(self.wavepanel, -1,
("Time: " + str(0.0)
+ "/" + str(0.0)),
pos=(2, 2),
style=wx.ALIGN_LEFT)
self.timestamp.SetForegroundColour((217, 66, 244))
btnOpen = wx.Button(self.buttonpanel, wx.ID_OPEN, "Open",
pos=(2, 0), size=(80, 40))
btnExport = wx.Button(self.buttonpanel, -1, "Export",
pos=(84, 0), size=(80, 40))
btnQuit = wx.Button(self.buttonpanel, wx.ID_EXIT, "Quit",
pos=(166, 0), size=(80, 40))
self.btnPlay = wx.ToggleButton(self.buttonpanel, -1, "Play",
pos=(943, 0), size=(80, 40))
# bind the button events to handlers
self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btnOpen)
self.Bind(wx.EVT_BUTTON, self.OnExportButton, btnExport)
self.Bind(wx.EVT_BUTTON, self.OnQuitButton, btnQuit)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnPlayButton, self.btnPlay)
self.Bind(wx.EVT_MOUSEWHEEL, self.onMouseWheel)
self.wavepanel.Bind(wx.EVT_PAINT, self.onPaint)
self.contentNotSaved = False
self.fileloaded = False
self.quadrant = -1
self.Centre()
def setsector(self, sector):
self.quadrant = abs(sector)
self.Refresh()
def getscale(self):
return self.scale
def getSample(self, sector):
print("obtaining sample")
if self.quadrant == -1:
self.setsector(1)
sample = self.wavehandle.getaudiodata(self.shift, 0, sector)
return sample
def onPaint(self, event):
self.drawcnt += 1
#print("Drawing" + str(self.drawcnt))
dc = wx.PaintDC(self.wavepanel)
dc.Clear()
totalseconds = self.wavehandle.gettotaltime()
shiftseconds = self.wavehandle.framestoseconds(self.shift)
self.timestamp.SetLabel("Time: " + str(shiftseconds) + "/" + str(
totalseconds))
dc.SetBrush(wx.Brush(wx.Colour(16, 28, 45), wx.SOLID))
dc.DrawRectangle(256, 0, 512, 256)
# Centre Line
pointdata = self.wavehandle.getdrawpoints(self.shift)
for x in range(1, 1024): # Ugly
if (x > 256) and (x < 768):
dc.SetPen(wx.Pen((0, 255, 242), 1, wx.PENSTYLE_SOLID))
else:
dc.SetPen(wx.Pen((183, 204, 163), 1, wx.PENSTYLE_SOLID))
dc.DrawLine(x - 1, pointdata[x - 1], x, pointdata[x])
#dc.DrawPoint(x, pointdata[x])
if (x == 256) or (x == 768):
dc.SetPen(wx.Pen((0, 0, 0), 1, wx.PENSTYLE_DOT))
dc.DrawLine(x, 0, x, 256)
if (x == 496) or (x == 528):
dc.SetPen(wx.Pen((0, 0, 0), 1, wx.PENSTYLE_DOT))
dc.DrawLine(x, 0, x, 256)
dc = wx.PaintDC(self.scopepanel)
dc.Clear()
        dc.SetPen(wx.Pen((255, 0, 0), 1, wx.PENSTYLE_SOLID))
for x in range(0, 1024):
if len(self.scope) > 1:
p = self.scope[x % len(self.scope)] + 64
else:
p = 64
dc.DrawPoint(x, p)
def OnPlayButton(self, event):
if self.btnPlay.GetValue():
self.audiohandle = audiothread.AudioHandler()
if self.fileloaded:
self.audiohandle.setsample(self.getSample(self.quadrant), 2048)
self.scope = self.audiohandle.getscopesample()
print("sample length: " + str(len(self.scope)))
self.audiohandle.start()
else:
self.audiohandle.stop()
self.audiohandle = None
def onMouseWheel(self, event):
if self.wavepanel.mouseOver:
if self.wavepanel.ctrlDown:
if event.GetWheelRotation() > 0:
if(self.scale > 1):
self.scale = self.scale >> 1
else:
if(self.scale < 2097151):
self.scale = self.scale << 1
self.Refresh()
else:
if event.GetWheelRotation() > 0:
if(self.shift > 0):
self.shift -= 2000
else:
if (self.shift < 10000000):
self.shift += 2000
self.Refresh()
if self.scopepanel.mouseOver:
if event.GetWheelRotation() > 0:
self.audiohandle.setshift(1)
else:
self.audiohandle.setshift(-1)
self.scope = self.audiohandle.getscopesample()
self.Refresh()
def OnOpenButton(self, evt):
#Open file
with wx.FileDialog(self, "Open .wav file.", wildcard="WAV files (*.wav)|*.wav",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
pathname = fileDialog.GetPath()
try:
with wave.open(pathname, 'r') as file:
self.wavehandle.loadwave(file)
self.Refresh()
self.fileloaded = True
except IOError:
wx.LogError("Cannot open file '%s'." % pathname)
def OnExportButton(self, evt):
print("Export")
def OnQuitButton(self, evt):
self.Close()
class WavePanel(wx.Panel): #just handles mouseover events
def __init__(self, parent, getter, sender):
wx.Panel.__init__(self, parent, pos=(0,0),size=(1024, 256))
self.mouseOver = False
self.ctrlDown = False
self.Bind(wx.EVT_ENTER_WINDOW, self.onMouseOver)
self.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseLeave)
self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
self.Bind(wx.EVT_KEY_UP, self.onKeyRelease)
self.Bind(wx.EVT_LEFT_DOWN, self.onMouseClick)
self.getter = getter
self.sender = sender
def onMouseClick(self, event):
if self.mouseOver:
x, y = self.ScreenToClient(wx.GetMousePosition())
sector = abs(x // (2048 / self.getter()))
self.sender(sector)
def onMouseOver(self, event):
self.mouseOver = True
def onMouseLeave(self, event):
self.mouseOver = False
def onKeyPress(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_CONTROL:
self.ctrlDown = True
def onKeyRelease(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_CONTROL:
self.ctrlDown = False
class ScopePanel(wx.Panel): #just handles mouseover events
def __init__(self, parent):
wx.Panel.__init__(self, parent, pos=(0, 256), size=(1024, 128))
self.mouseOver = False
self.Bind(wx.EVT_ENTER_WINDOW, self.onMouseOver)
self.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseLeave)
def onMouseOver(self, event):
self.mouseOver = True
def onMouseLeave(self, event):
self.mouseOver = False
class MyApp(wx.App):
def OnInit(self):
waveHandle = wavehandle.WaveHandler()
frame = MyFrame(None, "MiSynth Editor", waveHandle)
self.SetTopWindow(frame)
frame.Show(True)
return True
if __name__ == '__main__':
app = MyApp(redirect=True)
app.MainLoop()
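# ---------------------------------------------------------------------------
# Annotation (not part of the original source): the editor above ultimately
# works with 2048-sample slices of a wave file. A minimal, GUI-free sketch of
# that extraction using only the standard-library wave and array modules is
# shown below; "input.wav" and the 16-bit PCM assumption are placeholders.
# ---------------------------------------------------------------------------
import array
import wave
with wave.open("input.wav", "rb") as wav:
    assert wav.getsampwidth() == 2, "sketch assumes 16-bit PCM samples"
    n_channels = wav.getnchannels()
    frames = wav.readframes(wav.getnframes())
samples = array.array("h", frames)   # signed 16-bit samples
if n_channels == 2:
    samples = samples[::2]           # keep the left channel only
wavetable = samples[:2048]           # one wavetable's worth of samples
print(len(wavetable), "samples extracted")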
|
magicmilo/MiSynth-Wavetable-Generator
|
main.py
|
Python
|
apache-2.0
| 9,015
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class LBetaTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session(use_gpu=True):
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one))))
self.assertAllClose(
0.5, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
def test_one_dimensional_arg_dynamic(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session(use_gpu=True):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
self.assertAllClose(0.5,
beta_ph.eval(feed_dict={ph: x_one_half}))
def test_four_dimensional_arg_with_partial_shape_dynamic(self):
x_ = np.ones((3, 2, 3, 4))
# Gamma(1) = 0! = 1
# Gamma(1 + 1 + 1 + 1) = Gamma(4) = 3! = 6
# ==> Beta([1, 1, 1, 1])
# = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1)
# = 1 / 6
expected_beta_x = 1 / 6 * np.ones((3, 2, 3))
with self.session(use_gpu=True):
x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None])
beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph))
self.assertAllClose(expected_beta_x,
beta_ph.eval(feed_dict={x_ph: x_}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
def test_two_dimensional_arg_dynamic(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose([0.5, 0.5],
beta_ph.eval(feed_dict={ph: x_one_half}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session(use_gpu=True):
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
(2,),
self.evaluate(array_ops.shape(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
tensor_shape.TensorShape([2]),
special_math_ops.lbeta(x_one_half).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_complicated_shape(self):
with self.session(use_gpu=True):
x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
self.assertAllEqual(
(3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x))))
self.assertEqual(
tensor_shape.TensorShape([3, 2]),
special_math_ops.lbeta(x).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works, and we get one
# as the answer, always.
x_a = [5.5]
x_b = [0.1]
with self.session(use_gpu=True):
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))))
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_b))))
self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank1_returns_negative_infinity(self):
with self.session(use_gpu=True):
x = constant_op.constant([], shape=[0])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=())
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
with self.session(use_gpu=True):
event_size = 0
for batch_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
with self.session(use_gpu=True):
batch_size = 0
for event_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant([], shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
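# Annotation (not part of the original source): the expected values above
# follow from lbeta(x) = sum_i(lgamma(x_i)) - lgamma(sum_i(x_i)). The hedged
# cross-check below verifies the [2, 1.] case used in several tests with
# SciPy, and is skipped silently if SciPy is unavailable, mirroring the
# guarded imports in BesselTest below.
try:
  from scipy.special import gammaln  # pylint: disable=g-import-not-at-top
  _x_check = np.array([2., 1.])
  _lbeta_check = np.sum(gammaln(_x_check)) - gammaln(np.sum(_x_check))
  assert np.isclose(np.exp(_lbeta_check), 0.5)  # Beta(2, 1) = 1!*0!/2! = 1/2
except ImportError:
  pass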
class BesselTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_bessel_i0(self):
x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(special.i0(x_single),
self.evaluate(special_math_ops.bessel_i0(x_single)))
self.assertAllClose(special.i0(x_double),
self.evaluate(special_math_ops.bessel_i0(x_double)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@test_util.run_in_graph_and_eager_modes
def test_bessel_i1(self):
x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(special.i1(x_single),
self.evaluate(special_math_ops.bessel_i1(x_single)))
self.assertAllClose(special.i1(x_double),
self.evaluate(special_math_ops.bessel_i1(x_double)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
class EinsumTest(test.TestCase):
simple_cases = [
'ij,jk->ik',
'ijk,jklm->il',
'ij,jk,kl->il',
'ijk->i',
'ijk->kji',
'ji,kj->ik',
'ikl,kji->kl',
'klj,lki->ij',
'ijk,ilj->kli',
'kij,mkb->ijmb',
'ijk,ijl,ikl->i',
'i,ijk,j->k',
'ij,ij,jk,kl->il',
'ij,kj,il,jm->ml',
'a,ab,abc->abc',
'a,b,ab->ab',
'ab,ab,c->',
'ab,ab,c->c',
'ab,ab,cd,cd->',
'ab,ab,cd,cd->ac',
'ab,ab,cd,cd->cd',
'ab,ab,cd,cd,ef,ef->',
'ab,cd,ef->abcdef',
'ab,cd,ef->acdf',
'ab,cd,de->abcde',
'ab,cd,de->be',
'ab,bcd,cd->abcd',
'ab,bcd,cd->abd',
'eb,cb,fb->cef',
'abcd,ad',
'bd,db,eac->ace',
'ba,ac,da->bcd',
'ab,ab',
'ab,ba',
'abc,abc',
'abc,bac',
'abc,cba',
'dba,ead,cad->bce',
'aef,fbc,dca->bde',
'iJ,Jk->ik',
'iJ,Ki->JK',
      'iJk,Jklm->Jk',
'ij, jk, kl -> il',
'a, ab, abc -> abc',
'ab, ab, cd, cd, ef, ef -> ',
'abc, bac',
'iJ, Ki -> JK',
'iJk, Jklm -> Jk'
]
long_cases = [
'bca,cdb,dbf,afc->',
'efc,dbc,acf,fd->abe',
'ea,fb,gc,hd,abcd->efgh',
'ea,fb,abcd,gc,hd->efgh',
'abhe,hidj,jgba,hiab,gab',
'efc, dbc, acf, fd -> abe',
'abhe, hidj, jgba, hiab, gab',
]
invalid_cases = [
# bad formats
'',
'ijk ijk',
'ij.jk->ik',
'ij...,jk...->ik...',
'ij,k ->kji',
'ij,k-> kji',
# axis in output that does not exist
'ij,jk->im',
# incorrect number of dimensions
'ij,jkl->kl',
# this is allowed in numpy but not implemented here yet
'iij,jk'
]
dim_mismatch_cases = [('ijk,jkl->il', [(2, 3, 4), (3, 5, 6)])]
def disabled_test_simple(self):
for case in self.simple_cases:
self.run_test(case)
def test_long(self):
for case in self.long_cases:
self.run_test(case)
def test_invalid(self):
for axes in self.invalid_cases:
inputs = [
array_ops.placeholder(dtypes.float32, shape=(3, 4)),
array_ops.placeholder(dtypes.float32, shape=(3, 4)),
]
with self.assertRaises(ValueError):
_ = special_math_ops.einsum(axes, *inputs)
def test_invalid_keyword_arguments(self):
m0 = array_ops.placeholder(dtypes.int32, shape=(1, None))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, 1))
with self.assertRaisesRegexp(
TypeError,
'invalid keyword arguments for this function: invalid1, invalid2'):
_ = special_math_ops.einsum(
'ij,jk->ik',
m0,
m1,
name='name',
invalid1='value1',
invalid2='value2')
def test_dim_mismatch(self):
for axes, input_shapes in self.dim_mismatch_cases:
inputs = [
array_ops.placeholder(dtypes.float32, shape=shape)
for shape in input_shapes
]
with self.assertRaises(ValueError):
_ = special_math_ops.einsum(axes, *inputs)
def run_test(self, axes):
all_axes = {ax: np.random.randint(4, 12) for ax in axes if ax.isalpha()}
input_vals = []
input_axes, _, _ = axes.partition('->')
for idx in input_axes.split(','):
shape = [all_axes[ax] for ax in idx if ax.isalpha()]
input_vals.append(np.random.random(shape))
input_tensors = [constant_op.constant(val) for val in input_vals]
output_tensor = special_math_ops.einsum(axes, *input_tensors)
with self.session(use_gpu=True):
output_value = self.evaluate(output_tensor)
correct_value = np.einsum(axes, *input_vals)
err = np.abs(correct_value - output_value).max()
# print(axes, err)
self.assertLess(err, 1e-8)
def test_input_is_placeholder(self):
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(1, None))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, 1))
out = special_math_ops.einsum('ij,jk->ik', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[1, 2, 3]],
m1: [[2], [1], [1]],
}
self.assertAllClose([[7]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, 3))
m1 = array_ops.placeholder(dtypes.int32, shape=(3,))
out = special_math_ops.einsum('ij,j->i', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[1, 2, 3]],
m1: [2, 1, 1],
}
self.assertAllClose([7], sess.run(out, feed_dict=feed_dict))
# Tests for placeholders which have two or more None values
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(2, 1))
out = special_math_ops.einsum('ijk,kl->ijl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[1, 2]]],
m1: [[3], [2]],
}
self.assertAllClose([[[7]]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(2, 1))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
out = special_math_ops.einsum('kl,ijk->ijl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[3], [2]],
m1: [[[1, 2]]],
}
self.assertAllClose([[[7]]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(2,))
out = special_math_ops.einsum('ijk,k->ij', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[1, 2]]],
m1: [3, 2],
}
self.assertAllClose([[7]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, 2, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, 2))
out = special_math_ops.einsum('ijkl,ij->ikl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[[1, 2]], [[2, 1]]]],
m1: [[3, 2]],
}
self.assertAllClose([[[7, 8]]], sess.run(out, feed_dict=feed_dict))
if __name__ == '__main__':
test.main()
|
girving/tensorflow
|
tensorflow/python/ops/special_math_ops_test.py
|
Python
|
apache-2.0
| 14,571
|
"""Constants for Google Assistant."""
from homeassistant.components import (
binary_sensor,
camera,
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
DOMAIN = "google_assistant"
GOOGLE_ASSISTANT_API_ENDPOINT = "/api/google_assistant"
CONF_EXPOSE = "expose"
CONF_ENTITY_CONFIG = "entity_config"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_PROJECT_ID = "project_id"
CONF_ALIASES = "aliases"
CONF_API_KEY = "api_key"
CONF_ROOM_HINT = "room"
CONF_ALLOW_UNLOCK = "allow_unlock"
CONF_SECURE_DEVICES_PIN = "secure_devices_pin"
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
"climate",
"cover",
"fan",
"group",
"input_boolean",
"light",
"media_player",
"scene",
"script",
"switch",
"vacuum",
"lock",
"binary_sensor",
"sensor",
]
PREFIX_TYPES = "action.devices.types."
TYPE_CAMERA = PREFIX_TYPES + "CAMERA"
TYPE_LIGHT = PREFIX_TYPES + "LIGHT"
TYPE_SWITCH = PREFIX_TYPES + "SWITCH"
TYPE_VACUUM = PREFIX_TYPES + "VACUUM"
TYPE_SCENE = PREFIX_TYPES + "SCENE"
TYPE_FAN = PREFIX_TYPES + "FAN"
TYPE_THERMOSTAT = PREFIX_TYPES + "THERMOSTAT"
TYPE_LOCK = PREFIX_TYPES + "LOCK"
TYPE_BLINDS = PREFIX_TYPES + "BLINDS"
TYPE_GARAGE = PREFIX_TYPES + "GARAGE"
TYPE_OUTLET = PREFIX_TYPES + "OUTLET"
TYPE_SENSOR = PREFIX_TYPES + "SENSOR"
TYPE_DOOR = PREFIX_TYPES + "DOOR"
TYPE_TV = PREFIX_TYPES + "TV"
TYPE_SPEAKER = PREFIX_TYPES + "SPEAKER"
SERVICE_REQUEST_SYNC = "request_sync"
HOMEGRAPH_URL = "https://homegraph.googleapis.com/"
REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + "v1/devices:requestSync"
# Error codes used for SmartHomeError class
# https://developers.google.com/actions/reference/smarthome/errors-exceptions
ERR_DEVICE_OFFLINE = "deviceOffline"
ERR_DEVICE_NOT_FOUND = "deviceNotFound"
ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange"
ERR_NOT_SUPPORTED = "notSupported"
ERR_PROTOCOL_ERROR = "protocolError"
ERR_UNKNOWN_ERROR = "unknownError"
ERR_FUNCTION_NOT_SUPPORTED = "functionNotSupported"
ERR_CHALLENGE_NEEDED = "challengeNeeded"
ERR_CHALLENGE_NOT_SETUP = "challengeFailedNotSetup"
ERR_TOO_MANY_FAILED_ATTEMPTS = "tooManyFailedAttempts"
ERR_PIN_INCORRECT = "pinIncorrect"
ERR_USER_CANCELLED = "userCancelled"
# Event types
EVENT_COMMAND_RECEIVED = "google_assistant_command"
EVENT_QUERY_RECEIVED = "google_assistant_query"
EVENT_SYNC_RECEIVED = "google_assistant_sync"
DOMAIN_TO_GOOGLE_TYPES = {
camera.DOMAIN: TYPE_CAMERA,
climate.DOMAIN: TYPE_THERMOSTAT,
cover.DOMAIN: TYPE_BLINDS,
fan.DOMAIN: TYPE_FAN,
group.DOMAIN: TYPE_SWITCH,
input_boolean.DOMAIN: TYPE_SWITCH,
light.DOMAIN: TYPE_LIGHT,
lock.DOMAIN: TYPE_LOCK,
media_player.DOMAIN: TYPE_SWITCH,
scene.DOMAIN: TYPE_SCENE,
script.DOMAIN: TYPE_SCENE,
switch.DOMAIN: TYPE_SWITCH,
vacuum.DOMAIN: TYPE_VACUUM,
}
DEVICE_CLASS_TO_GOOGLE_TYPES = {
(cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE,
(cover.DOMAIN, cover.DEVICE_CLASS_DOOR): TYPE_DOOR,
(switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH,
(switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_DOOR): TYPE_DOOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_GARAGE_DOOR): TYPE_GARAGE,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_LOCK): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_OPENING): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_WINDOW): TYPE_SENSOR,
(media_player.DOMAIN, media_player.DEVICE_CLASS_TV): TYPE_TV,
(media_player.DOMAIN, media_player.DEVICE_CLASS_SPEAKER): TYPE_SPEAKER,
(sensor.DOMAIN, sensor.DEVICE_CLASS_TEMPERATURE): TYPE_SENSOR,
}
CHALLENGE_ACK_NEEDED = "ackNeeded"
CHALLENGE_PIN_NEEDED = "pinNeeded"
CHALLENGE_FAILED_PIN_NEEDED = "challengeFailedPinNeeded"
|
fbradyirl/home-assistant
|
homeassistant/components/google_assistant/const.py
|
Python
|
apache-2.0
| 3,937
|
# UDP code taken from < https://pymotw.com/2/socket/udp.html >
import socket, time, fcntl, struct
def udpBeacon():
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
my_ip = getIP('wlan0')
spliced_subnet = my_ip[:my_ip.rfind('.')] + ".255"
# Define broadcasting address and message
server_address = (spliced_subnet, 5001)
message = 'Hello, I am a minibot!'
# Send message and resend every 9 seconds
while True:
try:
# Send data
print('sending broadcast: "%s"' % message)
sent = sock.sendto(bytes(message, 'utf8'), server_address)
except Exception as err:
print(err)
time.sleep(9)
def getIP(ifname):
"""
Returns the IP of the device
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', bytes(ifname[:15],'utf8'))
)[20:24])
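# Illustrative usage sketch (added for exposition; not part of the original
# module). The interface name below is an assumption and must exist on the
# host for the SIOCGIFADDR ioctl in getIP() to succeed.
if __name__ == '__main__':
    print('wlan0 address: %s' % getIP('wlan0'))
    udpBeacon()  # broadcasts "Hello, I am a minibot!" to <subnet>.255:5001 forever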
|
cornell-cup/cs-minibot
|
minibot/hardware/communication/UDP.py
|
Python
|
apache-2.0
| 1,145
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SuggestTrials
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync]
from google.cloud import aiplatform_v1beta1
def sample_suggest_trials():
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SuggestTrialsRequest(
parent="parent_value",
suggestion_count=1744,
client_id="client_id_value",
)
# Make the request
operation = client.suggest_trials(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py
|
Python
|
apache-2.0
| 1,664
|
import json
from datetime import datetime
from django.test import TestCase, Client
from authentication.models import Account
from recipies.models import Recipe, RecipeSteps
from recipies.serializers import RecipeSerializer, RecipeStepsSerializer
class Utility(TestCase):
"""Utility class para testing"""
@staticmethod
def checkElement(test_instance, model, data):
"""Helper Function. Either check two values against on another or call correct helper function"""
# IF the type is a list or dict, call the correct function to check its elements. ELSE directly
# compare the elements
if type(model) is list:
Utility.checkArrayModel(test_instance, model, data)
elif type(model) is dict:
Utility.checkDictModel(test_instance, model, data)
else:
test_instance.assertEqual(model, data)
@staticmethod
def checkArrayModel(test_instance, model, data):
"""Helper function. Check an array to see if the model data is present in the data array"""
for i in range(len(model)):
Utility.checkElement(test_instance, model[i], data[i])
@staticmethod
def checkDictModel(test_instance, model, data):
"""Helper function. Check a model dictionary against a data dictionary key by key"""
for key in model.keys():
Utility.checkElement(test_instance, model.get(key), data.__dict__.get(key))
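# Illustrative sketch (added for exposition; not used by the tests below). It
# mirrors the recursion that Utility.checkElement performs, minus the TestCase
# plumbing: lists are walked index by index, dicts key by key against the
# compared object's __dict__, and leaf values are compared directly.
def _example_recursive_compare(model, data):
    if isinstance(model, list):
        return all(_example_recursive_compare(m, d) for m, d in zip(model, data))
    if isinstance(model, dict):
        return all(_example_recursive_compare(value, data.__dict__.get(key))
                   for key, value in model.items())
    return model == data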
# Create your tests here.
class TestRecipeModel(TestCase):
"""Test the custom method attached to the Recipe model"""
def setUp(self):
self.recipe_data = dict(
recipe_name="Test Recipe",
recipe_style="Kolsch",
recipe_notes="This is my first test recipe submited from a unit test.",
last_brew_date=datetime.now()
)
self.malts_data = [
dict(
malt_brand="ABrand",
malt_type="Extra Light",
amount_by_weight=3.3,
),
dict(
malt_brand="BBrand",
malt_type="Crystal",
amount_by_weight=1.5,
malt_extract=False
),
dict(
malt_brand="CBrand",
malt_type="Light",
amount_by_weight=3,
dry_malt=True,
),
]
self.hops_data = [
dict(
hop_name="Amarillo",
alpha_acid_content=12.3,
beta_acid_content=7.9,
hop_weight=1.5,
hop_weight_uom="g",
),
dict(
hop_name="Cascade",
alpha_acid_content=8.8,
hop_weight=0.5,
hop_weight_uom="oz",
),
dict(
hop_name="Citra",
alpha_acid_content=7.9,
beta_acid_content=4.6,
hop_weight=1.0,
hop_weight_uom="oz",
dry_hops=True,
),
]
self.user = Account.objects.create_user('test', 'foo')
def tearDown(self):
self.recipe_data=None
self.malts_data=None
self.hops_data=None
self.user.delete()
def test_RecipeManager_CreateValidRecipe(self):
recipe = Recipe.objects.create_recipe(self.user, self.recipe_data, malts_data=self.malts_data, hops_data=self.hops_data)
self.assertIsInstance(recipe, Recipe)
Utility.checkElement(self, self.hops_data, recipe.recipe_hops.order_by("hop_name"))
Utility.checkElement(self, self.malts_data, recipe.recipe_malts.order_by("malt_brand"))
Utility.checkElement(self, self.recipe_data, recipe)
def test_RecipeManager_FailNoRecipeData(self):
with self.assertRaises(ValueError) as err:
Recipe.objects.create_recipe(self.user, None, self.malts_data, self.hops_data)
self.assertEqual(err.exception.message, 'Recipe information is required to create a recipe.')
def test_RecipeManager_FailInactiveUser(self):
self.user.is_active=False
with self.assertRaises(ValueError) as err:
Recipe.objects.create_recipe(self.user, self.recipe_data, malts_data=self.malts_data, hops_data=self.hops_data)
self.assertEqual(err.exception.message, 'Account must be active to create a recipe.')
def test_RecipeManager_FailNotLoggedIn(self):
with self.assertRaises(ValueError) as err:
Recipe.objects.create_recipe(None, self.recipe_data, malts_data=self.malts_data, hops_data=self.hops_data)
self.assertEqual(err.exception.message, 'Need to be logged in to create a recipe.')
class TestRecipeStepModel(TestCase):
def setUp(self):
self.recipe_data = dict(
recipe_name="Test Recipe",
recipe_style="Kolsch",
recipe_notes="This is my first test recipe submited from a unit test.",
last_brew_date=datetime.now()
)
self.user = Account.objects.create_user('test', 'foo')
self.recipe = Recipe.objects.create_recipe(self.user, self.recipe_data)
def tearDown(self):
self.recipe_data = None
self.user.delete()
self.recipe.delete()
def test_RecipeStepsManager_CreateValidStep(self):
# Collect a reference to the recipe so that its id can be retrieved
recipe_obj = Recipe.objects.get(recipe_name=self.recipe_data['recipe_name'])
step_data = dict(
step='This is a step',
step_order=1
)
step_obj = RecipeSteps.objects.save_step(step_data, recipe_obj.id)
self.assertIsInstance(step_obj, RecipeSteps)
Utility.checkElement(self, step_data, step_obj)
class TestRecipeSerializer(TestCase):
"""Test the serializers for the recipe class"""
def setUp(self):
self.json_data = open('recipies/testRecipe.json','r').read()
self.data = self.retrieveRecipeData()
# Extract just the date portion from the datetime object
my_datetime = datetime.today()
self.data['last_brew_date'] = datetime.date(my_datetime)
self.account = Account.objects.create(username='foot',password='bar2')
def tearDown(self):
self.json_data = None
self.data = None
self.account.delete()
def retrieveRecipeData(self):
"""Retrieve a new decoding of the JSON recipe data"""
return json.loads(self.json_data)
def createRecipe(self, user, data):
"""Create a recipe for use with the update unit test"""
hops = data.pop("recipe_hops")
malts = data.pop("recipe_malts")
return Recipe.objects.create_recipe(user, data, malts, hops)
def test_RecipeSerializer_Create_ValidData(self):
serialized_data = RecipeSerializer(data=self.data)
self.assertTrue(serialized_data.is_valid())
recipe = serialized_data.save(user=self.account)
Utility.checkElement(self, self.data.pop('recipe_hops'), recipe.recipe_hops.order_by("hop_name"))
Utility.checkElement(self, self.data.pop('recipe_malts'), recipe.recipe_malts.order_by("malt_brand"))
Utility.checkElement(self, self.data, recipe)
def test_RecipeSerializer_Update_ValidData(self):
premade_recipe = self.createRecipe(self.account, self.data)
recipe_data = self.retrieveRecipeData()
# Add another hop
self.data['recipe_hops'] = list()
self.data['recipe_hops'].append(dict(
hop_name="Tettang",
alpha_acid_content=8.8,
beta_acid_content=6.4,
hop_weight=3.4,
hop_weight_uom="oz",
dry_hops=True,
))
# Change the malt
self.data['recipe_malts'] = list()
self.data['recipe_malts'].append(dict(
malt_brand="Fruity_Tooty",
malt_type="Crystal",
malt_extract=False,
amount_by_weight=7.0,
))
# Update the notes
self.data['recipe_notes'] = "Added this crystal to spice it up."
serializer = RecipeSerializer(instance=premade_recipe, data=self.data)
self.assertTrue(serializer.is_valid())
updated_recipe = serializer.save()
Utility.checkElement(self, self.data.pop('recipe_hops'), updated_recipe.recipe_hops.order_by("hop_name"))
Utility.checkElement(self, self.data.pop('recipe_malts'), updated_recipe.recipe_malts.order_by("malt_brand"))
Utility.checkElement(self, self.data, updated_recipe)
class TestRecipeStepsSerializer(TestCase):
def setUp(self):
self.recipe_data = dict(
recipe_name="Test Recipe",
recipe_style="Kolsch",
recipe_notes="This is my first test recipe submited from a unit test.",
last_brew_date=datetime.now()
)
self.user = Account.objects.create_user('test', 'foo')
self.recipe = Recipe.objects.create_recipe(self.user, self.recipe_data)
def tearDown(self):
self.recipe_data = None
self.user.delete()
self.recipe.delete()
def test_RecipeStepsSerializer_ValidData(self):
step_data = dict(
step_order=3,
step='You put the lime in the coke you nut',
recipe=self.recipe.id,
)
validated_step = RecipeStepsSerializer(data=step_data)
valid=validated_step.is_valid()
self.assertTrue(valid)
step = validated_step.save(recipe_id=self.recipe.id)
self.assertIsInstance(step, RecipeSteps)
class TestRecipeStepsView(TestCase):
def setUp(self):
        # Get a reference to the Django test client
self.client = Client()
# Create a user for use in creating a stub recipe
self.user = Account.objects.create_user(username='foot',password='bar2',email='fake@test.com',first_name='john',last_name='doe')
self.recipe_data = dict(
recipe_name="Test Recipe",
recipe_style="Kolsch",
recipe_notes="This is my first test recipe submited from a unit test.",
last_brew_date=datetime.now()
)
self.malts_data = [
dict(
malt_brand="ABrand",
malt_type="Extra Light",
amount_by_weight=3.3,
),
dict(
malt_brand="BBrand",
malt_type="Crystal",
amount_by_weight=1.5,
malt_extract=False
),
dict(
malt_brand="CBrand",
malt_type="Light",
amount_by_weight=3,
dry_malt=True,
),
]
self.hops_data = [
dict(
hop_name="Amarillo",
alpha_acid_content=12.3,
beta_acid_content=7.9,
hop_weight=1.5,
hop_weight_uom="oz",
),
dict(
hop_name="Cascade",
alpha_acid_content=8.8,
hop_weight=6.0,
hop_weight_uom="g",
),
dict(
hop_name="Citra",
alpha_acid_content=7.9,
beta_acid_content=4.6,
hop_weight=1.5,
hop_weight_uom="oz",
dry_hops=True,
),
]
# Generate the fake recipe
self.recipe = Recipe.objects.create_recipe(self.user, self.recipe_data, malts_data=self.malts_data, hops_data=self.hops_data)
def tearDown(self):
self.client = None
self.user.delete()
self.user = None
self.recipe_data = None
self.recipe.delete()
self.recipe = None
self.malts_data = None
self.hops_data = None
for step in RecipeSteps.objects.all():
step.delete()
def getRecipeId(self):
db_entry = Recipe.objects.get(recipe_name=self.recipe_data['recipe_name'])
return db_entry.id
def test_RecipeStepsView_ListNoSteps(self):
response = self.client.get('/api/v1/recipe/' + str(self.getRecipeId()) + '/step/')
self.assertEqual(response.status_code, 204)
def test_RecipeStepsView_ListSteps(self):
recipe_id = self.getRecipeId()
step_data = [
dict(
step='This is a step',
step_order=1
),
dict(
step='This is the second step',
step_order=2
)
]
for step in step_data:
step_obj = RecipeSteps.objects.save_step(step, recipe_id)
response = self.client.get('/api/v1/recipe/' + str(recipe_id) + '/step/')
self.assertEqual(response.status_code, 200)
        # TODO: Leaving this off for now until I can figure out how to compare an OrderedDict to a regular dict.
#Utility.checkElement(self, step_data, response.data)
def test_RecipeStepsView_ListStepsInvalidRecipe(self):
response = self.client.get('/api/v1/recipe/' + str(9999) + '/step/')
self.assertEqual(response.status_code, 404)
self.assertEqual(response.reason_phrase.lower(), 'not found')
self.assertEqual(response.data['detail'].lower(), 'not found.')
def test_RecipeStepsView_CreateMultipleSteps(self):
step_data = [
dict(
step='This is a step',
step_order=1
),
dict(
step='This is the second step',
step_order=2
)
]
json_step_data = json.dumps(step_data)
response = self.client.post('/api/v1/recipe/' + str(self.getRecipeId()) + '/step/', data=json_step_data, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.reason_phrase.lower(), 'created')
self.assertEqual(response.data['message'], 'Steps have been created.')
class TestRecipeViews(TestCase):
"""Check all of the http urls for the recipes"""
def setUp(self):
self.client = Client()
self.user = Account.objects.create_user(username='foot',password='bar2',email='fake@test.com',first_name='john',last_name='doe')
# Set the fake user to logged in as this is required for some of the requests.
self.client.login(username='foot',password='bar2')
my_datetime = datetime.today()
self.data = self.loadRecipeData()
for i in range(len(self.data)):
self.data[i]['last_brew_date'] = datetime.date(my_datetime)
self.setupRecipes(self.data, self.user)
def tearDown(self):
self.user.delete()
self.removeRecipes()
self.client = None
self.user = None
def loadRecipeData(self):
json_data = open('recipies/testRecipes.json' ,'r').read()
return json.loads(json_data)
def setupRecipes(self, recipes, user):
for i in range(len(recipes)):
Recipe.objects.create_recipe(user, recipes[i], recipes[i].get("recipe_malts"), recipes[i].get("recipe_hops"))
def removeRecipes(self):
self.data = []
for recipe in Recipe.objects.all():
recipe.delete()
def test_RecipeViews_ListRecipes_HasRecipes(self):
response = self.client.get('/api/v1/recipe/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), len(self.data))
def test_RecipeViews_ListRecipes_NoRecipes(self):
self.removeRecipes()
response = self.client.get('/api/v1/recipe/')
self.assertEqual(response.status_code, 204)
self.assertEqual(len(response.data), 0)
def test_RecipeViews_ListRecipes_UnauthorizedUser(self):
# First, delete the user
self.user.is_active = False
self.user.save()
response = self.client.get('/api/v1/recipe/')
self.assertEqual(response.status_code, 401)
self.assertEqual(response.reason_phrase.lower(), 'unauthorized')
self.assertEqual(response.data['status'].lower(), 'unauthorized')
self.assertEqual(response.data['message'], 'Requesting user is no longer active.')
def test_RecipeViews_DetailRecipe_InvalidId(self):
response = self.client.get('/api/v1/recipe/999/')
self.assertEqual(response.status_code, 404)
self.assertEqual(response.reason_phrase.lower(), 'not found')
self.assertEqual(response.data['detail'].lower(), 'not found.')
def test_RecipeViews_DetailRecipe_ValidRecipe(self):
        # Use the first recipe in the array
recipe_data = self.data[0]
db_entry = Recipe.objects.get(recipe_name=recipe_data['recipe_name'])
response = self.client.get('/api/v1/recipe/' + str(db_entry.id) + '/')
self.assertEqual(response.status_code, 200)
        # TODO: Leaving this off for now until I can figure out how to compare an OrderedDict to a regular dict.
# Utility.checkElement(self, recipe_data, response.data)
def test_RecipeViews_DestroyRecipe_InvalidId(self):
response = self.client.delete('/api/v1/recipe/999/')
self.assertEqual(response.status_code, 404)
self.assertEqual(response.reason_phrase.lower(), 'not found')
self.assertEqual(response.data['detail'].lower(), 'not found.')
def test_RecipeViews_DestroyRecipe_ValidRecipe(self):
        # Use the first recipe in the array
before_recipe_length = len(Recipe.objects.all())
recipe_data = self.data[0]
db_entry = Recipe.objects.get(recipe_name=recipe_data['recipe_name'])
response = self.client.delete('/api/v1/recipe/' + str(db_entry.id) + '/')
after_recipe_length = len(Recipe.objects.all())
self.assertEqual(response.status_code, 204)
self.assertEqual(len(response.data), 0)
self.assertEqual(after_recipe_length, (before_recipe_length - 1) )
def test_RecipeViews_CreateRecipe_ValidData(self):
json_new_recipe = open('recipies/testRecipe.json' ,'r').read()
new_recipe = json.loads(json_new_recipe)
response = self.client.post('/api/v1/recipe/', data=json_new_recipe, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.reason_phrase.lower(), 'created')
self.assertEqual(response.data['message'], 'Recipe has been created.')
# TODO: Deal with the ordered dict that is returned
# Utility.checkElement(self, new_recipe, response.data['recipe'])
def test_RecipeViews_CreateRecipe_InvalidData(self):
invalid_json = json.dumps({})
response = self.client.post('/api/v1/recipe/', data=invalid_json, content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase.lower(), 'bad request')
self.assertEqual(response.data['message'], 'Recipe could not be created with the received data.')
# Just make sure that there are items in the error array so that the user knows what they need to fix
self.assertTrue( ( len(response.data['errors']) > 0 ) )
def test_RecipeViews_UpdateRecipe_ExistingRecipe(self):
recipe_data = self.data[0]
db_entry = Recipe.objects.get(recipe_name=recipe_data['recipe_name'])
recipe_data['recipe_name'] = 'Updated Nommen'
recipe_data['last_brew_date'] = recipe_data['last_brew_date'].strftime('%Y-%m-%d')
json_recipe_data = json.dumps(recipe_data)
response = self.client.put('/api/v1/recipe/' + str(db_entry.id) + '/', data=json_recipe_data, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.reason_phrase.lower(), 'created')
self.assertEqual(response.data['message'], 'Recipe has been updated.')
# TODO: Deal with those damn ordered dicts
# Utility.checkElement(self, recipe_data, response.data['recipe'])
def test_RecipeViews_UpdateRecipe_InvalidId(self):
recipe_data = self.data[0]
recipe_data['last_brew_date'] = recipe_data['last_brew_date'].strftime('%Y-%m-%d')
response = self.client.put('/api/v1/recipe/99/', data=json.dumps(self.data[0]), content_type='application/json')
# TODO: Since the get or 404 is a uniform function of Django, having a testing utility function to check the response would
# make sense.
self.assertEqual(response.status_code, 404)
self.assertEqual(response.reason_phrase.lower(), 'not found')
self.assertEqual(response.data['detail'].lower(), 'not found.')
def test_RecipeViews_UpdateRecipe_InvalidData(self):
recipe_data = self.data[0]
db_entry = Recipe.objects.get(recipe_name=recipe_data['recipe_name'])
response = self.client.put('/api/v1/recipe/' + str(db_entry.id) + '/', data=json.dumps({}), content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase.lower(), 'bad request')
self.assertEqual(response.data['message'], 'Recipe could not be updated with the received data.')
|
moonboy13/brew-journal
|
brew_journal/recipies/tests.py
|
Python
|
apache-2.0
| 21,233
|
#!/usr/bin/python
'''
This script is used to generate a set of random-ish events to
simulate log data from a Juniper Netscreen FW. It was built
around using netcat to feed data into Flume for ingestion
into a Hadoop cluster.
Once you have Flume configured you would use the following
command to populate data:
./gen_events.py 2>&1 | nc 127.0.0.1 9999
'''
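# Example of a single emitted event (field values are illustrative; the layout
# follows the format string built inside the loop below):
#   192.168.1.3 Netscreen-FW1: NetScreen device_id=Netscreen-FW1
#   [Root]system-notification-00257(traffic): start_time="YYYY-MM-DD HH:MM:SS"
#   duration=0 policy_id=125 service=syslog proto=6 src zone=Untrust
#   dst zone=Trust action=Deny sent=0 rcvd=0 src=192.168.1.42 dst=172.35.0.9
#   src_port=80 dst_port=443 session_id=0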
import random
from netaddr import *
from time import sleep
protocols = ['6', '17']
common_ports = ['20','21','22','23','25','80','109','110','119','143','156','161','389','443']
action_list = ['Deny', 'Accept', 'Drop', 'Reject']
src_network = IPNetwork('192.168.1.0/24')
dest_network = IPNetwork('172.35.0.0/16')
fo = open("replay_log.txt", "w")
while True:
proto_index = random.randint(0,1)
protocol = protocols[proto_index]
src_port_index = random.randint(0,13)
dest_port_index = random.randint(0,13)
src_port = common_ports[src_port_index]
dest_port = common_ports[dest_port_index]
action_index = random.randint(0,3)
action = action_list[action_index]
src_ip_index = random.randint(1,254)
src_ip = src_network[src_ip_index]
dest_ip_index = random.randint(1,65535)
dest_ip = dest_network[dest_ip_index]
event = "192.168.1.3 Netscreen-FW1: NetScreen device_id=Netscreen-FW1 [Root]system-notification-00257(traffic): start_time=\"YYYY-MM-DD HH:MM:SS\" duration=0 policy_id=125 service=syslog proto=%s src zone=Untrust dst zone=Trust action=%s sent=0 rcvd=0 src=%s dst=%s src_port=%s dst_port=%s session_id=0" % (protocol, action, src_ip, dest_ip, src_port, dest_port)
fo.write(event + "\n")
    print(event)
sleep(0.3)
fo.close()
|
jpacerqueira/jpac-flume-logs
|
generator/gen_events.py
|
Python
|
apache-2.0
| 1,662
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.add_product_to_cart_request import AddProductToCartRequest
class TestAddProductToCartRequest(unittest.TestCase):
""" AddProductToCartRequest unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testAddProductToCartRequest(self):
"""
Test AddProductToCartRequest
"""
model = kinow_client.models.add_product_to_cart_request.AddProductToCartRequest()
if __name__ == '__main__':
unittest.main()
|
kinow-io/kinow-python-sdk
|
test/test_add_product_to_cart_request.py
|
Python
|
apache-2.0
| 841
|
# -*- encoding: utf-8 -*-
# from django.test import TestCase
# from block.tests.helper import check_content
# from compose.tests.factories import HeaderFactory
# class TestHeader(TestCase):
#
# def test_content_methods(self):
# c = HeaderFactory()
# check_content(c)
|
pkimber/compose
|
compose/tests/test_header.py
|
Python
|
apache-2.0
| 291
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import RegionCommitmentsTransport
from .rest import RegionCommitmentsRestTransport
from .rest import RegionCommitmentsRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[RegionCommitmentsTransport]]
_transport_registry["rest"] = RegionCommitmentsRestTransport
__all__ = (
"RegionCommitmentsTransport",
"RegionCommitmentsRestTransport",
"RegionCommitmentsRestInterceptor",
)
|
googleapis/python-compute
|
google/cloud/compute_v1/services/region_commitments/transports/__init__.py
|
Python
|
apache-2.0
| 1,127
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import unittest
from citest.base import (
ExecutionContext,
JsonSnapshotHelper)
import citest.json_contract as jc
import citest.json_predicate as jp
_called_verifiers = []
_TEST_FOUND_ERROR_COMMENT='Found error.'
class TestObsoleteObservationFailureVerifier(jc.ObservationFailureVerifier):
def __init__(self, title, expect):
super(TestObsoleteObservationFailureVerifier, self).__init__(title)
self.__expect = expect
def _error_comment_or_none(self, error):
if error.args[0] == self.__expect:
return _TEST_FOUND_ERROR_COMMENT
return None
def _makeObservationVerifyResult(
valid, observation=None,
good_results=None, bad_results=None, failed_constraints=None):
default_result = jp.PredicateResult(valid=valid)
good_results = good_results or ([default_result] if valid else [])
bad_results = bad_results or ([] if valid else [default_result])
failed_constraints = failed_constraints or []
observation = observation or jc.Observation()
good_attempt_results = [jp.ObjectResultMapAttempt(observation, result)
for result in good_results]
bad_attempt_results = [jp.ObjectResultMapAttempt(observation, result)
for result in bad_results]
return jc.ObservationVerifyResult(
valid=valid, observation=observation,
good_results=good_attempt_results,
bad_results=bad_attempt_results,
failed_constraints=failed_constraints)
class FakeObservationVerifier(jc.ObservationVerifier):
def __init__(self, title, dnf_verifier, result):
super(FakeObservationVerifier, self).__init__(
title=title, dnf_verifiers=dnf_verifier)
self.__result = result
def __call__(self, context, observation):
_called_verifiers.append(self)
return self.__result
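# Note: FakeObservationVerifier appends itself to the module-level
# _called_verifiers list on each call. The conjunction/disjunction tests below
# reset that list and then assert on it to verify that evaluation
# short-circuits (e.g. only verifiers[:1] were actually invoked).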
class ObservationVerifierTest(unittest.TestCase):
def assertEqual(self, expect, have, msg=''):
if not msg:
msg = 'EXPECTED\n{0!r}\nGOT\n{1!r}'.format(expect, have)
JsonSnapshotHelper.AssertExpectedValue(expect, have, msg)
def test_result_builder_add_good_result(self):
context = ExecutionContext()
observation = jc.Observation()
observation.add_object('A')
pred = jp.PathPredicate(None, jp.STR_EQ('A'))
builder = jc.ObservationVerifyResultBuilder(observation)
map_pred = jp.MapPredicate(pred)
map_result = map_pred(context, observation.objects)
builder.add_map_result(map_result)
verify_results = builder.build(True)
self.assertTrue(verify_results)
self.assertEqual(observation, verify_results.observation)
self.assertEqual([], verify_results.bad_results)
self.assertEqual([], verify_results.failed_constraints)
self.assertEqual(map_result.good_object_result_mappings,
verify_results.good_results)
def test_result_builder_add_bad_result(self):
context = ExecutionContext()
observation = jc.Observation()
observation.add_object('A')
pred = jp.PathPredicate(None, jp.STR_EQ('B'))
builder = jc.ObservationVerifyResultBuilder(observation)
map_pred = jp.MapPredicate(pred)
map_result = map_pred(context, observation.objects)
builder.add_map_result(map_result)
verify_results = builder.build(False)
self.assertFalse(verify_results)
self.assertEqual(observation, verify_results.observation)
self.assertEqual([], verify_results.good_results)
self.assertEqual([pred], verify_results.failed_constraints)
self.assertEqual(map_result.bad_object_result_mappings,
verify_results.bad_results)
def test_result_builder_add_mixed_results(self):
context = ExecutionContext()
observation = jc.Observation()
observation.add_object('GOOD')
observation.add_object('BAD')
pred = jp.PathPredicate(None, jp.STR_EQ('GOOD'))
builder = jc.ObservationVerifyResultBuilder(observation)
map_pred = jp.MapPredicate(pred)
map_result = map_pred(context, observation.objects)
builder.add_map_result(map_result)
verify_results = builder.build(False)
self.assertFalse(verify_results)
self.assertEqual(observation, verify_results.observation)
self.assertEqual(map_result.good_object_result_mappings,
verify_results.good_results)
self.assertEqual([], verify_results.failed_constraints)
self.assertEqual(map_result.bad_object_result_mappings,
verify_results.bad_results)
def test_result_observation_verifier_conjunction_ok(self):
context = ExecutionContext()
builder = jc.ObservationVerifierBuilder(title='Test')
verifiers = []
pred_results = []
for i in range(3):
this_result = jp.PredicateResult(True, comment='Pred {0}'.format(i))
pred_results.append(this_result)
result = _makeObservationVerifyResult(
valid=True, good_results=[this_result])
fake_verifier = FakeObservationVerifier(
title=i, dnf_verifier=[], result=result)
verifiers.append(fake_verifier)
builder.AND(fake_verifier)
# verify build can work multiple times
self.assertEqual(builder.build(), builder.build())
verifier = builder.build()
self.assertEqual([verifiers], verifier.dnf_verifiers)
expect = _makeObservationVerifyResult(True, good_results=pred_results)
global _called_verifiers
_called_verifiers = []
got = verifier(context, jc.Observation())
self.assertEqual(expect, got)
self.assertEqual(verifiers, _called_verifiers)
def test_result_observation_verifier_conjunction_failure_aborts_early(self):
context = ExecutionContext()
builder = jc.ObservationVerifierBuilder(title='Test')
verifiers = []
results = []
pred_results = [jp.PredicateResult(False, comment='Result %d' % i)
for i in range(3)]
for i in range(3):
result = _makeObservationVerifyResult(
valid=False, bad_results=[pred_results[i]])
fake_verifier = FakeObservationVerifier(
title=i, dnf_verifier=[], result=result)
verifiers.append(fake_verifier)
results.append(result)
builder.AND(fake_verifier)
# verify build can work multiple times
self.assertEqual(builder.build(), builder.build())
verifier = builder.build()
self.assertEqual([verifiers], verifier.dnf_verifiers)
expect = _makeObservationVerifyResult(
False, bad_results=[pred_results[0]])
global _called_verifiers
_called_verifiers = []
got = verifier(context, jc.Observation())
self.assertEqual(expect, got)
self.assertEqual(verifiers[:1], _called_verifiers)
def test_result_observation_verifier_disjunction_success_aborts_early(self):
context = ExecutionContext()
builder = jc.ObservationVerifierBuilder(title='Test')
verifiers = []
results = []
pred_results = [jp.PredicateResult(False, comment='Result %d' % i)
for i in range(2)]
for i in range(2):
result = _makeObservationVerifyResult(
valid=True, good_results=[pred_results[i]])
fake_verifier = FakeObservationVerifier(
title=i, dnf_verifier=[], result=result)
verifiers.append(fake_verifier)
results.append(result)
builder.OR(fake_verifier)
verifier = builder.build()
self.assertEqual([verifiers[0:1], verifiers[1:2]], verifier.dnf_verifiers)
expect = _makeObservationVerifyResult(True, good_results=[pred_results[0]])
global _called_verifiers
_called_verifiers = []
got = verifier(context, jc.Observation())
self.assertEqual(expect, got)
self.assertEqual(verifiers[:1], _called_verifiers)
def test_result_observation_verifier_disjunction_failure(self):
context = ExecutionContext()
observation = jc.Observation()
builder = jc.ObservationVerifierBuilder(title='Test')
verifiers = []
results = []
pred_results = [jp.PredicateResult(False, comment='Result %d' % i)
for i in range(2)]
for i in range(2):
result = _makeObservationVerifyResult(observation=observation,
valid=False, bad_results=[pred_results[i]])
fake_verifier = FakeObservationVerifier(
title=i, dnf_verifier=[], result=result)
verifiers.append(fake_verifier)
results.append(result)
builder.OR(fake_verifier)
verifier = builder.build()
self.assertEqual([verifiers[0:1], verifiers[1:2]], verifier.dnf_verifiers)
expect = _makeObservationVerifyResult(
False, observation=observation, bad_results=pred_results)
global _called_verifiers
_called_verifiers = []
got = verifier(context, observation)
self.assertEqual(expect, got)
self.assertEqual(verifiers, _called_verifiers)
def test_obsolete_observation_failure_ok(self):
error_text = 'the error'
context = ExecutionContext()
observation = jc.Observation()
error = ValueError(error_text)
observation.add_error(error)
failure_verifier = TestObsoleteObservationFailureVerifier(
'Test', error_text)
failure_pred_result = jc.ObservationFailedError([error], valid=True)
expect_failure = jc.ObservationVerifyResult(
valid=True, observation=observation,
good_results=[jp.ObjectResultMapAttempt(observation,
failure_pred_result)],
bad_results=[], failed_constraints=[],
comment=_TEST_FOUND_ERROR_COMMENT)
got = failure_verifier(context, observation)
self.assertEqual(expect_failure, got)
builder = jc.ObservationVerifierBuilder(title='Test')
builder.EXPECT(failure_verifier)
verifier = builder.build()
expect = jc.ObservationVerifyResult(
valid=True, observation=observation,
good_results=expect_failure.good_results,
bad_results=[], failed_constraints=[])
got = verifier(context, observation)
self.assertEqual(expect, got)
def test_observation_failure_ok(self):
error_text = 'the error'
context = ExecutionContext()
observation = jc.Observation()
error = ValueError(error_text)
observation.add_error(error)
exception_pred = jp.ExceptionMatchesPredicate(
ValueError, regex=error_text)
builder = jc.ObservationVerifierBuilder(title='Test')
builder.EXPECT(jc.ObservationErrorPredicate(jp.LIST_MATCHES([exception_pred])))
failure_verifier = builder.build()
observation_predicate_result = jc.ObservationPredicateResult(
True, observation, jp.LIST_MATCHES([exception_pred]),
jp.LIST_MATCHES([exception_pred])(context, [error]))
expect_failure = jc.ObservationVerifyResult(
True, observation,
good_results=[observation_predicate_result],
bad_results=[], failed_constraints=[])
got = failure_verifier(context, observation)
self.assertEqual(expect_failure, got)
def test_obsolete_observation_failure_not_ok(self):
error_text = 'the error'
context = ExecutionContext()
observation = jc.Observation()
error = ValueError('not the error')
observation.add_error(error)
failure_verifier = TestObsoleteObservationFailureVerifier(
'Test', error_text)
comment = failure_verifier._error_not_found_comment(observation)
failure_pred_result = jp.PredicateResult(valid=False, comment=comment)
expect_failure = jc.ObservationVerifyResult(
valid=False, observation=observation,
bad_results=[jp.ObjectResultMapAttempt(observation,
failure_pred_result)],
good_results=[], failed_constraints=[],
comment=comment)
self.assertEqual(expect_failure, failure_verifier(context, observation))
builder = jc.ObservationVerifierBuilder(title='Test Verifier')
builder.EXPECT(failure_verifier)
verifier = builder.build()
expect = jc.ObservationVerifyResult(
valid=False, observation=observation,
bad_results=expect_failure.bad_results,
good_results=[], failed_constraints=[])
got = verifier(context, observation)
self.assertEqual(expect, got)
def test_obsolete_observation_failure_or_found(self):
context = ExecutionContext()
observation = jc.Observation()
observation.add_error(ValueError('not the error'))
failure_verifier = TestObsoleteObservationFailureVerifier(
'Verify', 'NotFound')
comment = failure_verifier._error_not_found_comment(observation)
failure_result = jp.PredicateResult(valid=False, comment=comment)
# We've already established this result is what we expect
bad_observation_result = failure_verifier(context, observation)
success_pred_result = jp.PredicateResult(valid=True)
good_observation_result = _makeObservationVerifyResult(
valid=True,
good_results=[success_pred_result],
observation=observation)
success_verifier = FakeObservationVerifier(
'Found', dnf_verifier=[], result=good_observation_result)
builder = jc.ObservationVerifierBuilder(title='Observation Verifier')
builder.EXPECT(failure_verifier).OR(success_verifier)
verifier = builder.build()
expect = jc.ObservationVerifyResult(
valid=True, observation=observation,
bad_results=bad_observation_result.bad_results,
good_results=good_observation_result.good_results,
failed_constraints=[])
got = verifier(context, observation)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
google/citest
|
tests/json_contract/observation_verifier_test.py
|
Python
|
apache-2.0
| 14,132
|
import re
import uuid as py_uuid
from common_fixtures import * # NOQA
from test_volume import VOLUME_CLEANUP_LABEL
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
reason='DOCKER_TEST is not set')
os_environ = "os.environ.get('DOCKER_VERSION') != '1.12.1'"
if_docker_1_12 = pytest.mark.skipif(os_environ,
reason='Docker version is not 1.12.1')
sched_environ = "os.environ.get('CATTLE_TEST_RESOURCE_SCHEDULER') != 'true'"
if_resource_scheduler = pytest.mark.skipif(sched_environ)
@pytest.fixture(scope='session')
def docker_client(super_client):
for host in super_client.list_host(state='active', remove_null=True,
kind='docker'):
key = super_client.create_api_key(accountId=host.accountId)
super_client.wait_success(key)
wait_for(lambda: host.agent().state == 'active')
wait_for(lambda: len(host.storagePools()) > 0 and
host.storagePools()[0].state == 'active')
return api_client(key.publicValue, key.secretValue)
raise Exception('Failed to find docker host, please register one')
@if_docker
def test_docker_create_only(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_only_from_sha(docker_client, super_client):
image_name = 'tianon/true@sha256:662fc60808e6d5628a090e39' \
'b4bcae694add28a626031cc889109c2cf2af5d73'
uuid = 'docker:' + image_name
container = docker_client.create_container(name='test-sha256',
imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == TEST_IMAGE
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_build(docker_client, super_client):
uuid = 'image-' + random_str()
url = 'https://github.com/rancherio/tiny-build/raw/master/build.tar'
container = docker_client.create_container(imageUuid='docker:' + uuid,
networkMode='bridge',
build={
'context': url,
})
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
# This builds tianon/true which just dies
assert container.state == 'running' or container.state == 'stopped'
assert container.transitioning == 'no'
assert container.data.dockerContainer.Image == uuid
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start_using_docker_io(docker_client, super_client):
image = 'docker.io/' + TEST_IMAGE
uuid = 'docker:' + image
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == image
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '42'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '42']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command_args(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '1', '2',
'3'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '1', '2',
'3']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_short_lived_container(docker_client, super_client):
container = docker_client.create_container(imageUuid="docker:tianon/true",
networkMode='bridge')
container = wait_for_condition(
docker_client, container,
lambda x: x.state == 'stopped',
lambda x: 'State is: ' + x.state)
assert container.state == 'stopped'
assert container.transitioning == 'no'
@if_docker
def test_docker_stop(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
start = time.time()
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
delta = time.time() - start
assert container.state == 'stopped'
assert delta < 10
@if_docker
def test_docker_purge(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
assert container.state == 'stopped'
docker_client.delete(container)
container = docker_client.wait_success(container)
assert container.removed is not None
@if_docker
def test_docker_ports_from_container_publish_all(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
publishAllPorts=True,
imageUuid=uuid)
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is not None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container_no_publish(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
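    # Shorthand: _(x) reloads x through super_client, which is what the
    # assertions below rely on to read fields such as nics() and subnetId.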
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
startOnCreate=False,
publishAllPorts=True,
imageUuid=uuid,
ports=[
'8081',
'8082/tcp',
'8083/udp'])
c = docker_client.wait_success(c)
assert c.state == 'stopped'
count = 0
for port in c.ports_link():
count += 1
assert port.kind == 'userPort'
assert port.publicPort is None
assert port.privateIpAddressId is None
assert port.publicIpAddressId is None
if port.privatePort == 8081:
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.protocol == 'udp'
else:
assert False
assert count == 3
c = docker_client.wait_success(c.start())
assert c.state == 'running'
count = 0
ip = None
privateIp = None
for port in c.ports_link():
count += 1
assert port.privateIpAddressId is not None
privateIp = port.privateIpAddress()
assert privateIp.kind == 'docker'
assert _(privateIp).subnetId is None
assert port.publicPort is not None
assert port.publicIpAddressId is not None
if ip is None:
ip = port.publicIpAddressId
assert port.publicIpAddressId == ip
if port.privatePort == 8081:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.kind == 'userPort'
assert port.protocol == 'udp'
elif port.privatePort == 8080:
assert port.kind == 'imagePort'
else:
assert False
assert count == 4
assert c.primaryIpAddress == privateIp.address
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'inactive'
assert ip.address is None
assert count == 1
c = docker_client.wait_success(c.start())
if c.state != 'running':
super_c = super_client.reload(c)
        print('DEBUG Container not running: %s' % super_c)
assert c.state == 'running'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'active'
assert ip.address is not None
assert count == 1
docker_client.delete(c)
@if_docker
def test_no_port_override(docker_client, super_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
ports=['8083:8080'])
try:
c = super_client.wait_success(c, timeout=240)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
assert ports[0].kind == 'userPort'
assert ports[0].publicPort == 8083
assert ports[0].privatePort == 8080
finally:
if c is not None:
super_client.delete(c)
@if_docker
def test_docker_volumes(docker_client, super_client):
uuid = TEST_IMAGE_UUID
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False,
dataVolumes=['/foo',
bar_bind_mount])
c = docker_client.wait_success(c)
assert len(c.dataVolumes) == 2
assert set(c.dataVolumes) == set(['/foo', bar_bind_mount])
c = super_client.wait_success(c.start())
mounts = c.mounts_link()
assert len(mounts) == 1
foo_mount = None
foo_vol = None
for mount in mounts:
assert mount.instance().id == c.id
if mount.path == '/foo':
foo_mount = mount
foo_vol = mount.volume()
foo_vol = wait_for_condition(
docker_client, foo_vol, lambda x: x.state == 'active')
assert foo_mount is not None
assert foo_mount.permissions == 'rw'
assert foo_vol is not None
assert not foo_vol.isHostPath
c2 = docker_client.create_container(name="volumes_from_test",
networkMode='bridge',
imageUuid=uuid,
startOnCreate=False,
dataVolumesFrom=[c.id])
c2 = docker_client.wait_success(c2)
assert len(c2.dataVolumesFrom) == 1
assert set(c2.dataVolumesFrom) == set([c.id])
c2 = super_client.wait_success(c2.start())
c2_mounts = c2.mounts_link()
assert len(c2_mounts) == 1
for mount in c2_mounts:
assert mount.instance().id == c2.id
if mount.path == '/foo':
assert mount.volumeId == foo_vol.id
c = docker_client.wait_success(c.stop(timeout=0))
c2 = docker_client.wait_success(c2.stop(timeout=0))
docker_client.delete(c2)
docker_client.wait_success(c2)
docker_client.delete(c)
docker_client.wait_success(c)
# pass False because the volume is deleted as soon as the container is deleted
_check_path(foo_vol, False, docker_client, super_client)
@if_docker
def test_stack_volume_delete(docker_client, super_client):
stack = docker_client.create_stack(name=random_str())
stack = docker_client.wait_success(stack)
docker_client.create_volumeTemplate(name="foo", stackId=stack.id)
# create service
launch_config = {"imageUuid": "docker:debian", "dataVolumes": "foo:/bar",
"networkMode": "none",
"labels": {"io.rancher.container.start_once": "true"},
"command": ["mkdir", "/bar/touched"]}
svc1 = docker_client.create_service(name=random_str(), stackId=stack.id,
launchConfig=launch_config, scale=1)
svc1 = docker_client.wait_success(svc1)
docker_client.wait_success(svc1.activate())
c = _validate_compose_instance_stopped(docker_client, svc1, stack, "1")
mounts = check_mounts(docker_client, c, 1)
vol = mounts[0].volume()
# remove stack, validate its volume is removed on the host
docker_client.wait_success(stack.remove())
_check_path(vol, False, docker_client, super_client,
["%s:/test" % vol.name], "/test/touched")
def _validate_compose_instance_stopped(client, service, env,
number, launch_config_name=None):
cn = launch_config_name + "-" if launch_config_name is not None else ""
name = env.name + "-" + service.name + "-" + cn + number
def wait_for_map_count(service):
instances = client.list_container(name=name, state="stopped")
return len(instances) == 1
wait_for(lambda: wait_for_condition(client, service, wait_for_map_count))
instances = client.list_container(name=name, state="stopped")
return instances[0]
@if_docker
def test_container_fields(docker_client, super_client):
caps = ["SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN",
"SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG",
"MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE",
"MAC_ADMIN", "NET_ADMIN", "SYSLOG", "CHOWN", "NET_RAW",
"DAC_OVERRIDE", "FOWNER", "DAC_READ_SEARCH", "FSETID",
"KILL", "SETGID", "SETUID", "LINUX_IMMUTABLE",
"NET_BIND_SERVICE", "NET_BROADCAST", "IPC_LOCK",
"IPC_OWNER", "SYS_CHROOT", "SYS_PTRACE", "SYS_BOOT",
"LEASE", "SETFCAP", "WAKE_ALARM", "BLOCK_SUSPEND", "ALL"]
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
c = docker_client.create_container(name=test_name + random_str(),
networkMode='bridge',
imageUuid=image_uuid,
capAdd=caps,
capDrop=caps,
dnsSearch=['8.8.8.8', '1.2.3.4'],
dns=['8.8.8.8', '1.2.3.4'],
privileged=True,
domainName="rancher.io",
memory=12000000,
memorySwap=16000000,
memoryReservation=4194304,
cpuSet="0,1",
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
cpuShares=400,
restartPolicy=restart_policy,
devices="/dev/null:/dev/xnull:rw")
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert set(c.data['dockerInspect']['HostConfig']['CapAdd']) == set(caps)
assert set(c.data['dockerInspect']['HostConfig']['CapDrop']) == set(caps)
actual_dns = c.data['dockerInspect']['HostConfig']['Dns']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', '169.254.169.250'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
actual_dns = c.data['dockerInspect']['HostConfig']['DnsSearch']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', 'rancher.internal'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
assert c.data['dockerInspect']['HostConfig']['Privileged']
assert c.data['dockerInspect']['Config']['Domainname'] == "rancher.io"
assert c.data['dockerInspect']['HostConfig']['Memory'] == 12000000
assert c.data['dockerInspect']['HostConfig'][
'MemoryReservation'] == 4194304
# assert c.data['dockerInspect']['Config']['MemorySwap'] == 16000000
assert c.data['dockerInspect']['HostConfig']['CpusetCpus'] == "0,1"
assert c.data['dockerInspect']['Config']['Tty']
assert c.data['dockerInspect']['Config']['OpenStdin']
actual_entry_point = set(c.data['dockerInspect']['Config']['Entrypoint'])
assert actual_entry_point == set(["/bin/sh", "-c"])
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 400
act_restart_pol = c.data['dockerInspect']['HostConfig']['RestartPolicy']
assert act_restart_pol['MaximumRetryCount'] == 2
assert act_restart_pol['Name'] == "on-failure"
actual_devices = c.data['dockerInspect']['HostConfig']['Devices']
assert len(actual_devices) == 1
assert actual_devices[0]['CgroupPermissions'] == "rw"
assert actual_devices[0]['PathOnHost'] == "/dev/null"
assert actual_devices[0]['PathInContainer'] == "/dev/xnull"
@if_docker
def test_docker_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
privileged = True
blkioWeight = 100
cpuPeriod = 100000
cpuQuota = 50000
cpuSetMems = "0"
kernelMemory = 10000000
memory = 10000000
groupAdd = ['root']
memorySwappiness = 50
oomScoreAdj = 500
shmSize = 67108864
tmpfs = {"/run": "rw,noexec,nosuid,size=65536k"}
uts = "host"
ipcMode = "host"
stopSignal = "SIGTERM"
ulimits = [{"name": "cpu", "hard": 100000, "soft": 100000}]
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
privileged=privileged,
blkioWeight=blkioWeight,
cpuPeriod=cpuPeriod,
cpuQuota=cpuQuota,
cpuSetMems=cpuSetMems,
kernelMemory=kernelMemory,
groupAdd=groupAdd,
memory=memory,
memorySwappiness=memorySwappiness,
oomScoreAdj=oomScoreAdj,
shmSize=shmSize,
tmpfs=tmpfs,
uts=uts,
ipcMode=ipcMode,
stopSignal=stopSignal,
networkMode='bridge',
ulimits=ulimits)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert c.data['dockerInspect']['HostConfig']['BlkioWeight'] == 100
assert c.data['dockerInspect']['HostConfig']['CpuPeriod'] == 100000
assert c.data['dockerInspect']['HostConfig']['CpuQuota'] == 50000
assert c.data['dockerInspect']['HostConfig']['CpusetMems'] == "0"
assert c.data['dockerInspect']['HostConfig']['KernelMemory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['Memory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['MemorySwappiness'] == 50
assert c.data['dockerInspect']['HostConfig']['GroupAdd'] == ['root']
assert not c.data['dockerInspect']['HostConfig']['OomKillDisable']
assert c.data['dockerInspect']['HostConfig']['OomScoreAdj'] == 500
assert c.data['dockerInspect']['HostConfig']['ShmSize'] == 67108864
run_args = "rw,noexec,nosuid,size=65536k"
assert c.data['dockerInspect']['HostConfig']['Tmpfs'] == {"/run": run_args}
assert c.data['dockerInspect']['HostConfig']['UTSMode'] == 'host'
assert c.data['dockerInspect']['HostConfig']['IpcMode'] == 'host'
host_limits = {"Name": "cpu", "Hard": 100000, "Soft": 100000}
assert c.data['dockerInspect']['HostConfig']['Ulimits'] == [host_limits]
assert c.data['dockerInspect']['Config']['StopSignal'] == 'SIGTERM'
@if_docker_1_12
def test_docker_extra_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
sysctls = {"net.ipv4.ip_forward": "1"}
healthCmd = ["ls"]
healthInterval = 5
healthRetries = 3
healthTimeout = 60
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
sysctls=sysctls,
healthCmd=healthCmd,
healthTimeout=healthTimeout,
healthRetries=healthRetries,
healthInterval=healthInterval)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
host_sysctls = {"net.ipv4.ip_forward": "1"}
assert c.data['dockerInspect']['HostConfig']['Sysctls'] == host_sysctls
assert c.data['dockerInspect']['Config']['Healthcheck']['Test'] == ['ls']
h_interval = c.data['dockerInspect']['Config']['Healthcheck']['Interval']
assert h_interval == 5000000000
h_timeout = c.data['dockerInspect']['Config']['Healthcheck']['Timeout']
assert h_timeout == 60000000000
assert c.data['dockerInspect']['Config']['Healthcheck']['Retries'] == 3
@if_docker
def test_container_milli_cpu_reservation(docker_client, super_client):
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
networkMode='bridge',
milliCpuReservation=2000,
cpuShares=400)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
# milliCpuReservation will take precedence over cpuShares and be converted
# to a value that is (milliCpuShares / 1000) * 1024
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 2048
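# Illustrative arithmetic for the conversion noted above (not part of the original test):
# with milliCpuReservation=2000, CpuShares = (2000 / 1000) * 1024 = 2048,
# so the requested cpuShares=400 is ignored in favor of the converted value.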
def get_mounts(resource):
return [x for x in resource.mounts_link() if x.state != 'inactive']
def check_mounts(client, resource, count):
def wait_for_mount_count(res):
m = get_mounts(res)
return len(m) == count
wait_for_condition(client, resource, wait_for_mount_count)
mounts = get_mounts(resource)
return mounts
def volume_cleanup_setup(docker_client, uuid, strategy=None):
labels = {}
if strategy:
labels[VOLUME_CLEANUP_LABEL] = strategy
vol_name = random_str()
c = docker_client.create_container(name="volume_cleanup_test",
imageUuid=uuid,
networkMode='bridge',
dataVolumes=['/tmp/foo',
'%s:/foo' % vol_name],
labels=labels)
c = docker_client.wait_success(c)
if strategy:
assert c.labels[VOLUME_CLEANUP_LABEL] == strategy
mounts = check_mounts(docker_client, c, 2)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
named_vol = v1 if v1.name == vol_name else v2
unnamed_vol = v1 if v1.name != vol_name else v2
c = docker_client.wait_success(c.stop(timeout=0))
docker_client.delete(c)
docker_client.wait_success(c)
check_mounts(docker_client, c, 0)
return c, named_vol, unnamed_vol
@if_docker
def test_cleanup_volume_strategy(docker_client):
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID)
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='unnamed')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='none')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).state == 'detached'
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='all')
assert docker_client.wait_success(named_vol).removed is not None
assert docker_client.wait_success(unnamed_vol).removed is not None
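# Summary of the cleanup strategies exercised above (derived from the asserts):
#   default / 'unnamed' -> unnamed volume removed, named volume left detached
#   'none'              -> both volumes left detached
#   'all'               -> both volumes removed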
@if_docker
def test_docker_volume_long(docker_client):
a = 'a' * 200
v = 'tmp:/tmp/{}'.format(a)
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
dataVolumes=[v],
command=['sleep', '42'])
c = docker_client.wait_success(c)
assert c.state == 'running'
vol = c.mounts_link()[0].volume()
vol = docker_client.wait_success(vol)
wait_state(docker_client, vol, 'active')
@if_docker
def test_docker_mount_life_cycle(docker_client):
# Using nginx because it has a baked in volume, which is a good test case
uuid = 'docker:nginx:1.9.0'
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
startOnCreate=False,
networkMode='bridge',
dataVolumes=['%s:/foo' % random_str(),
bar_bind_mount])
c = docker_client.wait_success(c)
c = docker_client.wait_success(c.start())
mounts = check_mounts(docker_client, c, 2)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.remove())
check_mounts(docker_client, c, 0)
# State can be either detached or removed depending on whether c got purged
assert docker_client.wait_success(v1).state != 'active'
assert docker_client.wait_success(v2).state != 'active'
@if_docker
def test_docker_labels(docker_client, super_client):
# Docker 1.8 broke the behavior where labels would come from the image;
# maybe one day it will be brought back.
# image_uuid = 'docker:ranchertest/labelled:v0.1.0'
image_uuid = TEST_IMAGE_UUID
c = docker_client.create_container(name="labels_test",
imageUuid=image_uuid,
networkMode='bridge',
labels={'io.rancher.testlabel.'
'fromapi': 'yes'})
c = docker_client.wait_success(c)
def labels_callback():
labels = c.instanceLabels()
if len(labels) >= 3:
return labels
return None
labels = wait_for(labels_callback)
actual_labels = {}
for l in labels:
actual_labels[l.key] = l.value
sc = super_client.reload(c)
mac_address = sc.nics()[0].macAddress
expected_labels = {
# 'io.rancher.testlabel': 'value1',
# 'io.rancher.testlabel.space': 'value 1',
'io.rancher.testlabel.fromapi': 'yes',
'io.rancher.container.uuid': c.uuid,
'io.rancher.container.name': c.name,
'io.rancher.container.mac_address': mac_address,
}
assert all(item in actual_labels.items()
for item in expected_labels.items())
docker_client.delete(c)
@if_docker
def test_container_odd_fields(super_client, docker_client):
c = docker_client.create_container(pidMode=None,
imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
logConfig={
'driver': None,
'config': None,
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.logConfig == {'type': 'logConfig', 'driver': None, 'config': None}
c = super_client.reload(c)
assert c.data.dockerInspect.HostConfig.LogConfig['Type'] == 'json-file'
assert not c.data.dockerInspect.HostConfig.LogConfig['Config']
@if_docker
def test_container_bad_build(super_client, docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
build={
'context': None,
'remote': None
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.build == {'context': None,
'dockerfile': None,
'forcerm': False,
'nocache': False,
'rm': False,
'remote': None,
'type': 'dockerBuild'}
c = super_client.reload(c)
assert c.data.dockerInspect.Config.Image == TEST_IMAGE
@if_docker
def test_service_link_emu_docker_link(super_client, docker_client):
env_name = random_str()
env = docker_client.create_stack(name=env_name)
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service = docker_client.create_service(name='client', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service_link = {"serviceId": server.id, "name": "other"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server)
service = docker_client.wait_success(service)
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
instance = find_one(service.instances)
instance = super_client.reload(instance)
link = find_one(instance.instanceLinks)
target_instance = find_one(server.instances)
assert link.targetInstanceId == target_instance.id
assert link.instanceNames == ['{}-server-1'.format(env_name)]
docker_client.delete(env)
@if_docker
def test_service_links_with_no_ports(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
server = docker_client.wait_success(server)
assert server.state == 'inactive'
service = docker_client.create_service(name='client', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
service = docker_client.wait_success(service)
assert service.state == 'inactive'
service_link = {"serviceId": server.id, "name": "bb"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
@if_docker
def test_blkio_device_options(super_client, docker_client):
dev_opts = {
'/dev/sda': {
'readIops': 1000,
'writeIops': 2000,
},
'/dev/null': {
'readBps': 3000,
}
}
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode=None,
blkioDeviceOptions=dev_opts)
c = docker_client.wait_success(c)
assert c.state == 'running'
super_c = super_client.reload(c)
hc = super_c.data.dockerInspect['HostConfig']
assert hc['BlkioDeviceReadIOps'] == [{'Path': '/dev/sda', 'Rate': 1000}]
assert hc['BlkioDeviceWriteIOps'] == [{'Path': '/dev/sda', 'Rate': 2000}]
assert hc['BlkioDeviceReadBps'] == [{'Path': '/dev/null', 'Rate': 3000}]
@if_resource_scheduler
def test_port_constraint(docker_client):
# Tests with the above label can only be run when the external scheduler
# is enabled. It isn't in CI, so we need to disable these tests by default.
# They can (and should) be run locally when working on the scheduler.
containers = []
try:
c = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
containers.append(c)
# try to deploy another container with same public port + protocol
c2 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
assert c2.transitioning == 'error'
assert '9998:81/tcp' in c2.transitioningMessage
assert c2.state == 'error'
containers.append(c2)
# try different public port
c3 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/tcp']))
containers.append(c3)
# try different protocol
c4 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
containers.append(c4)
# UDP is now taken
c5 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
assert c5.transitioning == 'error'
assert '9999:81/udp' in c5.transitioningMessage
assert c5.state == 'error'
containers.append(c5)
# try different bind IP
c6 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
containers.append(c6)
# Bind IP is now taken
c7 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
assert c7.transitioning == 'error'
assert '127.2.2.1:9997:81/tcp' in c7.transitioningMessage
assert c7.state == 'error'
containers.append(c7)
finally:
for c in containers:
if c is not None:
c = docker_client.wait_success(docker_client.delete(c))
c.purge()
@if_resource_scheduler
def test_conflicting_ports_in_deployment_unit(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID, "ports": ['7777:6666']}
secondary_lc = {"imageUuid": TEST_IMAGE_UUID,
"name": "secondary", "ports": ['7777:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '7777:6666/tcp' in c.transitioningMessage
env.remove()
@if_resource_scheduler
def test_simultaneous_port_allocation(docker_client):
# This test ensures that if two containers are allocated simultaneously, only
# one will get the port and the other will fail to allocate.
# By nature, this test exercises a race condition, so it isn't perfect.
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"ports": ['5555:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=2)
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '5555:6666/tcp' in c.transitioningMessage
@if_resource_scheduler
def test_docker_bind_address(docker_client, super_client):
c = docker_client.create_container(name='bindAddrTest',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.0.0.1:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.0.0.1', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest2',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.2.2.2', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest3',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_transitioning(c)
assert c.transitioning == 'error'
assert '127.2.2.2:89:8999' in c.transitioningMessage
assert c.state == 'error'
def _wait_for_compose_instance_error(client, service, env):
name = env.name + "-" + service.name + "%"
def check():
containers = client.list_container(name_like=name, state='error')
if len(containers) > 0:
return containers[0]
container = wait_for(check)
return container
def _check_path(volume, should_exist, client, super_client, extra_vols=None,
path_to_check=None):
if path_to_check:
path = path_to_check
else:
path = _path_to_volume(volume)
print 'Checking path [%s] for volume [%s].' % (path, volume)
data_vols = ['/var/lib/docker:/host/var/lib/docker', '/tmp:/host/tmp']
if extra_vols:
data_vols.extend(extra_vols)
c = client. \
create_container(name="volume_check" + random_str(),
imageUuid="docker:ranchertest/volume-test:v0.1.0",
networkMode=None,
environment={'TEST_PATH': path},
command='/opt/tools/check_path_exists.sh',
dataVolumes=data_vols)
c = super_client.wait_success(c)
assert c.state == 'running'
c = super_client.wait_success(c.stop())
assert c.state == 'stopped'
code = c.data.dockerInspect.State.ExitCode
# Note that the test in the container is testing to see if the path is a
# directory. Code for the test is here:
# https://github.com/rancher/test-images/tree/master/images/volume-test
if should_exist:
# The exit code of the container should be a 10 if the path existed
assert code == 10
else:
# And 11 if the path did not exist
assert code == 11
c.remove()
def _path_to_volume(volume):
path = volume.uri.replace('file://', '')
mounted_path = re.sub('^.*?/var/lib/docker', '/host/var/lib/docker',
path)
if not mounted_path.startswith('/host/var/lib/docker'):
mounted_path = re.sub('^.*?/tmp', '/host/tmp',
path)
return mounted_path
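# Illustrative mapping performed by _path_to_volume (not from the original file):
#   volume.uri = 'file:///var/lib/docker/volumes/<id>/_data'
#   -> '/host/var/lib/docker/volumes/<id>/_data'
# i.e. host paths are rewritten to where they appear inside the checking container,
# which bind-mounts /var/lib/docker and /tmp under /host (see _check_path above).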
| vincent99/cattle | tests/integration/cattletest/core/test_docker.py | Python | apache-2.0 | 46,359 |
import django_filters
from django_filters import rest_framework as filters
from django_rv_apps.apps.believe_his_prophets.models.book import Book
from django_rv_apps.apps.believe_his_prophets.models.bible_read import BibleRead
from django_rv_apps.apps.believe_his_prophets.models.testament import Testament
class BookFilter(django_filters.FilterSet):
testament = filters.ModelChoiceFilter(
queryset=Testament.objects.all())
class Meta:
model = Book
fields = ('id', 'testament',
'book_order')
| davrv93/creed-en-sus-profetas-backend | django_rv_apps/apps/believe_his_prophets_api/views/book/filters.py | Python | apache-2.0 | 550 |
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo of the Google CloudSpeech recognizer."""
import argparse
import locale
import logging
from aiy.board import Board, Led
from aiy.cloudspeech import CloudSpeechClient
def get_hints(language_code):
if language_code.startswith('en_'):
return ('turn on the light',
'turn off the light',
'blink the light',
'goodbye')
return None
def locale_language():
language, _ = locale.getdefaultlocale()
return language
def main():
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Assistant service example.')
parser.add_argument('--language', default=locale_language())
args = parser.parse_args()
logging.info('Initializing for language %s...', args.language)
hints = get_hints(args.language)
client = CloudSpeechClient()
with Board() as board:
while True:
if hints:
logging.info('Say something, e.g. %s.' % ', '.join(hints))
else:
logging.info('Say something.')
text = client.recognize(language_code=args.language,
hint_phrases=hints)
if text is None:
logging.info('You said nothing.')
continue
logging.info('You said: "%s"' % text)
text = text.lower()
if 'turn on the light' in text:
board.led.state = Led.ON
elif 'turn off the light' in text:
board.led.state = Led.OFF
elif 'blink the light' in text:
board.led.state = Led.BLINK
elif 'goodbye' in text:
break
if __name__ == '__main__':
main()
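# Example invocation (assumed, not part of the original demo):
#   python3 cloudspeech_demo.py --language en_US
# --language defaults to the system locale via locale_language() when omitted.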
| google/aiyprojects-raspbian | src/examples/voice/cloudspeech_demo.py | Python | apache-2.0 | 2,336 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteExperiment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_Experiments_DeleteExperiment_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_delete_experiment():
# Create a client
client = dialogflowcx_v3beta1.ExperimentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.DeleteExperimentRequest(
name="name_value",
)
# Make the request
client.delete_experiment(request=request)
# [END dialogflow_v3beta1_generated_Experiments_DeleteExperiment_sync]
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3beta1_generated_experiments_delete_experiment_sync.py | Python | apache-2.0 | 1,446 |
#!/usr/bin/python
#title :gentotsv.py
#description :Script to process imputed .gz GEN files and create tab-separated (.tsv) files
#author :Diego Alvarez
#date :2016-06-05
#python_version :3.5
#==============================================================================
import gzip
import os
import fnmatch
import csv
import sys
import getopt
import time
import linecache
import utils
import config
from multiprocessing import Pool, Process
import multiprocessing
from functools import partial
def script_usage():
print 'gentotsv.py -h<help> -t<threads> -s<sourcedir> -d<destinationdir> -f<samplefile>'
print '---------'
print 'If no parameters are passed, default values are taken from <config.py>'
print 'Default #threads = #processor cores'
print '----------------'
return
def get_gen_file_columns(p_source_dir,p_source_file):
with gzip.open(p_source_dir+p_source_file,'rb') as genfile:
utils.log(logger,"GEN file: "+ p_source_file)
columns=genfile.readline().split()
totalcolumns = len(columns)
utils.log(logger,"Columns in GEN file: "+str(totalcolumns))
genfile.close()
return totalcolumns
def create_sample_file(p_source_dir,p_destination_dir, p_source_file, p_file_type):
utils.log(logger,"Begin - create_sample_file -")
samplecountlines = 0
source_file = utils.get_file_name(str(p_source_file))
with open(p_destination_dir+"SAM_"+source_file+p_file_type, 'wb') as xfile:
utils.log(logger,"Reading file SAMPLE: " + p_source_file)
csvwriter = csv.writer(xfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
with open(p_source_dir+p_source_file,'rb') as samplefile:
INDfilelist = []
for line in samplefile:
samplecountlines=samplecountlines+1
if samplecountlines <= 2:
seq=str(samplecountlines * (-1)).split()
columns=line.split()
csvwriter.writerow(seq+columns)
#Start counting individuals
if samplecountlines > 2:
seq=str(samplecountlines-2).split()
columns=line.split()
col01= columns[0:2] #to create the file ID
csvwriter.writerow(seq+columns)
#Create empty INDIVIDUAL file
INDfilename = create_individuals_file(p_destination_dir, seq[0]+"_"+col01[0]+"_"+col01[1], p_file_type)
#Create list with Individuals file
INDfilelist.append(INDfilename)
samplefile.close()
xfile.close()
utils.log(logger,"SAMPLE file lines: "+ str(samplecountlines))
utils.log(logger,"End - create_sample_file -")
return INDfilelist
def create_individuals_sample_files(p_source_dir,p_destination_dir, p_source_file, p_file_type):
utils.log(logger,"Begin - create_individuals_sample_files -")
samplecountlines = 0
source_file = utils.get_file_name(str(p_source_file))
INDfilelist = []
with open(p_source_dir+p_source_file,'rb') as samplefile:
for line in samplefile:
samplecountlines = samplecountlines + 1
columns = line.split()
if samplecountlines == 1:
headerline = columns[:]
elif samplecountlines == 2:
datatypeline = columns[:]
else:
individualline = samplecountlines - 2
with open(p_destination_dir+"SAM_"+str(individualline)+"_"+str(columns[0])+"_"+str(columns[1])+p_file_type, 'wb') as xfile:
csvwriter = csv.writer(xfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(0, len(columns)):
csvwriter.writerow([headerline[i]]+[datatypeline[i]]+[columns[i]])
#Create empty INDIVIDUAL file
INDfilename = create_individuals_file(p_destination_dir, str(individualline)+"_"+columns[0]+"_"+columns[1], p_file_type)
#Create list with Individuals file
INDfilelist.append(INDfilename)
xfile.close()
samplefile.close()
utils.log(logger,"SAMPLE file lines: "+ str(samplecountlines))
utils.log(logger,"End - create_individuals_sample_files -")
return INDfilelist
def create_snp_file(p_source_dir,p_destination_dir, p_source_file_type, p_dest_file_type):
utils.log(logger,"Begin - Create SNP file -")
filename = p_destination_dir+"SNP"+p_dest_file_type
open(filename, 'w').close()
for file_list in sorted(os.listdir(p_source_dir)):
if fnmatch.fnmatch(file_list,'*'+p_source_file_type):
with gzip.open(p_source_dir+file_list,'rb') as genfile:
sequence=0
gencountlines=0
utils.log(logger,"Reading file GEN: " + file_list)
with open(filename,'ab') as SNPfile:
csvwriter = csv.writer(SNPfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
# readlines() loads the full .gen file into memory and splits it into lines.
# Too many threads or very big files can cause memory overflow.
# for line in genfile.readlines():
for line in genfile: #Read file line by line
gencountlines=gencountlines+1
columns=line.split()
col05=columns[0:5]
source_file = utils.get_file_name(file_list)
sequence=sequence+1
seq=str(sequence).split()
csvwriter.writerow([source_file]+seq+col05)
SNPfile.close()
genfile.close()
utils.log(logger,"End - Create SNP file -")
return
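# Illustrative output row written by create_snp_file (derived from the code above):
#   [source_gen_file_name, running_sequence_number, <first five GEN columns>]
# In Oxford GEN files the first five columns are typically the SNP identifier, rsID,
# position and the two alleles; only these are copied, one row per input line.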
def create_individuals_file(p_destination_dir, p_filename, p_file_type):
filename = p_destination_dir+"IND_"+p_filename+p_file_type
open(filename, 'w').close()
return filename
def convert_cols_to_lines(p_source_dir,p_source_file,p_destination_dir,p_dest_file_list, p_individualsposlist, p_gen_column):
utils.log(logger,"Begin - convert_gen_cols_to_ind_lines - ")
positionindex = p_individualsposlist.index(p_gen_column)
regex = r"^{0}.*{1}$".format(p_destination_dir+"IND_"+str(positionindex+1)+"_",destination_file_type)
p_indfilename = utils.find_file_in_list(p_dest_file_list,regex)
source_file = utils.get_file_name(str(p_source_file))
try:
col = int(p_gen_column)
except:
e = sys.exc_info()[0]
utils.log(logger,e)
#Open individuals file
with open(p_indfilename,'a') as indfile:
utils.log(logger,"Writing IND .tsv file: "+ p_indfilename)
csvwriter = csv.writer(indfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
sequence = 0
with gzip.open(p_source_dir+p_source_file,'rb') as genfile:
for line in genfile:  # read the .gen file line by line
# readlines() loads the full .gen file into memory and splits it into lines.
# Too many threads or very big files can cause memory overflow.
# for line in genfile.readlines():
sequence=sequence+1
seq=str(sequence).split()
columns=line.split()
csvwriter.writerow([source_file]+seq+columns[col:col+3])
indfile.close()
utils.log(logger,"Lines in source file: "+ str(sequence))
genfile.close()
utils.log(logger,"End - convert_gen_cols_to_ind_lines - ")
return
def update_individuals_file(p_source_dir,p_source_file_type,p_destination_dir,p_dest_file_list):
utils.log(logger,"Begin - update_individuals_file -")
for file_list in sorted(os.listdir(p_source_dir)):
if fnmatch.fnmatch(file_list,'*'+p_source_file_type):
if __name__ =='__main__':
#with gzip.open(p_source_dir+file_list,'rb') as genfile:
#read only first line
genfile = gzip.open(p_source_dir+file_list,'rb')
columns=genfile.readline().split()
genfile_columns = len(columns)
genfile.close()
utils.log(logger, "numthreads: "+str(numthreads))
pool = Pool(int(numthreads))
utils.log(logger,"Reading GEN file: "+ file_list)
index =5
individualpos = 0
individualsposlist = []
#create list with all individuals position
while(index < genfile_columns):
individualsposlist.append(index)
index = index + 3
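# Illustrative layout assumed here: after the first 5 SNP columns, each individual
# contributes 3 genotype-probability columns, so individuals start at columns
# 5, 8, 11, ... e.g. a line with 11 columns describes 2 individuals.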
func = partial(convert_cols_to_lines,p_source_dir,file_list,p_destination_dir,p_dest_file_list,individualsposlist)
pool.map_async(func,individualsposlist).get(9999999)
utils.log(logger,"End - update_individuals_file -")
return
###########################################################################################################
###############################################Main function###############################################
###########################################################################################################
try:
print 'ARGV :', sys.argv[1:]
opts, args = getopt.getopt(sys.argv[1:], 'ht:s:d:f:', ['help=','threads=','sourcedir=','destinationdir=','samplefile='])
print 'OPTIONS :', opts
#Initialization
help=0
samplecount=0
samplecounttotal=0
gencount=0
gencounttotal=0
poscount=0
#Get default values
source_dir = config.source_dir_oxford
source_file_type = config.source_file_type_oxford
destination_dir = config.destination_dir_oxford
destination_file_type =config.destination_file_type_oxford
sample_file = config.sample_file_oxford
sample_file_format = config.sample_file_format_oxford
numthreads = multiprocessing.cpu_count()
#Pass the script name to Log
logger=utils.create_logger("gentotsv")
start_time = time.time()
print "Start time: "+time.ctime()
utils.log(logger, "Start time: "+time.ctime())
for opt,arg in opts:
if opt=='-h':
help = 1
script_usage()
elif opt=='-t':
global numthreads
numthreads = arg
elif opt=='-s':
source_dir = arg
elif opt=='-d':
destination_dir = arg
elif opt=='-f':
sample_file = arg
if help == 0:
print "Number of threads: "+str(numthreads)
utils.log(logger, "Number of threads: "+str(numthreads))
print "Sample file format: "+sample_file_format
utils.log(logger, "Sample file format: "+sample_file_format)
print "Source directory: "+source_dir
utils.log(logger, "Source directory: "+source_dir)
print "Destination directory: "+destination_dir
utils.log(logger, "Destination directory: "+destination_dir)
print "Sample file name: "+sample_file
utils.log(logger, "Sample file name: "+sample_file)
if not os.path.exists(source_dir):
utils.log(logger, "EXCEPTION - Source directory "+source_dir+" does not exist")
sys.exit("EXCEPTION - Source directory "+source_dir+" does not exist")
#Create destination directory
try:
os.makedirs(destination_dir)
except OSError as err:
pass
if os.path.isfile(source_dir+sample_file):
#----------------------
#Convert SAMPLE to TSV
# 1 file = 1 individual
#----------------------
INDfilelist = create_individuals_sample_files(source_dir,destination_dir,sample_file,destination_file_type)
# 2 threads. Parallel processing.
if __name__=='__main__':
#----------------------
#Create SNP file with 1st 5 columns of GEN file
#----------------------
#create_snp_file(source_dir,destination_dir,source_file_type, destination_file_type)
p1 = Process(target=create_snp_file, args=(source_dir,destination_dir,source_file_type, destination_file_type))
p1.start()
#----------------------
#Convert GEN to TSV
# 1 file = 1 individual
#----------------------
#update_individuals_file(source_dir,source_file_type,destination_dir,INDfilelist)
p2 = Process(target=update_individuals_file, args=(source_dir,source_file_type,destination_dir,INDfilelist))
p2.start()
p1.join()
p2.join()
else:
utils.log(logger," EXCEPTION - Sample File: " + sample_file + " does not exists")
sys.exit("Sample File: " + sample_file + " does not exists")
print time.ctime()
utils.log(logger, "End time: "+time.ctime())
except getopt.GetoptError as err:
print str(err)
utils.log(logger,str(err))
sys.exit(2)
except:
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
utils.log(logger,'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
sys.exit(2)
| bigdatafoundation/qngene | Export_gen_files/gentotsv.py | Python | apache-2.0 | 14,598 |
# Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Navneet Singh
# Copyright (c) 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import socket
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
CONF.register_opts(na_opts.netapp_san_opts)
class NetAppESeriesLibrary(object):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
'hpux': 'HP-UX TPGS',
'linux_atto': 'LnxTPGSALUA',
'linux_dm_mp': 'LnxALUA',
'linux_mpp_rdac': 'Linux',
'linux_pathmanager': 'LnxTPGSALUA_PM',
'macos': 'MacTPGSALUA',
'ontap': 'ONTAP',
'svc': 'SVC',
'solaris_v11': 'SolTPGSALUA',
'solaris_v10': 'Solaris',
'vmware': 'VmwTPGSALUA',
'windows':
'Windows 2000/Server 2003/Server 2008 Non-Clustered',
'windows_atto': 'WinTPGSALUA',
'windows_clustered':
'Windows 2000/Server 2003/Server 2008 Clustered'
}
# NOTE(ameade): This maps what is reported by the e-series api to a
# consistent set of values that are reported by all NetApp drivers
# to the cinder scheduler.
SSC_DISK_TYPE_MAPPING = {
'scsi': 'SCSI',
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
}
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
DEFAULT_HOST_TYPE = 'linux_dm_mp'
def __init__(self, driver_name, driver_protocol="iSCSI",
configuration=None, **kwargs):
self.configuration = configuration
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self.configuration.append_config_values(na_opts.netapp_san_opts)
self.lookup_service = fczm_utils.create_lookup_service()
self._backend_name = self.configuration.safe_get(
"volume_backend_name") or "NetApp_ESeries"
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self._stats = {}
self._ssc_stats = {}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self._client = self._create_rest_client(self.configuration)
self._check_mode_get_or_register_storage_system()
if self.configuration.netapp_enable_multiattach:
self._ensure_multi_attach_host_group_exists()
def _create_rest_client(self, configuration):
port = configuration.netapp_server_port
scheme = configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
return client.RestClient(
scheme=scheme,
host=configuration.netapp_server_hostname,
port=port,
service_path=configuration.netapp_webservice_path,
username=configuration.netapp_login,
password=configuration.netapp_password)
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
def check_for_setup_error(self):
self._check_host_type()
self._check_multipath()
self._check_storage_system()
self._start_periodic_tasks()
def _check_host_type(self):
host_type = (self.configuration.netapp_host_type
or self.DEFAULT_HOST_TYPE)
self.host_type = self.HOST_TYPES.get(host_type)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
LOG.warning(_LW('Production use of "%(backend)s" backend requires '
'the Cinder controller to have multipathing '
'properly set up and the configuration option '
'"%(mpflag)s" to be set to "True".'),
{'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'})
def _ensure_multi_attach_host_group_exists(self):
try:
host_group = self._client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
LOG.info(_LI("The multi-attach E-Series host group '%(label)s' "
"already exists with clusterRef %(clusterRef)s"),
host_group)
except exception.NotFound:
host_group = self._client.create_host_group(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
LOG.info(_LI("Created multi-attach E-Series host group %(label)s "
"with clusterRef %(clusterRef)s"), host_group)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
{'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
if host in ips:
LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
def _check_storage_system(self):
"""Checks whether system is registered and has good status."""
try:
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
LOG.info(_LI("System with controller addresses [%s] is not "
"registered with web service."),
self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
new_pwd = self.configuration.netapp_sa_password
self._client.update_stored_system_password(new_pwd)
time.sleep(self.SLEEP_SECS)
sa_comm_timeout = 60
comm_time = 0
while True:
system = self._client.list_storage_system()
status = system.get('status', '').lower()
# wait if array not contacted or
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
msg = _("Failure in communication between web service and"
" array. Waited %s seconds. Verify array"
" configuration parameters.")
raise exception.NetAppDriverException(msg %
sa_comm_timeout)
else:
break
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
raise exception.NetAppDriverException(
_("System %(id)s found with bad status - "
"%(status)s.") % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict)
return True
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
return self._get_volume_with_label_wwn(label)
def _get_volume_with_label_wwn(self, label=None, wwn=None):
"""Searches volume with label or wwn or both."""
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
wwn = wwn.replace(':', '').upper() if wwn else None
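# Illustrative normalization (not from the original driver): a wwn passed as
# '60:0a:09:80:00:12:34:56' becomes '600A098000123456' here, and is then
# compared against each volume's upper-cased worldWideName field below.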
eseries_volume = None
for vol in self._client.list_volumes():
if label and vol.get('label') != label:
continue
if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
continue
eseries_volume = vol
break
if not eseries_volume:
raise KeyError()
return eseries_volume
def _get_snapshot_group_for_snapshot(self, snapshot_id):
label = utils.convert_uuid_to_es_fmt(snapshot_id)
for group in self._client.list_snapshot_groups():
if group['label'] == label:
return group
msg = _("Specified snapshot group with label %s could not be found.")
raise exception.NotFound(msg % label)
def _get_latest_image_in_snapshot_group(self, snapshot_id):
group = self._get_snapshot_group_for_snapshot(snapshot_id)
images = self._client.list_snapshot_images()
if images:
filtered_images = filter(lambda img: (img['pitGroupRef'] ==
group['pitGroupRef']),
images)
sorted_imgs = sorted(filtered_images, key=lambda x: x[
'pitTimestamp'])
return sorted_imgs[0]
msg = _("No snapshot image found in snapshot group %s.")
raise exception.NotFound(msg % group['label'])
def _is_volume_containing_snaps(self, label):
"""Checks if volume contains snapshot groups."""
vol_id = utils.convert_es_fmt_to_uuid(label)
for snap in self._client.list_snapshot_groups():
if snap['baseVolume'] == vol_id:
return True
return False
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
eseries_volume = self._get_volume(volume['name_id'])
storage_pool = self._client.get_storage_pool(
eseries_volume['volumeGroupRef'])
if storage_pool:
return storage_pool.get('label')
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s', volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
level='pool')
if eseries_pool_label is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
# get size of the requested volume creation
size_gb = int(volume['size'])
self._create_volume(eseries_pool_label,
eseries_volume_label,
size_gb)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
size_gb):
"""Creates volume with given label and size."""
if self.configuration.netapp_enable_multiattach:
volumes = self._client.list_volumes()
# NOTE(ameade): Ensure we do not create more volumes than we could
# map to the multi attach ESeries host group.
if len(volumes) > utils.MAX_LUNS_PER_HOST_GROUP:
msg = (_("Cannot create more than %(req)s volumes on the "
"ESeries array when 'netapp_enable_multiattach' is "
"set to true.") %
{'req': utils.MAX_LUNS_PER_HOST_GROUP})
raise exception.NetAppDriverException(msg)
target_pool = None
pools = self._get_storage_pools()
for pool in pools:
if pool["label"] == eseries_pool_label:
target_pool = pool
break
if not target_pool:
msg = _("Pools %s does not exist")
raise exception.NetAppDriverException(msg % eseries_pool_label)
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
LOG.info(_LI("Created volume with "
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."), e)
return vol
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_available_storage_pools(size_gb)
for pool in avl_pools:
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
finally:
if src_vol:
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
LOG.warning(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_snapshot_group_for_snapshot(snapshot_id)
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_latest_image_in_snapshot_group(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.Gi
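# Note (illustrative): units.Gi is 2**30, so this integer division yields the
# capacity in whole GiB (it floors under Python 2's / on ints).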
storage_pools = self._get_sorted_available_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
group['baseVolume'], s_id)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
{'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
dst_vol['volumeRef'])
while True:
j_st = self._client.list_vol_copy_job(job['volcopyRef'])
if (j_st['status'] == 'inProgress' or j_st['status'] ==
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if j_st['status'] == 'failed' or j_st['status'] == 'halted':
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
raise exception.NetAppDriverException(
_("Vol copy job for dest %s failed.") %
dst_vol['label'])
LOG.info(_LI("Vol copy job completed for dest %s."),
dst_vol['label'])
break
finally:
if job:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting "
"job %s."), job['volcopyRef'])
else:
LOG.warning(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id'],
'volume': src_vref}
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
vol = self._get_volume(volume['name_id'])
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException:
LOG.warning(_LI("Volume %s already deleted."), volume['id'])
return
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
os_vol = snapshot['volume']
vol = self._get_volume(os_vol['name_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_available_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
self.delete_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_grp = self._get_snapshot_group_for_snapshot(snapshot['id'])
except exception.NotFound:
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def map_volume_to_host(self, volume, eseries_volume, initiators):
"""Ensures the specified initiator has access to the volume."""
existing_maps = self._client.get_volume_mappings_for_volume(
eseries_volume)
host = self._get_or_create_host(initiators, self.host_type)
# There can only be one or zero mappings on a volume in E-Series
current_map = existing_maps[0] if existing_maps else None
if self.configuration.netapp_enable_multiattach and current_map:
self._ensure_multi_attach_host_group_exists()
mapping = host_mapper.map_volume_to_multiple_hosts(self._client,
volume,
eseries_volume,
host,
current_map)
else:
mapping = host_mapper.map_volume_to_single_host(
self._client, volume, eseries_volume, host, current_map,
self.configuration.netapp_enable_multiattach)
return mapping
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assigns the specified volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
eseries_vol = self._get_volume(volume['name_id'])
mapping = self.map_volume_to_host(volume, eseries_vol,
initiators)
lun_id = mapping['lun']
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
if target_wwpns:
msg = ("Successfully fetched target details for LUN %(id)s "
"and initiator(s) %(initiators)s.")
msg_fmt = {'id': volume['id'], 'initiators': initiators}
LOG.debug(msg, msg_fmt)
else:
msg = _('Failed to get LUN target details for the LUN %s.')
raise exception.VolumeBackendAPIException(data=msg % volume['id'])
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
eseries_vol = self._get_volume(volume['name_id'])
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
host = self._get_host_with_matching_port(initiators)
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
host_mapper.unmap_volume_from_host(self._client, volume, host, mapping)
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if len(self._client.get_volume_mappings_for_host(
host['hostRef'])) == 0:
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map."))
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
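    # Illustrative sketch (not part of the driver): the two return shapes of
    # terminate_connection_fc above. While other volumes stay mapped to the
    # host, the zone is left alone:
    #     {'driver_volume_type': 'fibre_channel', 'data': {}}
    # When this was the host's last mapping, the zone manager gets what it
    # needs to tear the zone down (WWNs hypothetical):
    #     {'driver_volume_type': 'fibre_channel',
    #      'data': {'target_wwn': ['500a098280feeba5'],
    #               'initiator_target_map': {
    #                   '21000024ff406cc3': ['500a098280feeba5']}}}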
def _build_initiator_target_map_fc(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._client.list_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
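    # Worked example for the helper above (WWNs hypothetical): without a
    # lookup service, every initiator is mapped to every target port, e.g.
    # connector['wwpns'] = ['21000024ff406cc2', '21000024ff406cc3'] with
    # targets ['500a098280feeba5', '500a098290feeba5'] yields
    #     init_targ_map = {
    #         '21000024ff406cc2': ['500a098280feeba5', '500a098290feeba5'],
    #         '21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'],
    #     }
    # and num_paths stays 0, since paths are only counted in the
    # lookup-service branch.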
def initialize_connection_iscsi(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
eseries_vol = self._get_volume(volume['name_id'])
mapping = self.map_volume_to_host(volume, eseries_vol,
[initiator_name])
lun_id = mapping['lun']
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.",
msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
LOG.debug("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.", msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def _get_iscsi_service_details(self):
"""Gets iscsi iqn, ip and port information."""
ports = []
hw_inventory = self._client.list_hardware_inventory()
iscsi_ports = hw_inventory.get('iscsiPorts')
if iscsi_ports:
for port in iscsi_ports:
if (port.get('ipv4Enabled') and port.get('iqn') and
port.get('ipv4Data') and
port['ipv4Data'].get('ipv4AddressData') and
port['ipv4Data']['ipv4AddressData']
.get('ipv4Address') and port['ipv4Data']
['ipv4AddressData'].get('configState')
== 'configured'):
iscsi_det = {}
iscsi_det['ip'] =\
port['ipv4Data']['ipv4AddressData']['ipv4Address']
iscsi_det['iqn'] = port['iqn']
iscsi_det['tcp_port'] = port.get('tcpListenPort')
iscsi_det['controller'] = port.get('controllerId')
ports.append(iscsi_det)
if not ports:
msg = _('No good iscsi portals found for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
return ports
def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True):
"""Get the iscsi portal info relevant to volume."""
for portal in portals:
if portal.get('controller') == volume.get('currentManager'):
return portal
if anyController and portals:
return portals[0]
msg = _('No good iscsi portal found in supplied list for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
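    # Behaviour sketch for the portal selection above (values hypothetical):
    # given portals [{'controller': 'A', ...}, {'controller': 'B', ...}] and a
    # volume whose 'currentManager' is 'B', the second portal is returned; if
    # no controller matches and anyController is True, the first portal is
    # used, otherwise a NetAppDriverException is raised.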
def _get_or_create_host(self, port_ids, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_matching_port(port_ids)
ht_def = self._get_host_type_definition(host_type)
if host.get('hostTypeIndex') != ht_def.get('index'):
try:
host = self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
LOG.warning(_LW("Unable to update host type for host with "
"label %(l)s. %(e)s"),
{'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_ids, host_type)
def _get_host_with_matching_port(self, port_ids):
"""Gets or creates a host with given port id."""
# Remove any extra colons
port_ids = [six.text_type(wwpn).replace(':', '')
for wwpn in port_ids]
hosts = self._client.list_hosts()
for port_id in port_ids:
for host in hosts:
if host.get('hostSidePorts'):
ports = host.get('hostSidePorts')
for port in ports:
address = port.get('address').upper().replace(':', '')
if address == port_id.upper():
return host
msg = _("Host with ports %(ports)s not found.")
raise exception.NotFound(msg % {'ports': port_ids})
def _create_host(self, port_ids, host_type, host_group=None):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with ports %s."), port_ids)
host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
port_type = self.driver_protocol.lower()
return self._client.create_host_with_ports(host_label,
host_type,
port_ids,
group_id=host_group,
port_type=port_type)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
host_types = self._client.list_host_types()
for ht in host_types:
if ht.get('name', 'unknown').lower() == host_type.lower():
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
eseries_vol = self._get_volume(volume['name_id'])
initiator = connector['initiator']
host = self._get_host_with_matching_port([initiator])
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
host_mapper.unmap_volume_from_host(self._client, volume, host, mapping)
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
if refresh:
if not self._ssc_stats:
self._update_ssc_info()
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
data = dict()
data["volume_backend_name"] = self._backend_name
data["vendor_name"] = "NetApp"
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.driver_protocol
data["pools"] = []
for storage_pool in self._get_storage_pools():
cinder_pool = {}
cinder_pool["pool_name"] = storage_pool.get("label")
cinder_pool["QoS_support"] = False
cinder_pool["reserved_percentage"] = 0
tot_bytes = int(storage_pool.get("totalRaidedSpace", 0))
used_bytes = int(storage_pool.get("usedSpace", 0))
cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
units.Gi)
cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
pool_ssc_stats = self._ssc_stats.get(
storage_pool["volumeGroupRef"])
if pool_ssc_stats:
cinder_pool.update(pool_ssc_stats)
data["pools"].append(cinder_pool)
self._stats = data
self._garbage_collect_tmp_vols()
@cinder_utils.synchronized("netapp_update_ssc_info", external=False)
def _update_ssc_info(self):
"""Periodically runs to update ssc information from the backend.
The self._ssc_stats attribute is updated with the following format.
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'"), self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._get_storage_pools())
self._ssc_stats = \
self._update_ssc_disk_types(self._get_storage_pools())
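    # Example of the resulting self._ssc_stats layout described in the
    # docstring above (the volumeGroupRef key is hypothetical):
    #     {'0400000060080E50023C73340000000B56B2ACF6': {
    #         'netapp_disk_encryption': 'true',
    #         'netapp_disk_type': 'SSD'}}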
def _update_ssc_disk_types(self, volume_groups):
"""Updates the given ssc dictionary with new disk type information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_disks = self._client.list_drives()
relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
volume_groups, all_disks)
for drive in relevant_disks:
current_vol_group = drive.get('currentVolumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
if drive.get("driveMediaType") == 'ssd':
ssc_stats[current_vol_group]['netapp_disk_type'] = 'SSD'
else:
disk_type = drive.get('interfaceType').get('driveType')
ssc_stats[current_vol_group]['netapp_disk_type'] = \
self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
return ssc_stats
def _update_ssc_disk_encryption(self, volume_groups):
"""Updates the given ssc dictionary with new disk encryption information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_pools = self._client.list_storage_pools()
relevant_pools = filter(lambda x: x.get('volumeGroupRef') in
volume_groups, all_pools)
for pool in relevant_pools:
current_vol_group = pool.get('volumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
if pool['securityType'] == 'enabled' else 'false'
return ssc_stats
def _get_storage_pools(self):
conf_enabled_pools = []
for value in self.configuration.netapp_storage_pools.split(','):
if value:
conf_enabled_pools.append(value.strip().lower())
filtered_pools = []
storage_pools = self._client.list_storage_pools()
for storage_pool in storage_pools:
# Check if pool can be used
if (storage_pool.get('raidLevel') == 'raidDiskPool'
and storage_pool['label'].lower() in conf_enabled_pools):
filtered_pools.append(storage_pool)
return filtered_pools
def _get_sorted_available_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.Gi
sorted_pools = sorted(self._get_storage_pools(), key=lambda x:
(int(x.get('totalRaidedSpace', 0))
- int(x.get('usedSpace', 0))), reverse=True)
        avl_pools = [x for x in sorted_pools
                     if (int(x.get('totalRaidedSpace', 0)) -
                         int(x.get('usedSpace', 0))) >= size]
if not avl_pools:
LOG.warning(_LW("No storage pool found with available capacity "
"%s."), size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
stage_1, stage_2 = 0, 0
src_vol = self._get_volume(volume['name_id'])
src_label = src_vol['label']
stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
extend_vol = {'id': uuid.uuid4(), 'size': new_size}
self.create_cloned_volume(extend_vol, volume)
new_vol = self._get_volume(extend_vol['id'])
try:
stage_1 = self._client.update_volume(src_vol['id'], stage_label)
stage_2 = self._client.update_volume(new_vol['id'], src_label)
new_vol = stage_2
LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
self._client.delete_volume(new_vol['id'])
if stage_2 == 0:
with excutils.save_and_reraise_exception():
self._client.update_volume(src_vol['id'], src_label)
self._client.delete_volume(new_vol['id'])
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
for vol in self._client.list_volumes():
label = vol['label']
if (label.startswith('tmp-') and
not self._is_volume_containing_snaps(label)):
try:
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException as e:
LOG.debug("Error deleting vol with label %s: %s",
(label, e))
finally:
na_utils.set_safe_attr(self, 'clean_job_running', False)
@cinder_utils.synchronized('manage_existing')
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management."""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
label = utils.convert_uuid_to_es_fmt(volume['id'])
if label == vol['label']:
LOG.info(_LI("Volume with given ref %s need not be renamed during"
" manage operation."), existing_ref)
managed_vol = vol
else:
managed_vol = self._client.update_volume(vol['id'], label)
LOG.info(_LI("Manage operation completed for volume with new label"
" %(label)s and wwn %(wwn)s."),
{'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
return int(math.ceil(float(vol['capacity']) / units.Gi))
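    # Worked example of the rounding above: a backend volume reporting
    # capacity = 5368709121 bytes (5 GiB plus one byte) gives
    # math.ceil(5368709121 / units.Gi) == math.ceil(5.0000000009...) == 6,
    # so the volume is managed as 6 GB.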
def _get_existing_vol_with_manage_ref(self, volume, existing_ref):
try:
return self._get_volume_with_label_wwn(
existing_ref.get('source-name'), existing_ref.get('source-id'))
except exception.InvalidInput:
reason = _('Reference must contain either source-name'
' or source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
except KeyError:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Volume not found on configured storage pools.'))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. Logs a
message to indicate the volume is no longer under Cinder's control.
"""
managed_vol = self._get_volume(volume['id'])
LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn "
"%(wwn)s."), {'label': managed_vol['label'],
'wwn': managed_vol[self.WORLDWIDENAME]})
|
saeki-masaki/cinder
|
cinder/volume/drivers/netapp/eseries/library.py
|
Python
|
apache-2.0
| 46,616
|
#!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
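# Quick manual check of the endpoint above (illustrative only; the payload is
# a minimal stand-in for what API.AI/Dialogflow v1 POSTs, and 5000 is the
# default port used in __main__ below):
#
#   curl -X POST http://localhost:5000/webhook \
#        -H 'Content-Type: application/json' \
#        -d '{"result": {"action": "context-player"}}'
#
# The response body is the JSON built by processRequest() below.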
def processRequest(req):
if req.get("result").get("action") == "context-player":
yql_url = "http://marcolemmens.com/ziggo/api.php?query=playerInfo"
result = urlopen(yql_url).read()
data = json.loads(result)
output = data.get('output')
playerId = data.get('playerInfo').get("id")
playerName = data.get('playerInfo').get("playerName")
return {
"speech": output,
# "data": data,
"contextOut": [{"name":"context-player", "lifespan":1, "parameters":{"player-id": playerId}},{"name":"context-player", "lifespan":1, "parameters":{"player-name": playerName}}],
"source": "apiai-weather-webhook-sample"
}
if req.get("result").get("action") == "context-player-salary":
playerId = req.get("result").get("contexts")[0].get("parameters").get("player-id")
playerName = req.get("result").get("contexts")[0].get("parameters").get("player-name")
yql_url = "http://marcolemmens.com/ziggo/api.php?query=playerSalary&playerId=" + playerId+"&playerName=" + playerName
result = urlopen(yql_url).read()
data = json.loads(result)
output = data.get('output')
playerId = data.get('playerInfo').get("id")
playerName = data.get('playerInfo').get("playerName")
return {
"speech": output,
"data": playerName,
"contextOut": [{"name":"context-player", "lifespan":1, "parameters":{"player-id": playerId}},{"name":"context-player", "lifespan":1, "parameters":{"player-name": playerName}}],
"source": "apiai-weather-webhook-sample"
}
if req.get("result").get("action") == "context-player-length":
playerId = req.get("result").get("contexts")[0].get("parameters").get("player-id")
playerName = req.get("result").get("contexts")[0].get("parameters").get("player-name")
yql_url = "http://marcolemmens.com/ziggo/api.php?query=playerLength&playerId=" + playerId+"&playerName=" + playerName
result = urlopen(yql_url).read()
data = json.loads(result)
output = data.get('output')
playerId = data.get('playerInfo').get("id")
playerName = data.get('playerInfo').get("playerName")
return {
"speech": output,
"data": playerName,
"contextOut": [{"name":"context-player", "lifespan":1, "parameters":{"player-id": playerId}},{"name":"context-player", "lifespan":1, "parameters":{"player-name": playerName}}],
"source": "apiai-weather-webhook-sample"
}
if req.get("result").get("action") == "specific-player":
playerName = req.get("result").get("metadata").get("intentId")
yql_url = "http://marcolemmens.com/ziggo/api.php?query=specificPlayerInfo&playerName=" + playerName
result = urlopen(yql_url).read()
data = json.loads(result)
        output = data.get('output')
        # The other branches read playerInfo from the API response; assume the
        # same shape here so the player-id context parameter used below is
        # defined.
        playerId = data.get('playerInfo').get("id")
        playerName = data.get('playerInfo').get("playerName")
return {
"speech": output,
"data": playerName,
"contextOut": [{"name":"context-player", "lifespan":1, "parameters":{"player-id": playerId}},{"name":"context-player", "lifespan":1, "parameters":{"player-name": playerName}}],
"source": "apiai-weather-webhook-sample"
}
if req.get("result").get("action") == "last-event":
yql_url = "http://marcolemmens.com/ziggo/api.php?query=lastEvent
result = urlopen(yql_url).read()
data = json.loads(result)
output = data.get('output')
eventName = data.get('eventInfo').get("eventName")
return {
"speech": output,
"data": eventName,
"contextOut": [{"name":"context-event", "lifespan":1, "parameters":{"event-name": eventName}}],
"source": "apiai-weather-webhook-sample"
}
else:
return {}
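# Sketch of the request fields processRequest() actually reads (assumed
# API.AI v1 shape; the values are hypothetical):
#
#   {
#     "result": {
#       "action": "context-player-salary",
#       "metadata": {"intentId": "..."},
#       "contexts": [
#         {"parameters": {"player-id": "10", "player-name": "Eden Hazard"}}
#       ]
#     }
#   }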
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
speech = "That would be Eden Hazard"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
MarcoLemmens/sporttotaal
|
app.py
|
Python
|
apache-2.0
| 5,918
|
#!/usr/bin/env python3
__author__ = "Ashwin Nanjappa"
# GUI viewer to view JSON data as tree.
# Ubuntu packages needed:
# python3-pyqt5
# Std
import argparse
import collections
import json
import sys
# External
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
class TextToTreeItem:
def __init__(self):
self.text_list = []
self.titem_list = []
def append(self, text_list, titem):
for text in text_list:
self.text_list.append(text)
self.titem_list.append(titem)
# Return model indices that match string
def find(self, find_str):
titem_list = []
for i, s in enumerate(self.text_list):
if find_str in s:
titem_list.append(self.titem_list[i])
return titem_list
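# Usage sketch for TextToTreeItem (illustrative; tree items shown as plain
# strings instead of real QTreeWidgetItem instances):
#
#   index = TextToTreeItem()
#   index.append(["name", "Alice"], "<row item 1>")
#   index.append(["name", "Bob"], "<row item 2>")
#   index.find("Bob")   # -> ["<row item 2>"]
#
# JsonView below builds this index while populating the tree so the Find box
# can jump between matching rows.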
class JsonView(QtWidgets.QWidget):
def __init__(self, fpath):
super(JsonView, self).__init__()
self.find_box = None
self.tree_widget = None
self.text_to_titem = TextToTreeItem()
self.find_str = ""
self.found_titem_list = []
self.found_idx = 0
        with open(fpath) as jfile:
            jdata = json.load(jfile,
                              object_pairs_hook=collections.OrderedDict)
# Find UI
find_layout = self.make_find_ui()
# Tree
self.tree_widget = QtWidgets.QTreeWidget()
self.tree_widget.setHeaderLabels(["Key", "Value"])
self.tree_widget.header().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
root_item = QtWidgets.QTreeWidgetItem(["Root"])
self.recurse_jdata(jdata, root_item)
self.tree_widget.addTopLevelItem(root_item)
# Add table to layout
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.tree_widget)
# Group box
gbox = QtWidgets.QGroupBox(fpath)
gbox.setLayout(layout)
layout2 = QtWidgets.QVBoxLayout()
layout2.addLayout(find_layout)
layout2.addWidget(gbox)
self.setLayout(layout2)
def make_find_ui(self):
# Text box
self.find_box = QtWidgets.QLineEdit()
self.find_box.returnPressed.connect(self.find_button_clicked)
# Find Button
find_button = QtWidgets.QPushButton("Find")
find_button.clicked.connect(self.find_button_clicked)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.find_box)
layout.addWidget(find_button)
return layout
def find_button_clicked(self):
find_str = self.find_box.text()
        # Very common for the user to click Find on an empty string
if find_str == "":
return
# New search string
if find_str != self.find_str:
self.find_str = find_str
self.found_titem_list = self.text_to_titem.find(self.find_str)
self.found_idx = 0
else:
item_num = len(self.found_titem_list)
self.found_idx = (self.found_idx + 1) % item_num
self.tree_widget.setCurrentItem(self.found_titem_list[self.found_idx])
def recurse_jdata(self, jdata, tree_widget):
if isinstance(jdata, dict):
for key, val in jdata.items():
self.tree_add_row(key, val, tree_widget)
elif isinstance(jdata, list):
for i, val in enumerate(jdata):
key = str(i)
self.tree_add_row(key, val, tree_widget)
else:
print("This should never be reached!")
def tree_add_row(self, key, val, tree_widget):
text_list = []
if isinstance(val, dict) or isinstance(val, list):
text_list.append(key)
row_item = QtWidgets.QTreeWidgetItem([key])
self.recurse_jdata(val, row_item)
else:
text_list.append(key)
text_list.append(str(val))
row_item = QtWidgets.QTreeWidgetItem([key, str(val)])
tree_widget.addChild(row_item)
self.text_to_titem.append(text_list, row_item)
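# Example of how recurse_jdata/tree_add_row above flatten JSON into Key/Value
# rows (illustrative input; list indices become string keys):
#
#   {"name": "Ada", "tags": ["x", "y"]}
#
#   Root
#     name | Ada
#     tags |
#       0  | x
#       1  | y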
class JsonViewer(QtWidgets.QMainWindow):
def __init__(self):
super(JsonViewer, self).__init__()
fpath = sys.argv[1]
json_view = JsonView(fpath)
self.setCentralWidget(json_view)
self.setWindowTitle("JSON Viewer")
self.show()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Escape:
self.close()
def main():
qt_app = QtWidgets.QApplication(sys.argv)
json_viewer = JsonViewer()
sys.exit(qt_app.exec_())
if "__main__" == __name__:
main()
|
ashwin/json-viewer
|
json_viewer.py
|
Python
|
apache-2.0
| 4,537
|
"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
from contextlib import contextmanager
from datetime import timedelta
import logging
from unittest import mock
import pytest
import voluptuous as vol
# Otherwise can't test just this file (import order issue)
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON
from homeassistant.core import Context, CoreState, callback
from homeassistant.helpers import config_validation as cv, script
from homeassistant.helpers.event import async_call_later
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
async_capture_events,
async_fire_time_changed,
async_mock_service,
)
ENTITY_ID = "script.test"
@pytest.fixture
def mock_timeout(hass, monkeypatch):
"""Mock async_timeout.timeout."""
class MockTimeout:
def __init__(self, timeout):
self._timeout = timeout
self._loop = asyncio.get_event_loop()
self._task = None
self._cancelled = False
self._unsub = None
async def __aenter__(self):
if self._timeout is None:
return self
self._task = asyncio.Task.current_task()
if self._timeout <= 0:
self._loop.call_soon(self._cancel_task)
return self
# Wait for a time_changed event instead of real time passing.
self._unsub = async_call_later(hass, self._timeout, self._cancel_task)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if exc_type is asyncio.CancelledError and self._cancelled:
self._unsub = None
self._task = None
raise asyncio.TimeoutError
if self._timeout is not None and self._unsub:
self._unsub()
self._unsub = None
self._task = None
return None
@callback
def _cancel_task(self, now=None):
if self._task is not None:
self._task.cancel()
self._cancelled = True
monkeypatch.setattr(script, "timeout", MockTimeout)
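# Note on the fixture above: it swaps the timeout context manager used inside
# homeassistant.helpers.script for MockTimeout, so step timeouts expire on
# simulated time rather than wall-clock time. Tests using it advance the
# clock explicitly, e.g.:
#
#   async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
#   await hass.async_block_till_done()
#
# (see test_delay_basic and test_wait_template_timeout below).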
def async_watch_for_action(script_obj, message):
"""Watch for message in last_action."""
flag = asyncio.Event()
@callback
def check_action():
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
return flag
async def test_firing_event_basic(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA({"event": event, "event_data": {"hello": "world"}})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data.get("hello") == "world"
async def test_firing_event_template(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"event": event,
"event_data_template": {
"dict": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"is_world": "yes"}, context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data == {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
}
async def test_calling_service_basic(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA({"service": "test.script", "data": {"hello": "world"}})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_calling_service_template(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{
"service_template": """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
"data_template": {
"hello": """
{% if is_world == 'yes' %}
world
{% else %}
not world
{% endif %}
"""
},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"is_world": "yes"}, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_multiple_runs_no_wait(hass):
"""Test multiple runs with no wait in script."""
logger = logging.getLogger("TEST")
calls = []
heard_event = asyncio.Event()
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
fire = service.data.get("fire")
listen = service.data.get("listen")
service_done = asyncio.Event()
@callback
def service_done_cb(event):
logger.debug("simulated service (%s:%s) done", fire, listen)
service_done.set()
calls.append(service)
logger.debug("simulated service (%s:%s) started", fire, listen)
unsub = hass.bus.async_listen(listen, service_done_cb)
hass.bus.async_fire(fire)
await service_done.wait()
unsub()
hass.services.async_register("test", "script", async_simulate_long_service)
@callback
def heard_event_cb(event):
logger.debug("heard: %s", event)
heard_event.set()
sequence = cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data_template": {"fire": "{{ fire1 }}", "listen": "{{ listen1 }}"},
},
{
"service": "test.script",
"data_template": {"fire": "{{ fire2 }}", "listen": "{{ listen2 }}"},
},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
# Start script twice in such a way that second run will be started while first run
# is in the middle of the first service call.
unsub = hass.bus.async_listen("1", heard_event_cb)
logger.debug("starting 1st script")
hass.async_create_task(
script_obj.async_run(
{"fire1": "1", "listen1": "2", "fire2": "3", "listen2": "4"}
)
)
await asyncio.wait_for(heard_event.wait(), 1)
unsub()
logger.debug("starting 2nd script")
await script_obj.async_run(
{"fire1": "2", "listen1": "3", "fire2": "4", "listen2": "4"}
)
await hass.async_block_till_done()
assert len(calls) == 4
async def test_activating_scene(hass):
"""Test the activation of a scene."""
context = Context()
calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)
sequence = cv.SCRIPT_SCHEMA({"scene": "scene.hello"})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"
@pytest.mark.parametrize("count", [1, 3])
async def test_stop_no_wait(hass, count):
"""Test stopping script."""
service_started_sem = asyncio.Semaphore(0)
finish_service_event = asyncio.Event()
event = "test_event"
events = async_capture_events(hass, event)
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
service_started_sem.release()
await finish_service_event.wait()
hass.services.async_register("test", "script", async_simulate_long_service)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=count)
# Get script started specified number of times and wait until the test.script
# service has started for each run.
tasks = []
for _ in range(count):
hass.async_create_task(script_obj.async_run())
tasks.append(hass.async_create_task(service_started_sem.acquire()))
await asyncio.wait_for(asyncio.gather(*tasks), 1)
# Can't assert just yet because we haven't verified stopping works yet.
# If assert fails we can hang test if async_stop doesn't work.
    script_was_running = script_obj.is_running
were_no_events = len(events) == 0
# Begin the process of stopping the script (which should stop all runs), and then
# let the service calls complete.
hass.async_create_task(script_obj.async_stop())
finish_service_event.set()
await hass.async_block_till_done()
    assert script_was_running
assert were_no_events
assert not script_obj.is_running
assert len(events) == 0
async def test_delay_basic(hass, mock_timeout):
"""Test the delay."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 5}, "alias": delay_alias})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
async def test_multiple_runs_delay(hass, mock_timeout):
"""Test multiple runs with delay in script."""
event = "test_event"
events = async_capture_events(hass, event)
delay = timedelta(seconds=5)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"delay": delay},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in a delay.
script_obj.sequence[1]["alias"] = "delay run 2"
delay_started_flag = async_watch_for_action(script_obj, "delay run 2")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
async_fire_time_changed(hass, dt_util.utcnow() + delay)
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_delay_template_ok(hass, mock_timeout):
"""Test the delay as a template."""
sequence = cv.SCRIPT_SCHEMA({"delay": "00:00:{{ 5 }}"})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_invalid(hass, caplog):
"""Test the delay as a template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": "{{ invalid_delay }}"},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
start_idx = len(caplog.records)
await script_obj.async_run()
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_delay_template_complex_ok(hass, mock_timeout):
"""Test the delay with a working complex template."""
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": "{{ 5 }}"}})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_complex_invalid(hass, caplog):
"""Test the delay with a complex template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": {"seconds": "{{ invalid_delay }}"}},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
start_idx = len(caplog.records)
await script_obj.async_run()
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_cancel_delay(hass):
"""Test the cancelling while the delay is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"delay": {"seconds": 5}}, {"event": event}])
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
async def test_wait_template_basic(hass):
"""Test the wait template."""
wait_alias = "wait step"
sequence = cv.SCRIPT_SCHEMA(
{
"wait_template": "{{ states.switch.test.state == 'off' }}",
"alias": wait_alias,
}
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
async def test_multiple_runs_wait_template(hass):
"""Test multiple runs with wait_template in script."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in wait_template.
hass.async_create_task(script_obj.async_run())
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_cancel_wait_template(hass):
"""Test the cancelling while wait_template is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
async def test_wait_template_not_schedule(hass):
"""Test the wait template with correct condition."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("switch.test", "on")
await script_obj.async_run()
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
@pytest.mark.parametrize(
"continue_on_timeout,n_events", [(False, 0), (True, 1), (None, 1)]
)
async def test_wait_template_timeout(hass, mock_timeout, continue_on_timeout, n_events):
"""Test the wait template, halt on timeout."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = [
{"wait_template": "{{ states.switch.test.state == 'off' }}", "timeout": 5},
{"event": event},
]
if continue_on_timeout is not None:
sequence[0]["continue_on_timeout"] = continue_on_timeout
sequence = cv.SCRIPT_SCHEMA(sequence)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == n_events
async def test_wait_template_variables(hass):
"""Test the wait template with variables."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ is_state(data, 'off') }}"})
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run({"data": "switch.test"}))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_condition_basic(hass):
"""Test if we can use conditions in a script."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"condition": "template",
"value_template": "{{ states.test.entity.state == 'hello' }}",
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == 2
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == 3
@patch("homeassistant.helpers.script.condition.async_from_config")
async def test_condition_created_once(async_from_config, hass):
"""Test that the conditions do not get created multiple times."""
sequence = cv.SCRIPT_SCHEMA(
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
}
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
async_from_config.reset_mock()
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await script_obj.async_run()
await hass.async_block_till_done()
async_from_config.assert_called_once()
assert len(script_obj._config_cache) == 1
async def test_condition_all_cached(hass):
"""Test that multiple conditions get cached."""
sequence = cv.SCRIPT_SCHEMA(
[
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
},
{
"condition": "template",
"value_template": '{{ states.test.entity.state != "hello" }}',
},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(script_obj._config_cache) == 2
async def test_repeat_count(hass):
"""Test repeat action w/ count option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = cv.SCRIPT_SCHEMA(
{
"repeat": {
"count": count,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
}
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == str(index == 0)
assert event.data.get("index") == str(index + 1)
assert event.data.get("last") == str(index == count - 1)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_conditional(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = {
"repeat": {
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
},
},
{"wait_template": "{{ is_state('sensor.test', 'next') }}"},
{"wait_template": "{{ not is_state('sensor.test', 'next') }}"},
],
}
}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ not is_state('sensor.test', 'done') }}",
}
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ is_state('sensor.test', 'done') }}",
}
script_obj = script.Script(hass, cv.SCRIPT_SCHEMA(sequence))
wait_started = async_watch_for_action(script_obj, "wait")
hass.states.async_set("sensor.test", "1")
hass.async_create_task(script_obj.async_run())
try:
for index in range(2, count + 1):
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", index)
await asyncio.wait_for(wait_started.wait(), 1)
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "done")
await asyncio.wait_for(hass.async_block_till_done(), 1)
except asyncio.TimeoutError:
await script_obj.async_stop()
raise
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == str(index == 0)
assert event.data.get("index") == str(index + 1)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_var_in_condition(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = {"repeat": {"sequence": {"event": event}}}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ repeat.index <= 2 }}",
}
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ repeat.index == 2 }}",
}
script_obj = script.Script(hass, cv.SCRIPT_SCHEMA(sequence))
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run()
assert len(events) == 2
async def test_repeat_nested(hass):
"""Test nested repeats."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}"
},
},
{
"repeat": {
"count": 2,
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
{
"repeat": {
"count": 2,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
}
},
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
],
}
},
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}"
},
},
]
)
script_obj = script.Script(hass, sequence, "test script")
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run()
assert len(events) == 10
assert events[0].data == {"repeat": "None"}
assert events[-1].data == {"repeat": "None"}
for index, result in enumerate(
(
("True", "1", "False"),
("True", "1", "False"),
("False", "2", "True"),
("True", "1", "False"),
("False", "2", "True"),
("True", "1", "False"),
("False", "2", "True"),
("False", "2", "True"),
),
1,
):
assert events[index].data == {
"first": result[0],
"index": result[1],
"last": result[2],
}
@pytest.mark.parametrize("var,result", [(1, "first"), (2, "second"), (3, "default")])
async def test_choose(hass, var, result):
"""Test choose action."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"choose": [
{
"conditions": {
"condition": "template",
"value_template": "{{ var == 1 }}",
},
"sequence": {"event": event, "event_data": {"choice": "first"}},
},
{
"conditions": {
"condition": "template",
"value_template": "{{ var == 2 }}",
},
"sequence": {"event": event, "event_data": {"choice": "second"}},
},
],
"default": {"event": event, "event_data": {"choice": "default"}},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"var": var})
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["choice"] == result
@pytest.mark.parametrize(
"action",
[
{"repeat": {"count": 1, "sequence": {"event": "abc"}}},
{"choose": {"conditions": [], "sequence": {"event": "abc"}}},
{"choose": [], "default": {"event": "abc"}},
],
)
async def test_multiple_runs_repeat_choose(hass, caplog, action):
"""Test parallel runs with repeat & choose actions & max_runs > default."""
max_runs = script.DEFAULT_MAX + 1
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(action), script_mode="parallel", max_runs=max_runs
)
events = async_capture_events(hass, "abc")
for _ in range(max_runs):
hass.async_create_task(script_obj.async_run())
await hass.async_block_till_done()
assert "WARNING" not in caplog.text
assert "ERROR" not in caplog.text
assert len(events) == max_runs
async def test_last_triggered(hass):
"""Test the last_triggered."""
event = "test_event"
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence)
assert script_obj.last_triggered is None
time = dt_util.utcnow()
with mock.patch("homeassistant.helpers.script.utcnow", return_value=time):
await script_obj.async_run()
await hass.async_block_till_done()
assert script_obj.last_triggered == time
async def test_propagate_error_service_not_found(hass):
"""Test that a script aborts when a service is not found."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence)
with pytest.raises(exceptions.ServiceNotFound):
await script_obj.async_run()
assert len(events) == 0
assert not script_obj.is_running
async def test_propagate_error_invalid_service_data(hass):
"""Test that a script aborts when we send invalid service data."""
event = "test_event"
events = async_capture_events(hass, event)
calls = async_mock_service(hass, "test", "script", vol.Schema({"text": str}))
sequence = cv.SCRIPT_SCHEMA(
[{"service": "test.script", "data": {"text": 1}}, {"event": event}]
)
script_obj = script.Script(hass, sequence)
with pytest.raises(vol.Invalid):
await script_obj.async_run()
assert len(events) == 0
assert len(calls) == 0
assert not script_obj.is_running
async def test_propagate_error_service_exception(hass):
"""Test that a script aborts when a service throws an exception."""
event = "test_event"
events = async_capture_events(hass, event)
@callback
def record_call(service):
"""Add recorded event to set."""
raise ValueError("BROKEN")
hass.services.async_register("test", "script", record_call)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence)
with pytest.raises(ValueError):
await script_obj.async_run()
assert len(events) == 0
assert not script_obj.is_running
async def test_referenced_entities(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data": {"entity_id": "light.service_not_list"},
},
{
"service": "test.script",
"data": {"entity_id": ["light.service_list"]},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"service": "test.script", "data": {"without": "entity_id"}},
{"scene": "scene.hello"},
{"event": "test_event"},
{"delay": "{{ delay_period }}"},
]
),
)
assert script_obj.referenced_entities == {
"light.service_not_list",
"light.service_list",
"sensor.condition",
"scene.hello",
}
# Test we cache results.
assert script_obj.referenced_entities is script_obj.referenced_entities
async def test_referenced_devices(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{"domain": "light", "device_id": "script-dev-id"},
{
"condition": "device",
"device_id": "condition-dev-id",
"domain": "switch",
},
]
),
)
assert script_obj.referenced_devices == {"script-dev-id", "condition-dev-id"}
# Test we cache results.
assert script_obj.referenced_devices is script_obj.referenced_devices
@contextmanager
def does_not_raise():
"""Indicate no exception is expected."""
yield
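# Hedged usage sketch (editor addition, not part of the original test module):
# `does_not_raise` is typically paired with pytest.raises in a parametrized
# test so the passing and the failing case share one test body, e.g.
#
#   @pytest.mark.parametrize(
#       "data,expectation",
#       [({"text": "ok"}, does_not_raise()),
#        ({"text": 1}, pytest.raises(vol.Invalid))],
#   )
#   def test_schema(data, expectation):
#       with expectation:
#           vol.Schema({"text": str})(data)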
async def test_script_mode_single(hass, caplog):
"""Test overlapping runs with max_runs = 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
await script_obj.async_run()
assert "Already running" in caplog.text
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
@pytest.mark.parametrize(
"script_mode,messages,last_events",
[("restart", ["Restarting"], [2]), ("parallel", [], [2, 2])],
)
async def test_script_mode_2(hass, caplog, script_mode, messages, last_events):
"""Test overlapping runs with max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
logger = logging.getLogger("TEST")
max_runs = 1 if script_mode == "restart" else 2
script_obj = script.Script(
hass, sequence, script_mode=script_mode, max_runs=max_runs, logger=logger
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 1
assert all(
any(
rec.levelname == "INFO"
and rec.name == "TEST"
and message in rec.message
for rec in caplog.records
)
for message in messages
)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2 + len(last_events)
for idx, value in enumerate(last_events, start=2):
assert events[idx].data["value"] == value
async def test_script_mode_queued(hass):
"""Test overlapping runs with script_mode = 'queued' & max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass, sequence, script_mode="queued", max_runs=2, logger=logger
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
# This second run should not start until the first run has finished.
hass.async_create_task(script_obj.async_run())
await asyncio.sleep(0)
assert script_obj.is_running
assert len(events) == 1
wait_started_flag.clear()
hass.states.async_set("switch.test", "off")
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
wait_started_flag.clear()
hass.states.async_set("switch.test", "on")
await asyncio.wait_for(wait_started_flag.wait(), 1)
await asyncio.sleep(0)
assert script_obj.is_running
assert len(events) == 3
assert events[2].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await asyncio.sleep(0)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[3].data["value"] == 2
async def test_script_logging(hass, caplog):
"""Test script logging."""
script_obj = script.Script(hass, [], "Script with % Name")
script_obj._log("Test message with name %s", 1)
assert "Script with % Name: Test message with name 1" in caplog.text
script_obj = script.Script(hass, [])
script_obj._log("Test message without name %s", 2)
assert "Test message without name 2" in caplog.text
async def test_shutdown_at(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
assert not script_obj.is_running
assert "Stopping scripts running at shutdown: test script" in caplog.text
async def test_shutdown_after(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
hass.state = CoreState.stopping
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
assert not script_obj.is_running
assert (
"Stopping scripts running too long after shutdown: test script"
in caplog.text
)
async def test_update_logger(hass, caplog):
"""Test updating logger."""
sequence = cv.SCRIPT_SCHEMA({"event": "test_event"})
script_obj = script.Script(hass, sequence)
await script_obj.async_run()
await hass.async_block_till_done()
assert script.__name__ in caplog.text
log_name = "testing.123"
script_obj.update_logger(logging.getLogger(log_name))
await script_obj.async_run()
await hass.async_block_till_done()
assert log_name in caplog.text
|
pschmitt/home-assistant
|
tests/helpers/test_script.py
|
Python
|
apache-2.0
| 46,540
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple generator and discriminator models.
Based on the convolutional and "deconvolutional" models presented in
"Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks" by A. Radford et. al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def _leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.2)
def _batch_norm(x, is_training, name):
return tf.layers.batch_normalization(
x, momentum=0.9, epsilon=1e-5, training=is_training, name=name)
def _dense(x, channels, name):
return tf.layers.dense(
x, channels,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _conv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _deconv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d_transpose(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def discriminator(x, is_training=True, scope='Discriminator'):
# conv64-lrelu + conv128-bn-lrelu + fc1024-bn-lrelu + fc1
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _conv2d(x, 64, 4, 2, name='d_conv1')
x = _leaky_relu(x)
x = _conv2d(x, 128, 4, 2, name='d_conv2')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn2'))
x = tf.reshape(x, [-1, 7 * 7 * 128])
x = _dense(x, 1024, name='d_fc3')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn3'))
x = _dense(x, 1, name='d_fc4')
return x
def generator(x, is_training=True, scope='Generator'):
# fc1024-bn-relu + fc6272-bn-relu + deconv64-bn-relu + deconv1-tanh
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _dense(x, 1024, name='g_fc1')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn1'))
x = _dense(x, 7 * 7 * 128, name='g_fc2')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn2'))
x = tf.reshape(x, [-1, 7, 7, 128])
x = _deconv2d(x, 64, 4, 2, name='g_dconv3')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn3'))
x = _deconv2d(x, 1, 4, 2, name='g_dconv4')
x = tf.tanh(x)
return x
# TODO(chrisying): objective score (e.g. MNIST score)
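# Hedged shape-check sketch (editor addition, not part of the original file).
# Builds both networks once in a throwaway graph purely to document the
# expected tensor shapes; the latent size of 100 and the batch size of 8 are
# assumptions, not values taken from the original code.
def _shape_check_sketch():
  with tf.Graph().as_default():
    z = tf.random.normal([8, 100])
    images = generator(z, is_training=False)            # -> (8, 28, 28, 1)
    logits = discriminator(images, is_training=False)   # -> (8, 1)
    return images.shape, logits.shape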
|
tensorflow/tpu
|
models/experimental/dcgan/mnist_model.py
|
Python
|
apache-2.0
| 3,215
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Models for TranspileConfig and its related components."""
from qiskit.transpiler.models import TranspileConfigSchema
from qiskit.validation import BaseModel, bind_schema
@bind_schema(TranspileConfigSchema)
class TranspileConfig(BaseModel):
"""Model for TranspileConfig.
Please note that this class only describes the required fields. For the
full description of the model, please check ``TranspileConfigSchema``.
Attributes:
optimization_level (int): a non-negative integer indicating the
optimization level. 0 means no transformation on the circuit. Higher
levels may produce more optimized circuits, but may take longer.
"""
def __init__(self, optimization_level, **kwargs):
self.optimization_level = optimization_level
super().__init__(**kwargs)
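# Hedged usage sketch (editor addition, not part of the original module):
# only `optimization_level` is required; any other keyword arguments are
# forwarded to BaseModel by the schema-bound constructor, as shown above.
#
#   config = TranspileConfig(optimization_level=1)
#   assert config.optimization_level == 1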
|
QISKit/qiskit-sdk-py
|
qiskit/transpiler/transpile_config.py
|
Python
|
apache-2.0
| 1,338
|
# Copyright 2015 Tianchuan Du University of Delaware
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import cPickle
import gzip
import numpy
import os
import sys
import theano
from theano.tensor.shared_randomstreams import RandomStreams
import time
from io_func.model_io import _nnet2file, _file2nnet, _cfg2file, log
from learning.sgd import train_sgd_verbose, validate_by_minibatch_verbose
from models.cnn_sat import CNN_SAT
import theano.tensor as T
from utils.network_config import NetworkConfig
from utils.utils import parse_arguments
# Implements the Speaker Adaptive Training of DNNs proposed in the following papers:
# [1] Yajie Miao, Hao Zhang, Florian Metze. "Towards Speaker Adaptive Training of Deep
# Neural Network Acoustic Models". Interspeech 2014.
# [2] Yajie Miao, Lu Jiang, Hao Zhang, Florian Metze. "Improvements to Speaker Adaptive
# Training of Deep Neural Networks". SLT 2014.
if __name__ == '__main__':
# check the arguments
arg_elements = [sys.argv[i] for i in range(1, len(sys.argv))]
arguments = parse_arguments(arg_elements)
required_arguments = ['train_data', 'valid_data', 'si_nnet_spec', 'si_conv_nnet_spec', 'wdir', 'adapt_nnet_spec', 'init_model']
for arg in required_arguments:
if arguments.has_key(arg) == False:
print "Error: the argument %s has to be specified" % (arg); exit(1)
# mandatory arguments
train_data_spec = arguments['train_data']; valid_data_spec = arguments['valid_data']
si_nnet_spec = arguments['si_nnet_spec']
si_conv_nnet_spec = arguments['si_conv_nnet_spec']
adapt_nnet_spec = arguments['adapt_nnet_spec'];
wdir = arguments['wdir']
init_model_file = arguments['init_model']
# parse network configuration from arguments, and initialize data reading
cfg_si = NetworkConfig(); cfg_si.model_type = 'CNN'
cfg_si.parse_config_cnn(arguments, '10:' + si_nnet_spec, si_conv_nnet_spec)
cfg_si.init_data_reading(train_data_spec, valid_data_spec)
# parse the structure of the i-vector network
cfg_adapt = NetworkConfig()
net_split = adapt_nnet_spec.split(':')
adapt_nnet_spec = ''
for n in xrange(len(net_split) - 1):
adapt_nnet_spec += net_split[n] + ':'
cfg_adapt.parse_config_dnn(arguments, adapt_nnet_spec + '0')
numpy_rng = numpy.random.RandomState(89677)
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
log('> ... initializing the model')
# setup up the model
dnn = CNN_SAT(numpy_rng=numpy_rng, theano_rng = theano_rng, cfg_si = cfg_si, cfg_adapt = cfg_adapt)
# read the initial DNN (the SI DNN which has been well trained)
# _file2nnet(dnn.cnn_si.layers, filename = init_model_file)
_file2nnet(dnn.cnn_si.layers, filename = 'BKUP/nnet.param.si')
_file2nnet(dnn.dnn_adapt.layers, filename = 'BKUP/nnet.param.adapt')
# get the training and validation functions for adaptation network training
dnn.params = dnn.dnn_adapt.params # only update the parameters of the i-vector nnet
dnn.delta_params = dnn.dnn_adapt.delta_params
log('> ... getting the finetuning functions for iVecNN')
train_fn, valid_fn = dnn.build_finetune_functions(
(cfg_si.train_x, cfg_si.train_y), (cfg_si.valid_x, cfg_si.valid_y),
batch_size = cfg_adapt.batch_size)
log('> ... learning the adaptation network')
cfg = cfg_adapt
while (cfg.lrate.get_rate() != 0):
# one epoch of sgd training
# train_error = train_sgd_verbose(train_fn, cfg_si.train_sets, cfg_si.train_xy,
# cfg.batch_size, cfg.lrate.get_rate(), cfg.momentum)
# log('> epoch %d, training error %f ' % (cfg.lrate.epoch, 100*numpy.mean(train_error)) + '(%)')
# validation
valid_error = validate_by_minibatch_verbose(valid_fn, cfg_si.valid_sets, cfg_si.valid_xy, cfg.batch_size)
log('> epoch %d, lrate %f, validation error %f ' % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100*numpy.mean(valid_error)) + '(%)')
cfg.lrate.get_next_rate(current_error = 100 * numpy.mean(valid_error))
cfg.lrate.rate = 0
# save the model and network configuration
if cfg.param_output_file != '':
_nnet2file(dnn.dnn_adapt.layers, filename = cfg.param_output_file + '.adapt',
input_factor = cfg_adapt.input_dropout_factor, factor = cfg_adapt.dropout_factor)
_nnet2file(dnn.cnn_si.layers, filename = cfg.param_output_file + '.si',
input_factor = cfg_si.input_dropout_factor, factor = cfg_si.dropout_factor)
log('> ... the final PDNN model parameter is ' + cfg.param_output_file + ' (.si, .adapt)')
if cfg.cfg_output_file != '':
_cfg2file(cfg_adapt, filename=cfg.cfg_output_file + '.adapt')
_cfg2file(cfg_si, filename=cfg.cfg_output_file + '.si')
log('> ... the final PDNN model config is ' + cfg.cfg_output_file + ' (.si, .adapt)')
# output the model into Kaldi-compatible format
if cfg.kaldi_output_file != '':
dnn.cnn_si.fc_dnn.write_model_to_kaldi(cfg.kaldi_output_file + '.si')
dnn.dnn_adapt.write_model_to_kaldi(cfg.kaldi_output_file + '.adapt', with_softmax = False)
log('> ... the final Kaldi model is ' + cfg.kaldi_output_file + ' (.si, .adapt)')
|
magic2du/dlnn
|
cmds2/run_CNN_SAT.py
|
Python
|
apache-2.0
| 5,839
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime
from .world import world, logged_wait, res_filename
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.util import is_url
from .read_script_steps import i_get_the_script
#@step(r'the script code is "(.*)" and the value of "(.*)" is "(.*)"')
def the_script_code_and_attributes(step, source_code, param, param_value):
res_param_value = world.script[param]
eq_(res_param_value, param_value,
("The script %s is %s and the expected %s is %s" %
         (param, res_param_value, param, param_value)))
#@step(r'I create a whizzml script from a excerpt of code "(.*)"$')
def i_create_a_script(step, source_code):
resource = world.api.create_script(source_code,
{"project": world.project_id})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.script = resource['object']
world.scripts.append(resource['resource'])
#@step(r'I create a whizzml script from file "(.*)"$')
def i_create_a_script_from_file_or_url(step, source_code):
if not is_url(source_code):
source_code = res_filename(source_code)
resource = world.api.create_script(source_code,
{"project": world.project_id})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.script = resource['object']
world.scripts.append(resource['resource'])
#@step(r'I update the script with "(.*)", "(.*)"$')
def i_update_a_script(step, param, param_value):
resource = world.api.update_script(world.script['resource'],
{param: param_value})
world.status = resource['code']
eq_(world.status, HTTP_ACCEPTED)
world.location = resource['location']
world.script = resource['object']
#@step(r'I wait until the script status code is either (\d) or (-\d) less than (\d+)')
def wait_until_script_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
script_id = world.script['resource']
i_get_the_script(step, script_id)
status = get_status(world.script)
count = 0
while (status['code'] != int(code1) and
status['code'] != int(code2)):
count += 1
logged_wait(start, delta, count, "script")
assert_less((datetime.utcnow() - start).seconds, delta)
i_get_the_script(step, script_id)
status = get_status(world.script)
eq_(status['code'], int(code1))
#@step(r'I wait until the script is ready less than (\d+)')
def the_script_is_finished(step, secs):
wait_until_script_status_code_is(step, FINISHED, FAULTY, secs)
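# Hedged composition sketch (editor addition, not part of the original module):
# in the surrounding test suite these steps are chained inside a scenario,
# roughly as below; the WhizzML snippet and the timeout are illustrative only.
#
#   i_create_a_script(step, '(+ 1 1)')
#   the_script_is_finished(step, secs=30)
#   i_update_a_script(step, 'name', 'my script')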
|
mmerce/python
|
bigml/tests/create_script_steps.py
|
Python
|
apache-2.0
| 3,531
|
# Generated by Django 2.2.24 on 2021-10-02 14:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0072_product_order_index'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='order_index',
),
]
|
flavoi/diventi
|
diventi/products/migrations/0073_remove_product_order_index.py
|
Python
|
apache-2.0
| 337
|
#!/usr/bin/env python
# -*- coding: rot13 -*-
cevag "Uryyb TvgUho!".rapbqr("rot13")
|
ET-CS/python-patterns
|
examples/python2/fun/rot13.py
|
Python
|
apache-2.0
| 85
|
# -*- coding: utf-8 -*-
"""Configure Watchmaker documentation."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import os
import sys
#
# Created by sphinx-quickstart on Thu Jun 30 20:11:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('../src/'))
rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = []
extensions = [
'myst_parser',
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
}
autoclass_content = 'class'
autodoc_member_order = 'bysource'
autodoc_default_options = {'members': True, 'show-inheritance': True}
myst_heading_anchors = 4
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a map of suffix => parser:
#
# source_suffix = {
# '.md': 'markdown',
# '.rst': 'restructuredtext',
# }
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Watchmaker'
copyright = u'2016, Plus3 IT Systems' # noqa: A001
author = u'Plus3 IT Systems'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'0.1'
# The full version, including alpha/beta/rc tags.
# release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme
# Add any paths that contain custom themes here, relative to this directory.
#
# html_theme_path
if not rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'MothBall v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Watchmaker'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Watchmaker.tex', u'Watchmaker Documentation',
u'Plus3 IT Systems', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'watchmaker', u'Watchmaker Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Watchmaker', u'Watchmaker Documentation',
author, 'Watchmaker', 'Applied Configuration Management.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Prefix document path to section labels, otherwise autogenerated labels would
# look like 'heading' rather than 'path/to/file:heading'
autosectionlabel_prefix_document = True
linkcheck_ignore = [
r'https://github\.com/plus3it/watchmaker/compare/(\d+\.){3}\.(\.\d+){3}',
r'https://github\.com/plus3it/watchmaker/compare/(\d+\.){3}\.(\.x){3}',
r'https://watchmaker\.cloudarmor\.io/releases/.*$',
r'https://watchmaker\.cloudarmor\.io\/list\.html#.*',
r'https://docs\.saltstack\.com/en/latest/ref/modules/all/[a-z\.]*#[a-z\.]*', # noqa: E501, pylint: disable=line-too-long
]
def setup(app): # noqa: D103
app.add_css_file("theme_overrides.css")
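# Hedged build note (editor addition, not part of the original conf.py): with
# this configuration the HTML docs are typically built from the docs/
# directory with a plain Sphinx invocation such as
#
#   sphinx-build -b html . _build/html
#
# (the output path is an example).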
|
plus3it/watchmaker
|
docs/conf.py
|
Python
|
apache-2.0
| 11,108
|
import sys
import warnings
from django.db.models.fields import FieldDoesNotExist
from django.utils.text import capfirst
from django.utils.encoding import smart_text
try:
from django.db.models.options import get_verbose_name
except ImportError:
from django.utils.text import camel_case_to_spaces as get_verbose_name
from mongoengine.fields import ReferenceField
class PkWrapper(object):
"""Used to wrap the Primary Key so it can mimic Django's expectations
"""
editable = False
remote_field = None
def __init__(self, wrapped):
self.obj = wrapped
def __getattr__(self, attr):
if attr in dir(self.obj):
return getattr(self.obj, attr)
raise AttributeError("{} has no {}".format(self, attr))
def __setattr__(self, attr, value):
if attr != 'obj' and hasattr(self.obj, attr):
setattr(self.obj, attr, value)
super(PkWrapper, self).__setattr__(attr, value)
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(obj.pk)
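# Hedged usage sketch (editor addition, not part of the original module):
# wrapping an arbitrary object proxies attribute reads to it, while
# Django-only attributes such as `name`/`attname` live on the wrapper itself,
# which is exactly how DocumentMetaWrapper._init_pk uses it below.
#
#   class _FakeIdField(object):   # hypothetical stand-in for a mongoengine field
#       required = True
#   pk = PkWrapper(_FakeIdField())
#   pk.name = 'id'                # stored on the wrapper (fake has no `name`)
#   pk.attname = 'id'
#   assert pk.required is True    # read through to the wrapped field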
class DocumentMetaWrapper(object):
"""
Used to store mongoengine's _meta dict to make the document admin
as compatible as possible to django's meta class on models.
"""
_pk = None
pk_name = None
app_label = None
model_name = None
verbose_name = None
has_auto_field = False
object_name = None
proxy = []
virtual_fields = []
concrete_fields = []
proxied_children = []
parents = {}
many_to_many = []
swapped = False
_field_cache = None
document = None
_meta = None
def __init__(self, document):
if isinstance(document._meta, DocumentMetaWrapper):
meta = document._meta._meta
else:
meta = document._meta
self.document = document
self._meta = meta or {}
self.model = document
self.concrete_model = document
self.concrete_fields = document._fields.values()
self.fields = self.concrete_fields
try:
self.object_name = self.document.__name__
except AttributeError:
self.object_name = self.document.__class__.__name__
self.model_name = self.object_name.lower()
self.app_label = self.get_app_label()
self.verbose_name = self.get_verbose_name()
# EmbeddedDocuments don't have an id field.
try:
self.pk_name = self._meta['id_field']
self._init_pk()
except KeyError:
pass
@property
def module_name(self):
"""
This property has been deprecated in favor of `model_name`.
"""
warnings.warn(
"Options.module_name has been deprecated in favor of model_name",
PendingDeprecationWarning, stacklevel=2)
return self.model_name
def get_app_label(self):
model_module = sys.modules[self.document.__module__]
return model_module.__name__.split('.')[-2]
def get_verbose_name(self):
"""
Returns the verbose name of the document.
Checks the original meta dict first. If it is not found
        then generates a verbose name from the object name.
"""
try:
return capfirst(get_verbose_name(self._meta['verbose_name']))
except KeyError:
return capfirst(get_verbose_name(self.object_name))
@property
def verbose_name_raw(self):
return self.verbose_name
@property
def verbose_name_plural(self):
return "%ss" % self.verbose_name
@property
def pk(self):
if not hasattr(self._pk, 'attname'):
self._init_pk()
return self._pk
def get_fields(self, include_parents=True, include_hidden=False):
# XXX: simple placeholder; TODO: handle options;
return self.concrete_fields
def _init_pk(self):
"""
        Adds a wrapper around the document's pk field. The wrapper object gets the attributes
django expects on the pk field, like name and attname.
The function also adds a _get_pk_val method to the document.
"""
if self.id_field is None:
return
try:
pk_field = getattr(self.document, self.id_field)
self._pk = PkWrapper(pk_field)
self._pk.name = self.id_field
self._pk.attname = self.id_field
except AttributeError:
return
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_ordered_objects(self):
return []
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._field_cache[name]
except TypeError:
self._init_field_cache()
return self._field_cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
def _init_field_cache(self):
if self._field_cache is None:
self._field_cache = {}
for f in self.document._fields.values():
if isinstance(f, ReferenceField):
document = f.document_type
self._field_cache[document._meta.module_name] = (f, document, False, False)
else:
self._field_cache[f.name] = (f, None, True, False)
return self._field_cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
return self.get_field_by_name(name)[0]
def __getattr__(self, name):
try:
return self._meta[name]
except KeyError as e:
raise AttributeError(*e.args)
def __setattr__(self, name, value):
if not hasattr(self, name):
self._meta[name] = value
else:
super(DocumentMetaWrapper, self).__setattr__(name, value)
def __getitem__(self, key):
return self._meta[key]
def __setitem__(self, key, value):
self._meta[key] = value
def __contains__(self, key):
return key in self._meta
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def get_parent_list(self):
return []
def get_all_related_objects(self, *args, **kwargs):
return []
def iteritems(self):
return self._meta.iteritems()
def items(self):
return self._meta.items()
|
iandd0824/ri-app
|
web/django_mongoengine/forms/document_options.py
|
Python
|
apache-2.0
| 7,439
|
from typing import Any, Dict, List, Optional, Set
from annotypes import add_call_types, stringify_error
from cothread import cothread
from p4p import Value
from p4p.server import DynamicProvider, Server, ServerOperation
from p4p.server.cothread import Handler, SharedPV
from malcolm.core import (
APublished,
BlockMeta,
Controller,
Delta,
Error,
Method,
MethodModel,
Post,
ProcessPublishHook,
Put,
Response,
Return,
RLock,
Subscribe,
Unsubscribe,
method_return_unpacked,
)
from malcolm.modules import builtin
from .pvaconvert import convert_dict_to_value, convert_value_to_dict, update_path
class BlockHandler(Handler):
def __init__(self, controller: Controller, field: str = None) -> None:
self.controller = controller
# Lock to control access to self.pv
self._lock = RLock()
self.field = field
self.pv: Optional[SharedPV] = None
self.value: Value = None
self.put_paths: Set[str] = set()
def rpc(self, pv: SharedPV, op: ServerOperation) -> None:
value = op.value()
if value.getID() == "epics:nt/NTURI:1.0":
# We got an NTURI, get path from path and parameters from query
assert value.scheme == "pva", "Can only handle NTURI with scheme=pva"
prefix = self.controller.mri + "."
assert value.path.startswith(
prefix
), "NTURI path '%s' doesn't start with '%s'" % (value.path, prefix)
method = value.path[len(prefix) :]
parameters = convert_value_to_dict(value.query)
else:
# We got something else, take path from pvRequest method and our mri
# and parameters from the full value
if self.field is not None:
# We already know the method name
method = self.field
else:
# Get the path and string "value" from the put value
method = op.pvRequest().get("method")
assert method, "No 'method' in pvRequest:\n%s" % op.pvRequest()
parameters = convert_value_to_dict(value)
path = [self.controller.mri, method]
view = self.controller.block_view()[method]
assert isinstance(
view, Method
), "%s.%s is not a Method so cannot do RPC" % tuple(path)
add_wrapper = method_return_unpacked() in view.meta.tags
self.controller.log.debug(
f"{self.controller.mri}: RPC method {method} called with "
f"params {parameters}"
)
post = Post(path=path, parameters=parameters)
def handle_post_response(response: Response) -> None:
if isinstance(response, Return):
ret: Any
if add_wrapper:
# Method gave us return unpacked (bare string or other type)
# so we must wrap it in a structure to send it
ret = {"return": response.value}
else:
ret = response.value
v = convert_dict_to_value(ret)
if ret:
self.controller.log.debug(
f"{self.controller.mri}: RPC method {method} returned with "
f"value {ret}"
)
else:
self.controller.log.debug(
f"{self.controller.mri}: RPC method {method} returned"
)
op.done(v)
else:
if isinstance(response, Error):
message = stringify_error(response.message)
self.controller.log.debug(
f"{self.controller.mri}: RPC method {method} resulted in "
f"error ({message})"
)
else:
message = "BadResponse: %s" % response.to_dict()
self.controller.log.debug(
f"{self.controller.mri}: RPC method {method} got a bad "
f"response ({message})"
)
op.done(error=message)
post.set_callback(handle_post_response)
self.controller.handle_request(post).get()
def put(self, pv: SharedPV, op: ServerOperation) -> None:
path = [self.controller.mri]
# We work out what to Put by taking every field that is marked as
# changed and walking up the tree, adding every dotted field name
# to the tree on the way up. This set will contain something like:
# {"attr.value", "attr"}
# Or for a table:
# {"table.value.colA", "table.value.colB", "table.value", "table"}
# Or if self.field:
# {"value"}
changed_fields_inc_parents = op.value().changedSet(parents=True, expand=False)
# Taking the intersection with all puttable paths should yield the
# thing we want to change, so value_changed would be:
# {"attr.value"} or {"table.value"} or {"value"}
value_changed = changed_fields_inc_parents.intersection(self.put_paths)
assert (
len(value_changed) == 1
), "Can only do a Put to a single field, got %s" % list(value_changed)
changed = list(value_changed)[0]
if self.field is not None:
# Only accept a Put to "value"
assert changed == "value", "Can only put to value of %s.%s, not %s" % (
self.controller.mri,
self.field,
changed,
)
path += [self.field, "value"]
op_value = op.value()
else:
# Get the path and string "value" from the put value
split = changed.split(".")
assert (
len(split) == 2 and split[1] == "value"
), "Can only put to value of %s.%s, not %s" % (
self.controller.mri,
split[0],
split[1],
)
path += list(split)
op_value = op.value()[split[0]]
value = convert_value_to_dict(op_value)["value"]
put = Put(path=path, value=value)
def handle_put_response(response: Response) -> None:
if isinstance(response, Return):
op.done()
else:
if isinstance(response, Error):
message = stringify_error(response.message)
else:
message = "BadResponse: %s" % response.to_dict()
op.done(error=message)
put.set_callback(handle_put_response)
self.controller.handle_request(put).get()
def handle(self, response: Response) -> None:
# Called from whatever thread the child block could be in, so
# must already be a good thread to take the lock
with self._lock:
if self.pv:
# onFirstConnect has been called, should be able to update it
try:
assert isinstance(response, Delta), (
"Expecting Delta response, got %s" % response
)
# We got a delta, create or update value and notify
if self.value is None:
# Open it with the value
self.controller.log.debug("About to open")
self._create_initial_value(response)
elif self.pv.isOpen():
# Update it with values
self._update_value(response)
except Exception:
self.controller.log.debug(
f"Closing pv because of error in response {response}",
exc_info=True,
)
# We got a return or error, close the connection to clients
self.pv.close()
def _create_initial_value(self, response: Delta) -> None:
# Called with the lock taken
assert response.changes, "No changes"
assert (
len(response.changes) == 1
and len(response.changes[0]) == 2
and response.changes[0][0] == []
), "Expected root update, got %s" % (response.changes,)
self.value = convert_dict_to_value(response.changes[0][1])
unputtable_ids = (MethodModel.typeid, BlockMeta.typeid)
if not self.field:
self.put_paths = set(
"%s.value" % x
for x, v in self.value.items()
if v.getID() not in unputtable_ids
)
elif self.value.getID() not in unputtable_ids:
self.put_paths = {"value"}
else:
self.put_paths = set()
self.controller.log.debug(f"Opening with {list(self.value)}")
assert self.pv, "No pv"
self.pv.open(self.value)
def _update_value(self, delta: Delta) -> None:
# Called with the lock taken
self.value.unmark()
assert delta.changes, "No Delta changes"
for change in delta.changes:
assert len(change) == 2, "Path %s deleted" % change[0]
assert len(change[0]) > 0, "Can't handle root update %s after initial" % (
change,
)
# Path will have at least one element
path, update = change
update_path(self.value, path, update)
# No type change, post the updated value
assert self.pv, "No pv"
self.pv.post(self.value)
# Need camelCase as called by p4p Server
# noinspection PyPep8Naming
def onFirstConnect(self, pv: SharedPV) -> None:
# Store the PV, but don't open it now, let the first Delta do this
with self._lock:
self.pv = pv
path = [self.controller.mri]
if self.field is not None:
path.append(self.field)
request = Subscribe(path=path, delta=True)
request.set_callback(self.handle)
# No need to wait for first update here
self.controller.handle_request(request)
# Need camelCase as called by p4p Server
# noinspection PyPep8Naming
def onLastDisconnect(self, pv: SharedPV) -> None:
assert self.pv, "onFirstConnect not called yet"
# No-one listening, unsubscribe
with self._lock:
self.pv.close()
self.pv = None
self.value = None
request = Unsubscribe()
request.set_callback(self.handle)
self.controller.handle_request(request).get(timeout=1)
class PvaServerComms(builtin.controllers.ServerComms):
"""A class for communication between pva client and server"""
def __init__(self, mri: builtin.controllers.AMri) -> None:
super().__init__(mri)
self._pva_server = None
self._provider = None
self._published: Set[str] = set()
self._pvs: Dict[str, Dict[Optional[str], SharedPV]] = {}
# Hooks
self.register_hooked(ProcessPublishHook, self.publish)
# Need camelCase as called by p4p Server
# noinspection PyPep8Naming
def testChannel(self, channel_name: str) -> bool:
if channel_name in self._published:
# Someone is asking for a Block
return True
elif "." in channel_name:
# Someone is asking for the field of a Block
mri, field = channel_name.rsplit(".", 1)
return mri in self._published
else:
# We don't have it
return False
# Need camelCase as called by p4p Server
# noinspection PyPep8Naming
def makeChannel(self, channel_name: str, src: str) -> SharedPV:
# Need to spawn as we take a lock here and in process
return cothread.CallbackResult(
self._make_channel, channel_name, src, callback_timeout=1.0
)
def _make_channel(self, channel_name: str, src: str) -> SharedPV:
self.log.debug(f"Making PV {channel_name} for {src}")
if channel_name in self._published:
# Someone is asking for a Block
mri = channel_name
field = None
elif "." in channel_name:
# Someone is asking for the field of a Block
mri, field = channel_name.rsplit(".", 1)
else:
raise NameError("Bad channel %s" % channel_name)
with self._lock:
pvs = self._pvs.setdefault(mri, {})
try:
pv = pvs[field]
except KeyError:
assert self.process, "No attached process"
controller = self.process.get_controller(mri)
handler = BlockHandler(controller, field)
# We want any client passing a pvRequest field() to ONLY receive
# that field. The default behaviour of p4p is to send a masked
# version of the full structure. The mapperMode option allows us
# to tell p4p to send a slice instead
# https://github.com/mdavidsaver/pvDataCPP/blob/master/src/copy/pv/createRequest.h#L76
pv = SharedPV(handler=handler, options={"mapperMode": "Slice"})
pvs[field] = pv
return pv
def do_init(self):
super().do_init()
if self._pva_server is None:
self.log.info("Starting PVA server")
self._provider = DynamicProvider("PvaServerComms", self)
self._pva_server = Server(providers=[self._provider])
self.log.info("Started PVA server")
def do_disable(self):
super().do_disable()
if self._pva_server is not None:
self.log.info("Stopping PVA server")
# Stop the server
self._pva_server.stop()
# Disconnect everyone
self.disconnect_pv_clients(list(self._pvs))
# Get rid of the server reference so we can't stop again
self._pva_server = None
self.log.info("Stopped PVA server")
@add_call_types
def publish(self, published: APublished) -> None:
self._published = set(published)
if self._pva_server:
with self._lock:
mris = [mri for mri in self._pvs if mri not in published]
# Delete blocks we no longer have
self.disconnect_pv_clients(mris)
def disconnect_pv_clients(self, mris: List[str]) -> None:
"""Disconnect anyone listening to any of the given mris"""
for mri in mris:
for pv in self._pvs.pop(mri, {}).values():
# Close pv with force destroy on, this will call
# onLastDisconnect
pv.close(destroy=True, sync=True, timeout=1.0)
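# Hedged client-side sketch (editor addition, not part of the original module):
# with PvaServerComms serving a process, a p4p cothread client can read a whole
# block or a single field of it; the mri and field name below are examples.
#
#   from p4p.client.cothread import Context
#   ctxt = Context("pva")
#   block = ctxt.get("SOME:MRI")           # full block structure
#   health = ctxt.get("SOME:MRI.health")   # one field, served via BlockHandler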
|
dls-controls/pymalcolm
|
malcolm/modules/pva/controllers/pvaservercomms.py
|
Python
|
apache-2.0
| 14,734
|
#!/usr/bin/python
# Copyright 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import subprocess
import requests
HEAT_PARAMS_PATH = '/etc/sysconfig/heat-params'
PUBLIC_IP_URL = 'http://169.254.169.254/latest/meta-data/public-ipv4'
CERT_DIR = '/etc/docker'
CERT_CONF_DIR = '%s/conf' % CERT_DIR
CA_CERT_PATH = '%s/ca.crt' % CERT_DIR
SERVER_CONF_PATH = '%s/server.conf' % CERT_CONF_DIR
SERVER_KEY_PATH = '%s/server.key' % CERT_DIR
SERVER_CSR_PATH = '%s/server.csr' % CERT_DIR
SERVER_CERT_PATH = '%s/server.crt' % CERT_DIR
CSR_CONFIG_TEMPLATE = """
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
x509_extensions = req_ext
prompt = no
copy_extensions = copyall
[req_distinguished_name]
CN = swarm.invalid
[req_ext]
subjectAltName = %(subject_alt_names)s
extendedKeyUsage = clientAuth,serverAuth
"""
def _parse_config_value(value):
parsed_value = value
if parsed_value[-1] == '\n':
parsed_value = parsed_value[:-1]
return parsed_value[1:-1]
def load_config():
config = dict()
with open(HEAT_PARAMS_PATH, 'r') as fp:
for line in fp.readlines():
key, value = line.split('=', 1)
config[key] = _parse_config_value(value)
return config
def create_dirs():
os.makedirs(CERT_CONF_DIR)
def _get_public_ip():
return requests.get(PUBLIC_IP_URL).text
def _build_subject_alt_names(config):
subject_alt_names = [
'IP:%s' % _get_public_ip(),
'IP:%s' % config['SWARM_NODE_IP'],
'IP:127.0.0.1'
]
return ','.join(subject_alt_names)
def write_ca_cert(config):
bay_cert_url = '%s/certificates/%s' % (config['MAGNUM_URL'],
config['BAY_UUID'])
headers = {'X-Auth-Token': config['USER_TOKEN']}
ca_cert_resp = requests.get(bay_cert_url,
headers=headers)
with open(CA_CERT_PATH, 'w') as fp:
fp.write(ca_cert_resp.json()['pem'])
def write_server_key():
subprocess.call(['openssl', 'genrsa',
'-out', SERVER_KEY_PATH,
'4096'])
def _write_csr_config(config):
with open(SERVER_CONF_PATH, 'w') as fp:
params = {
'subject_alt_names': _build_subject_alt_names(config)
}
fp.write(CSR_CONFIG_TEMPLATE % params)
def create_server_csr(config):
_write_csr_config(config)
subprocess.call(['openssl', 'req', '-new',
'-days', '1000',
'-key', SERVER_KEY_PATH,
'-out', SERVER_CSR_PATH,
'-reqexts', 'req_ext',
'-extensions', 'req_ext',
'-config', SERVER_CONF_PATH])
with open(SERVER_CSR_PATH, 'r') as fp:
return {'bay_uuid': config['BAY_UUID'], 'csr': fp.read()}
def write_server_cert(config, csr_req):
cert_url = '%s/certificates' % config['MAGNUM_URL']
headers = {
'Content-Type': 'application/json',
'X-Auth-Token': config['USER_TOKEN']
}
csr_resp = requests.post(cert_url,
data=json.dumps(csr_req),
headers=headers)
with open(SERVER_CERT_PATH, 'w') as fp:
fp.write(csr_resp.json()['pem'])
def main():
config = load_config()
if config['INSECURE'] == 'False':
create_dirs()
write_ca_cert(config)
write_server_key()
csr_req = create_server_csr(config)
write_server_cert(config, csr_req)
if __name__ == '__main__':
main()
|
eshijia/magnum
|
magnum/templates/docker-swarm/fragments/make-cert.py
|
Python
|
apache-2.0
| 4,072
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
# pylint: disable=g-bad-import-order
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import ncf_input_pipeline
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def metric_fn(logits, dup_mask, match_mlperf):
dup_mask = tf.cast(dup_mask, tf.float32)
logits = tf.slice(logits, [0, 1], [-1, -1])
in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg(
logits,
dup_mask,
match_mlperf)
metric_weights = tf.cast(metric_weights, tf.float32)
return in_top_k, metric_weights
class MetricLayer(tf.keras.layers.Layer):
"""Custom layer of metrics for NCF model."""
def __init__(self, match_mlperf):
super(MetricLayer, self).__init__()
self.match_mlperf = match_mlperf
def get_config(self):
return {"match_mlperf": self.match_mlperf}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def call(self, inputs, training=False):
logits, dup_mask = inputs
if training:
hr_sum = 0.0
hr_count = 0.0
else:
metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf)
hr_sum = tf.reduce_sum(metric * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
self.add_metric(hr_sum, name="hr_sum", aggregation="mean")
self.add_metric(hr_count, name="hr_count", aggregation="mean")
return logits
class LossLayer(tf.keras.layers.Layer):
"""Pass-through loss layer for NCF model."""
def __init__(self, loss_normalization_factor):
# The loss may overflow in float16, so we use float32 instead.
super(LossLayer, self).__init__(dtype="float32")
self.loss_normalization_factor = loss_normalization_factor
self.loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="sum")
def get_config(self):
return {"loss_normalization_factor": self.loss_normalization_factor}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def call(self, inputs):
logits, labels, valid_pt_mask_input = inputs
loss = self.loss(
y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input)
loss = loss * (1.0 / self.loss_normalization_factor)
self.add_loss(loss)
return logits
class IncrementEpochCallback(tf.keras.callbacks.Callback):
"""A callback to increase the requested epoch for the data producer.
  This is needed because only a limited amount of data can be buffered, so we
  keep a moving window to represent the buffer and advance one of the window's
  boundaries at the start of each epoch.
"""
def __init__(self, producer):
self._producer = producer
def on_epoch_begin(self, epoch, logs=None):
self._producer.increment_request_epoch()
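# Editor-added usage sketch, not part of the original script: the callback only
# needs an object exposing increment_request_epoch(); the fake producer below is
# an illustrative stand-in for the real data producer.
def _increment_epoch_callback_example_sketch():
  class _FakeProducer(object):
    def __init__(self):
      self.requested_epochs = 0
    def increment_request_epoch(self):
      self.requested_epochs += 1
  producer = _FakeProducer()
  callback = IncrementEpochCallback(producer)
  callback.on_epoch_begin(epoch=0)
  return producer.requested_epochs  # 1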
class CustomEarlyStopping(tf.keras.callbacks.Callback):
"""Stop training has reached a desired hit rate."""
def __init__(self, monitor, desired_value):
super(CustomEarlyStopping, self).__init__()
self.monitor = monitor
self.desired = desired_value
self.stopped_epoch = 0
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current and current >= self.desired:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning("Early stopping conditioned on metric `%s` "
"which is not available. Available metrics are: %s",
self.monitor, ",".join(list(logs.keys())))
return monitor_value
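# Editor-added usage sketch, not part of the original script: get_monitor_value
# simply looks the monitored key up in the epoch logs; the metric name and
# numbers below are illustrative assumptions only.
def _custom_early_stopping_example_sketch():
  callback = CustomEarlyStopping(monitor="val_HR_METRIC", desired_value=0.635)
  # Returns 0.64 here; returns None (and logs a warning) if the key is missing.
  return callback.get_monitor_value({"val_HR_METRIC": 0.64})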
def _get_keras_model(params):
"""Constructs and returns the model."""
batch_size = params["batch_size"]
user_input = tf.keras.layers.Input(
shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32)
item_input = tf.keras.layers.Input(
shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32)
valid_pt_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool)
dup_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32)
label_input = tf.keras.layers.Input(
shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool)
base_model = neumf_model.construct_model(user_input, item_input, params)
logits = base_model.output
zeros = tf.keras.layers.Lambda(
lambda x: x * 0)(logits)
softmax_logits = tf.keras.layers.concatenate(
[zeros, logits],
axis=-1)
  # When the custom training loop is used, loss and metrics are computed inside
  # the training/evaluation step functions, so the layers below are only added
  # for the Model.fit() path.
if not params["keras_use_ctl"]:
softmax_logits = MetricLayer(
params["match_mlperf"])([softmax_logits, dup_mask_input])
# TODO(b/134744680): Use model.add_loss() instead once the API is well
# supported.
softmax_logits = LossLayer(batch_size)(
[softmax_logits, label_input, valid_pt_mask_input])
keras_model = tf.keras.Model(
inputs={
movielens.USER_COLUMN: user_input,
movielens.ITEM_COLUMN: item_input,
rconst.VALID_POINT_MASK: valid_pt_mask_input,
rconst.DUPLICATE_MASK: dup_mask_input,
rconst.TRAIN_LABEL_KEY: label_input},
outputs=softmax_logits)
keras_model.summary()
return keras_model
def run_ncf(_):
"""Run NCF training and eval with Keras."""
keras_utils.set_session_config(enable_xla=FLAGS.enable_xla)
if FLAGS.seed is not None:
print("Setting tf seed")
tf.random.set_seed(FLAGS.seed)
model_helpers.apply_clean(FLAGS)
if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras":
policy = tf.keras.mixed_precision.experimental.Policy(
"mixed_float16",
loss_scale=flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic"))
tf.keras.mixed_precision.experimental.set_policy(policy)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
params = ncf_common.parse_flags(FLAGS)
params["distribute_strategy"] = strategy
if params["use_tpu"] and not params["keras_use_ctl"]:
logging.error("Custom training loop must be used when using TPUStrategy.")
return
batch_size = params["batch_size"]
time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps)
callbacks = [time_callback]
producer, input_meta_data = None, None
generate_input_online = params["train_dataset_path"] is None
if generate_input_online:
# Start data producing thread.
num_users, num_items, _, _, producer = ncf_common.get_inputs(params)
producer.start()
per_epoch_callback = IncrementEpochCallback(producer)
callbacks.append(per_epoch_callback)
else:
assert params["eval_dataset_path"] and params["input_meta_data_path"]
with tf.io.gfile.GFile(params["input_meta_data_path"], "rb") as reader:
input_meta_data = json.loads(reader.read().decode("utf-8"))
num_users = input_meta_data["num_users"]
num_items = input_meta_data["num_items"]
params["num_users"], params["num_items"] = num_users, num_items
if FLAGS.early_stopping:
early_stopping_callback = CustomEarlyStopping(
"val_HR_METRIC", desired_value=FLAGS.hr_threshold)
callbacks.append(early_stopping_callback)
(train_input_dataset, eval_input_dataset,
num_train_steps, num_eval_steps) = \
(ncf_input_pipeline.create_ncf_input_data(
params, producer, input_meta_data, strategy))
steps_per_epoch = None if generate_input_online else num_train_steps
with distribution_utils.get_strategy_scope(strategy):
keras_model = _get_keras_model(params)
optimizer = tf.keras.optimizers.Adam(
learning_rate=params["learning_rate"],
beta_1=params["beta1"],
beta_2=params["beta2"],
epsilon=params["epsilon"])
if FLAGS.fp16_implementation == "graph_rewrite":
optimizer = \
tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer,
loss_scale=flags_core.get_loss_scale(FLAGS,
default_for_fp16="dynamic"))
elif FLAGS.dtype == "fp16" and params["keras_use_ctl"]:
      # When keras_use_ctl is False, Model.fit() applies loss scaling
      # automatically, so a LossScaleOptimizer is not needed here.
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
optimizer,
tf.keras.mixed_precision.experimental.global_policy().loss_scale)
if params["keras_use_ctl"]:
train_loss, eval_results = run_ncf_custom_training(
params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=generate_input_online)
else:
keras_model.compile(optimizer=optimizer, run_eagerly=FLAGS.run_eagerly)
if not FLAGS.ml_perf:
# Create Tensorboard summary and checkpoint callbacks.
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint_path = os.path.join(FLAGS.model_dir, "checkpoint")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True)
callbacks += [summary_callback, checkpoint_callback]
history = keras_model.fit(
train_input_dataset,
epochs=FLAGS.train_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
validation_data=eval_input_dataset,
validation_steps=num_eval_steps,
verbose=2)
logging.info("Training done. Start evaluating")
eval_loss_and_metrics = keras_model.evaluate(
eval_input_dataset, steps=num_eval_steps, verbose=2)
logging.info("Keras evaluation is done.")
# Keras evaluate() API returns scalar loss and metric values from
# evaluation as a list. Here, the returned list would contain
# [evaluation loss, hr sum, hr count].
eval_hit_rate = eval_loss_and_metrics[1] / eval_loss_and_metrics[2]
# Format evaluation result into [eval loss, eval hit accuracy].
eval_results = [eval_loss_and_metrics[0], eval_hit_rate]
if history and history.history:
train_history = history.history
train_loss = train_history["loss"][-1]
stats = build_stats(train_loss, eval_results, time_callback)
return stats
def run_ncf_custom_training(params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=True):
"""Runs custom training loop.
Args:
params: Dictionary containing training parameters.
strategy: Distribution strategy to be used for distributed training.
keras_model: Model used for training.
optimizer: Optimizer used for training.
callbacks: Callbacks to be invoked between batches/epochs.
train_input_dataset: tf.data.Dataset used for training.
eval_input_dataset: tf.data.Dataset used for evaluation.
num_train_steps: Total number of steps to run for training.
num_eval_steps: Total number of steps to run for evaluation.
generate_input_online: Whether input data was generated by data producer.
      When the data is generated by the data producer, the train dataset must
      be re-initialized after every epoch.
Returns:
A tuple of train loss and a list of training and evaluation results.
"""
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
reduction="sum", from_logits=True)
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
def train_step(train_iterator):
"""Called once per step to train the model."""
def step_fn(features):
"""Computes loss and applied gradient per replica."""
with tf.GradientTape() as tape:
softmax_logits = keras_model(features)
# The loss can overflow in float16, so we cast to float32.
softmax_logits = tf.cast(softmax_logits, "float32")
labels = features[rconst.TRAIN_LABEL_KEY]
loss = loss_object(
labels,
softmax_logits,
sample_weight=features[rconst.VALID_POINT_MASK])
loss *= (1.0 / params["batch_size"])
if FLAGS.dtype == "fp16":
loss = optimizer.get_scaled_loss(loss)
grads = tape.gradient(loss, keras_model.trainable_variables)
if FLAGS.dtype == "fp16":
grads = optimizer.get_unscaled_gradients(grads)
      # Converting gradients to dense form improves GPU performance for NCF.
grads = neumf_model.sparse_to_dense_grads(
list(zip(grads, keras_model.trainable_variables)))
optimizer.apply_gradients(grads)
return loss
per_replica_losses = strategy.run(
step_fn, args=(next(train_iterator),))
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
def eval_step(eval_iterator):
"""Called once per eval step to compute eval metrics."""
def step_fn(features):
"""Computes eval metrics per replica."""
softmax_logits = keras_model(features)
in_top_k, metric_weights = metric_fn(softmax_logits,
features[rconst.DUPLICATE_MASK],
params["match_mlperf"])
hr_sum = tf.reduce_sum(in_top_k * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
return hr_sum, hr_count
per_replica_hr_sum, per_replica_hr_count = (
strategy.run(
step_fn, args=(next(eval_iterator),)))
hr_sum = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_sum, axis=None)
hr_count = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_count, axis=None)
return hr_sum, hr_count
if not FLAGS.run_eagerly:
train_step = tf.function(train_step)
eval_step = tf.function(eval_step)
for callback in callbacks:
callback.on_train_begin()
# Not writing tensorboard summaries if running in MLPerf.
if FLAGS.ml_perf:
eval_summary_writer, train_summary_writer = None, None
else:
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
eval_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "eval"))
train_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "train"))
train_loss = 0
for epoch in range(FLAGS.train_epochs):
for cb in callbacks:
cb.on_epoch_begin(epoch)
    # Because the NCF dataset is sampled with randomness, not repeating data
    # elements across epochs has a significant impact on convergence. The
    # offline-generated TFRecord files therefore already contain every epoch's
    # worth of data, so the dataset only has to be re-initialized per epoch
    # when the input is generated online.
if generate_input_online:
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
train_loss = 0
for step in range(num_train_steps):
current_step = step + epoch * num_train_steps
for c in callbacks:
c.on_batch_begin(current_step)
train_loss += train_step(train_input_iterator)
      # Write the train loss once every 1000 steps.
if train_summary_writer and step % 1000 == 0:
with train_summary_writer.as_default():
tf.summary.scalar("training_loss", train_loss/(step + 1),
step=current_step)
for c in callbacks:
c.on_batch_end(current_step)
train_loss /= num_train_steps
logging.info("Done training epoch %s, epoch loss=%s.", epoch + 1,
train_loss)
eval_input_iterator = iter(
strategy.experimental_distribute_dataset(eval_input_dataset))
hr_sum = 0
hr_count = 0
for _ in range(num_eval_steps):
step_hr_sum, step_hr_count = eval_step(eval_input_iterator)
hr_sum += step_hr_sum
hr_count += step_hr_count
logging.info("Done eval epoch %s, hit_rate=%s.", epoch + 1,
hr_sum / hr_count)
if eval_summary_writer:
with eval_summary_writer.as_default():
tf.summary.scalar("hit_rate", hr_sum / hr_count, step=current_step)
if (FLAGS.early_stopping and
float(hr_sum / hr_count) > params["hr_threshold"]):
break
for c in callbacks:
c.on_train_end()
# Saving the model at the end of training.
if not FLAGS.ml_perf:
checkpoint = tf.train.Checkpoint(model=keras_model, optimizer=optimizer)
checkpoint_path = os.path.join(FLAGS.model_dir, "ctl_checkpoint")
checkpoint.save(checkpoint_path)
logging.info("Saving model as TF checkpoint: %s", checkpoint_path)
return train_loss, [None, hr_sum / hr_count]
def build_stats(loss, eval_result, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
loss: The final loss at training time.
eval_result: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback likely used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if loss:
stats["loss"] = loss
if eval_result:
stats["eval_loss"] = eval_result[0]
stats["eval_hit_rate"] = eval_result[1]
if time_callback:
timestamp_log = time_callback.timestamp_log
stats["step_timestamp_log"] = timestamp_log
stats["train_finish_time"] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats["avg_exp_per_second"] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
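# Editor-added usage sketch, not part of the original script: build_stats with a
# final loss, an [eval_loss, hit_rate] pair, and no time callback. The values
# are illustrative assumptions only.
def _build_stats_example_sketch():
  # Returns {"loss": 0.21, "eval_loss": 0.25, "eval_hit_rate": 0.63}.
  return build_stats(0.21, [0.25, 0.63], None)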
def main(_):
logging.info("Result is %s", run_ncf(FLAGS))
if __name__ == "__main__":
ncf_common.define_ncf_flags()
app.run(main)
|
tombstone/models
|
official/recommendation/ncf_keras_main.py
|
Python
|
apache-2.0
| 19,975
|
import pathlib
import sys
import attr
import pytest
import salt.channel.server
import salt.ext.tornado.gen
import salt.transport.ipc
import salt.utils.platform
from salt.ext.tornado import locks
pytestmark = [
# Windows does not support POSIX IPC
pytest.mark.skip_on_windows,
pytest.mark.skipif(
sys.version_info < (3, 6), reason="The IOLoop blocks under Py3.5 on these tests"
),
]
@attr.s(frozen=True, slots=True)
class PayloadHandler:
payloads = attr.ib(init=False, default=attr.Factory(list))
async def handle_payload(self, payload, reply_func):
self.payloads.append(payload)
await reply_func(payload)
def __enter__(self):
return self
def __exit__(self, *args):
self.payloads.clear()
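# Editor-added usage sketch, not part of the original tests: PayloadHandler
# records every payload it replies to and clears the list on context-manager
# exit. The payload value below is an illustrative assumption.
async def _payload_handler_example_sketch():
    replies = []
    async def reply_func(payload):
        replies.append(payload)
    with PayloadHandler() as handler:
        await handler.handle_payload({"foo": "bar"}, reply_func)
        assert handler.payloads == [{"foo": "bar"}]
    return replies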
@attr.s(frozen=True, slots=True)
class IPCTester:
io_loop = attr.ib()
socket_path = attr.ib()
publisher = attr.ib()
subscriber = attr.ib()
payloads = attr.ib(default=attr.Factory(list))
payload_ack = attr.ib(default=attr.Factory(locks.Condition))
@subscriber.default
def _subscriber_default(self):
return salt.transport.ipc.IPCMessageSubscriber(
self.socket_path,
io_loop=self.io_loop,
)
@publisher.default
def _publisher_default(self):
return salt.transport.ipc.IPCMessagePublisher(
{"ipc_write_buffer": 0},
self.socket_path,
io_loop=self.io_loop,
)
async def handle_payload(self, payload, reply_func):
self.payloads.append(payload)
await reply_func(payload)
self.payload_ack.notify()
def new_client(self):
return IPCTester(
io_loop=self.io_loop,
socket_path=self.socket_path,
payloads=self.payloads,
payload_ack=self.payload_ack,
)
async def publish(self, payload, timeout=60):
self.publisher.publish(payload)
async def read(self, timeout=60):
ret = await self.subscriber.read(timeout)
return ret
def __enter__(self):
self.publisher.start()
self.io_loop.add_callback(self.subscriber.connect)
return self
def __exit__(self, *args):
self.subscriber.close()
self.publisher.close()
@pytest.fixture
def ipc_socket_path(tmp_path):
if salt.utils.platform.is_darwin():
# A shorter path so that we don't hit the AF_UNIX path too long
tmp_path = pathlib.Path("/tmp").resolve()
_socket_path = tmp_path / "ipc-test.ipc"
try:
yield _socket_path
finally:
if _socket_path.exists():
_socket_path.unlink()
@pytest.fixture
def channel(io_loop, ipc_socket_path):
_ipc_tester = IPCTester(io_loop=io_loop, socket_path=str(ipc_socket_path))
with _ipc_tester:
yield _ipc_tester
async def test_basic_send(channel):
msg = {"foo": "bar", "stop": True}
# XXX: IPCClient connect and connected methods need to be cleaned up as
# this should not be needed.
while not channel.subscriber._connecting_future.done():
await salt.ext.tornado.gen.sleep(0.01)
while not channel.subscriber.connected():
await salt.ext.tornado.gen.sleep(0.01)
assert channel.subscriber.connected()
await channel.publish(msg)
ret = await channel.read()
assert ret == msg
|
saltstack/salt
|
tests/pytests/functional/transport/ipc/test_subscriber.py
|
Python
|
apache-2.0
| 3,365
|
"""Deletes a global load balancer rule."""
from baseCmd import *
from baseResponse import *
class deleteGlobalLoadBalancerRuleCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""the ID of the global load balancer rule"""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
self.required = ["id", ]
class deleteGlobalLoadBalancerRuleResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""any text associated with the success or failure"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""true if operation is executed successfully"""
self.success = None
self.typeInfo['success'] = 'boolean'
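# Editor-added usage sketch, not part of the generated module: a caller fills in
# the required id and hands the command to a marvin API client. The apiclient
# argument and its deleteGlobalLoadBalancerRule method are assumptions based on
# marvin's generated client, which is not shown in this file.
def _delete_global_load_balancer_rule_example(apiclient, rule_id):
    cmd = deleteGlobalLoadBalancerRuleCmd()
    cmd.id = rule_id
    return apiclient.deleteGlobalLoadBalancerRule(cmd)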
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/deleteGlobalLoadBalancerRule.py
|
Python
|
apache-2.0
| 763
|
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class DNSDriver(object):
"""Defines the DNS manager interface. Does nothing. """
def __init__(self):
pass
def get_domains(self):
raise NotImplementedError()
def create_entry(self, _name, _address, _type, _domain):
raise NotImplementedError()
def delete_entry(self, _name, _domain):
raise NotImplementedError()
def modify_address(self, _name, _address, _domain):
raise NotImplementedError()
def get_entries_by_address(self, _address, _domain):
raise NotImplementedError()
def get_entries_by_name(self, _name, _domain):
raise NotImplementedError()
def create_domain(self, _fqdomain):
raise NotImplementedError()
def delete_domain(self, _fqdomain):
raise NotImplementedError()
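# Editor-added usage sketch, not part of the original module: concrete drivers
# subclass DNSDriver and override the calls they support; anything left alone
# keeps raising NotImplementedError. The in-memory store below is illustrative.
class _ExampleInMemoryDNSDriver(DNSDriver):
    def __init__(self):
        super(_ExampleInMemoryDNSDriver, self).__init__()
        self._entries = {}
    def create_entry(self, _name, _address, _type, _domain):
        self._entries[(_name, _domain)] = (_address, _type)
    def delete_entry(self, _name, _domain):
        self._entries.pop((_name, _domain), None)
    def get_entries_by_name(self, _name, _domain):
        entry = self._entries.get((_name, _domain))
        return [entry] if entry else []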
|
fajoy/nova
|
nova/network/dns_driver.py
|
Python
|
apache-2.0
| 1,432
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module defines functions and other resources that need to be importable
outside of openstack_dashboard.api.nova (for example by cinder.py) in order to
avoid cyclic imports.
"""
from django.conf import settings
from glanceclient import exc as glance_exceptions
from novaclient import api_versions
from novaclient import client as nova_client
from horizon import exceptions as horizon_exceptions
from horizon.utils import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import glance
from openstack_dashboard.api import microversions
from openstack_dashboard.contrib.developer.profiler import api as profiler
# Supported compute versions
VERSIONS = base.APIVersionManager("compute", preferred_version=2)
VERSIONS.load_supported_version(1.1, {"client": nova_client, "version": 1.1})
VERSIONS.load_supported_version(2, {"client": nova_client, "version": 2})
INSECURE = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
CACERT = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
class Server(base.APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server.
Preserves the request info so image name can later be retrieved.
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links', 'description',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
'tenant_id', 'user_id', 'created', 'locked',
'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state',
'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
'OS-EXT-AZ:availability_zone', 'OS-DCF:diskConfig']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
# TODO(gabriel): deprecate making a call to Glance as a fallback.
@property
def image_name(self):
if not self.image:
return None
elif hasattr(self.image, 'name'):
return self.image.name
elif 'name' in self.image:
return self.image['name']
else:
try:
image = glance.image_get(self.request, self.image['id'])
self.image['name'] = image.name
return image.name
except (glance_exceptions.ClientException,
horizon_exceptions.ServiceCatalogException):
self.image['name'] = None
return None
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
@property
def availability_zone(self):
return getattr(self, 'OS-EXT-AZ:availability_zone', "")
@property
def host_server(self):
return getattr(self, 'OS-EXT-SRV-ATTR:host', '')
@memoized.memoized
def get_microversion(request, features):
client = novaclient(request)
min_ver, max_ver = api_versions._get_server_version_range(client)
return (microversions.get_microversion_for_features(
'nova', features, api_versions.APIVersion, min_ver, max_ver))
def get_auth_params_from_request(request):
"""Extracts properties needed by novaclient call from the request object.
These will be used to memoize the calls to novaclient.
"""
return (
request.user.username,
request.user.token.id,
request.user.tenant_id,
request.user.token.project.get('domain_id'),
base.url_for(request, 'compute'),
base.url_for(request, 'identity')
)
@memoized.memoized
def cached_novaclient(request, version=None):
(
username,
token_id,
project_id,
project_domain_id,
nova_url,
auth_url
) = get_auth_params_from_request(request)
if version is None:
version = VERSIONS.get_active_version()['version']
c = nova_client.Client(version,
username,
token_id,
project_id=project_id,
project_domain_id=project_domain_id,
auth_url=auth_url,
insecure=INSECURE,
cacert=CACERT,
http_log_debug=settings.DEBUG,
auth_token=token_id,
endpoint_override=nova_url)
return c
def novaclient(request, version=None):
if isinstance(version, api_versions.APIVersion):
version = version.get_string()
return cached_novaclient(request, version)
def get_novaclient_with_instance_desc(request):
microversion = get_microversion(request, "instance_description")
return novaclient(request, version=microversion)
@profiler.trace
def server_get(request, instance_id):
return Server(get_novaclient_with_instance_desc(request).servers.get(
instance_id), request)
|
NeCTAR-RC/horizon
|
openstack_dashboard/api/_nova.py
|
Python
|
apache-2.0
| 5,498
|
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from ...models import BlogEntry, RelatedBlog, Feed
from ...parser import get_all_entries, update_blog_supernav
class Command(BaseCommand):
""" Update blog entries and related blog feed data """
def handle(self, **options):
for feed in Feed.objects.all():
entries = get_all_entries(feed.feed_url)
for entry in entries:
try:
e = BlogEntry.objects.get(url=entry['url'])
# Update our info if it's changed
if e.pub_date < entry['pub_date']:
e.title = entry['title']
e.summary = entry['summary']
e.pub_date = entry['pub_date']
e.save()
except BlogEntry.DoesNotExist:
BlogEntry.objects.create(
title=entry['title'],
summary=entry['summary'],
pub_date=entry['pub_date'],
url=entry['url'],
feed=feed,
)
feed.last_import = now()
feed.save()
# Update the supernav box with the latest entry's info
update_blog_supernav()
# Update Related Blogs
for blog in RelatedBlog.objects.all():
blog.update_blog_data()
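# Editor-added usage sketch, not part of the original command: the importer is
# normally run through Django's management framework (for example from a cron
# job); the call below assumes the standard call_command API.
def _run_update_blogs_example():
    from django.core.management import call_command
    call_command('update_blogs')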
|
Mariatta/pythondotorg
|
blogs/management/commands/update_blogs.py
|
Python
|
apache-2.0
| 1,451
|
import logging
import random
import string
import sys
from oslo.config import cfg
# Logging setup
logger = logging.getLogger(__name__)
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(logging.DEBUG)
logger.addHandler(stdout)
logger.setLevel(logging.DEBUG)
default_opts = [
cfg.StrOpt('working_dir',
default='/opt/docstack',
help="The base path to use for docstack."),
]
# Option Definitions
infrastructure_opts = [
cfg.StrOpt('sql_backend',
default='mysql',
choices=['mysql', 'postgresql'],
help="The sql backend to use."),
cfg.StrOpt('sql_host',
default='127.0.0.1',
help="The host for the sql backend."),
cfg.StrOpt('sql_user',
default='mysql',
help="The user for the sql backend."),
cfg.StrOpt('sql_password',
default='',
help="Password for the sql backend."),
cfg.StrOpt('queue_backend',
default='rabbit',
choices=['rabbit', 'qpid', 'zeromq'],
help="The shared queue to use."),
cfg.StrOpt('queue_host',
default='127.0.0.1',
help="The host for the queue backend."),
cfg.StrOpt('queue_user',
default='rabbit',
help="The user for the queue backend."),
cfg.StrOpt('queue_password',
default='',
help="Password for the sql backend."),
]
def generate_password(length):
chars = ''.join([string.lowercase, string.uppercase, "1234567890"])
choice = random.SystemRandom().choice
return ''.join((choice(chars) for i in range(length)))
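# Editor-added usage sketch, not part of the original module: generate_password
# draws from ASCII letters and digits using random.SystemRandom, so it is
# suitable for the default service passwords set in parse() below.
def _generate_password_example():
    password = generate_password(12)
    assert len(password) == 12
    return password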
def parse():
conf = cfg.ConfigOpts()
conf(project='docstack', prog='docstack')
# Base options
conf.register_opts(default_opts)
# Infrastructure
infrastructure_group = cfg.OptGroup(name="infrastructure",
title="Infrastructure Services")
conf.register_group(infrastructure_group)
conf.register_opts(infrastructure_opts, infrastructure_group)
conf.set_default('sql_password', generate_password(12), 'infrastructure')
conf.set_default('queue_password', generate_password(12), 'infrastructure')
conf.reload_config_files()
# Log it all out
conf.log_opt_values(logger, logging.INFO)
return conf
|
jarretraim/py-docstack
|
docstack/config.py
|
Python
|
apache-2.0
| 2,382
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
import uuid
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (Cluster-mode)."""
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
def __init__(self, *args, **kwargs):
super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
def do_setup(self, context):
"""Do the customized set up on client for cluster mode."""
super(NetAppCmodeNfsDriver, self).do_setup(context)
na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
self.vserver = self.configuration.netapp_vserver
self.zapi_client = client_cmode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vserver=self.vserver)
self.ssc_enabled = True
self.ssc_vols = None
self.stale_vols = set()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
ssc_cmode.check_ssc_api_permissions(self.zapi_client)
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
LOG.debug('create_volume on %s' % volume['host'])
self._ensure_shares_mounted()
# get share as pool name
share = volume_utils.extract_host(volume['host'], level='pool')
if share is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
# warn on obsolete extra specs
na_utils.log_extra_spec_warnings(extra_specs)
try:
volume['provider_location'] = share
LOG.info(_LI('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_LW("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(share))
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
target_path = '%s' % (volume['name'])
export_path = share.split(':')[1]
flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
export_path)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
target_path)
def _check_volume_type(self, volume, share, file_name):
"""Match volume type for share file."""
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if not self._is_share_vol_type_match(volume, share):
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Volume type does not match for share %s."),
share))
if qos_policy_group:
try:
vserver, flex_vol_name = self._get_vserver_and_exp_vol(
share=share)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
file_name)
except na_api.NaApiError as ex:
LOG.exception(_LE('Setting file QoS policy group failed. %s'),
ex)
raise exception.NetAppDriverException(
reason=(_('Setting file QoS policy group failed. %s'), ex))
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume on NetApp Cluster."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
vserver)
share = share if share else self._get_provider_location(volume_id)
self._post_prov_deprov_in_ssc(share)
def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
"""Gets the vserver and export volume for share."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
ifs = self.zapi_client.get_if_info_by_ip(host_ip)
vserver = ifs[0].get_child_content('vserver')
exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
export_path)
return vserver, exp_volume
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
        sync = self.ssc_vols is None
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_NFS_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats()
self._spawn_clean_cache_job()
self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
for nfs_share in self._mounted_shares:
capacity = self._get_extended_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# Report pool as reserved when over the configured used_ratio
if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
pool['reserved_percentage'] = 100
# Report pool as reserved when over the subscribed ratio
if capacity['subscribed_ratio'] >=\
self.configuration.nfs_oversub_ratio:
pool['reserved_percentage'] = 100
# convert sizes to GB
total = float(capacity['apparent_size']) / units.Gi
pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
free = float(capacity['apparent_available']) / units.Gi
pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
# add SSC content if available
vol = self._get_vol_for_share(nfs_share)
if vol and self.ssc_vols:
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(
not thin).lower()
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
if volume:
self.stale_vols.add(volume)
set_copy = self.stale_vols.copy()
if reset:
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warning(_LW("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self.zapi_client.get_vserver_ips(self.vserver)
for vol in vols['all']:
for sh in self._mounted_shares:
host = sh.split(':')[0]
junction = sh.split(':')[1]
ip = na_utils.resolve_hostname(host)
if (self._ip_in_ifs(ip, vs_ifs) and
junction == vol.id['junction_path']):
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
def _ip_in_ifs(self, ip, api_ifs):
"""Checks if ip is listed for ifs in API format."""
if api_ifs is None:
return False
for ifc in api_ifs:
ifc_ip = ifc.get_child_content("address")
if ifc_ip == ip:
return True
return False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
for file in old_files:
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self.zapi_client.get_file_usage(path, vserver)
file_list.append((file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
        Multiple shares can have the same directory path but be served from
        different IPs. This method finds the share that is served by the given
        IP on the same NFS server.
"""
ip_vserver = self._get_vserver_for_ip(ip)
if ip_vserver and shares:
for share in shares:
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
"""Get vserver for the mentioned ip."""
try:
ifs = self.zapi_client.get_if_info_by_ip(ip)
vserver = ifs[0].get_child_content('vserver')
return vserver
except Exception:
return None
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
if self.ssc_vols:
for vol in self.ssc_vols['all']:
if vol.export['path'] == nfs_share:
return vol
return None
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
compatible = self._is_share_eligible(share, volume['size'])
if compatible and self.ssc_enabled:
matched = self._is_share_vol_type_match(volume, share)
compatible = compatible and matched
return compatible
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
def delete_volume(self, volume):
"""Deletes a logical volume."""
share = volume['provider_location']
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
self._post_prov_deprov_in_ssc(share)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
share = self._get_provider_location(snapshot.volume_id)
super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
self._post_prov_deprov_in_ssc(share)
def _post_prov_deprov_in_ssc(self, share):
if self.ssc_enabled and share:
netapp_vol = self._get_vol_for_share(share)
if netapp_vol:
self._update_stale_vols(volume=netapp_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
try:
major, minor = self.zapi_client.get_ontapi_version()
col_path = self.configuration.netapp_copyoffload_tool_path
if major == 1 and minor >= 20 and col_path:
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
'copy offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
finally:
if not copy_success:
super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
if self.ssc_enabled:
sh = self._get_provider_location(volume['id'])
self._update_stale_vols(self._get_vol_for_share(sh))
def _try_copyoffload(self, context, volume, image_service, image_id):
"""Tries server side file copy offload."""
copied = False
cache_result = self._find_image_in_cache(image_id)
if cache_result:
copied = self._copy_from_cache(volume, image_id, cache_result)
if not cache_result or not copied:
self._copy_from_img_service(context, volume, image_service,
image_id)
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""
ip = na_utils.resolve_hostname(host)
vserver = self._get_vserver_for_ip(ip)
if not vserver:
raise exception.NotFound(_("Unable to locate an SVM that is "
"managing the IP address '%s'") % ip)
return ip
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
LOG.debug("Trying copy from cache using copy offload.")
copied = False
for res in cache_result:
try:
(share, file_name) = res
LOG.debug("Found cache file_name on share %s.", share)
if share != self._get_provider_location(volume['id']):
col_path = self.configuration.netapp_copyoffload_tool_path
src_ip = self._get_ip_verify_on_cluster(
share.split(':')[0])
src_path = os.path.join(share.split(':')[1], file_name)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
dst_path = os.path.join(
self._get_export_path(volume['id']), volume['name'])
self._execute(col_path, src_ip, dst_ip,
src_path, dst_path,
run_as_root=self._execute_as_root,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug("Copied image from cache to volume %s using"
" copy offload.", volume['id'])
else:
self._clone_file_dst_exists(share, file_name,
volume['name'],
dest_exists=True)
LOG.debug("Copied image from cache to volume %s using"
" cloning.", volume['id'])
self._post_clone_image(volume)
copied = True
break
except Exception as e:
LOG.exception(_LE('Error in workflow copy from cache. %s.'), e)
return copied
def _clone_file_dst_exists(self, share, src_name, dst_name,
dest_exists=False):
"""Clone file even if dest exists."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
dest_exists=dest_exists)
def _copy_from_img_service(self, context, volume, image_service,
image_id):
"""Copies from the image service using copy offload."""
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
image_loc = self._construct_image_nfs_url(image_loc)
conn, dr = self._check_get_nfs_path_segs(image_loc)
if conn:
src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
else:
raise exception.NotFound(_("Source host details not found."))
(__, ___, img_file) = image_loc.rpartition('/')
src_path = os.path.join(dr, img_file)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
# tmp file is required to deal with img formats
tmp_img_file = six.text_type(uuid.uuid4())
col_path = self.configuration.netapp_copyoffload_tool_path
img_info = image_service.show(context, image_id)
dst_share = self._get_provider_location(volume['id'])
self._check_share_can_hold_size(dst_share, img_info['size'])
run_as_root = self._execute_as_root
dst_dir = self._get_mount_point_for_share(dst_share)
dst_img_local = os.path.join(dst_dir, tmp_img_file)
try:
            # If the source and destination shares are not the same
if (('%s:%s' % (src_ip, dr)) !=
('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
dst_img_serv_path = os.path.join(
self._get_export_path(volume['id']), tmp_img_file)
self._execute(col_path, src_ip, dst_ip, src_path,
dst_img_serv_path, run_as_root=run_as_root,
check_exit_code=0)
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % image_id)
if img_info['disk_format'] == 'raw':
LOG.debug('Image is raw %s.', image_id)
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = six.text_type(uuid.uuid4())
dst_img_conv_local = os.path.join(dst_dir, img_conv)
                # Check against the image size, which is only an approximate check
self._check_share_can_hold_size(dst_share, img_info['size'])
try:
image_utils.convert_image(dst_img_local,
dst_img_conv_local, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst_img_conv_local,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but format is now %s.")
% data.file_format)
else:
self._clone_file_dst_exists(dst_share, img_conv,
volume['name'],
dest_exists=True)
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)
self._post_clone_image(volume)
finally:
if os.path.exists(dst_img_local):
self._delete_file(dst_img_local)
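# Editor-added sketch, not part of the original driver: the pool capacity fields
# reported by _get_pool_stats are raw byte counts divided by units.Gi and then
# rounded down to two decimals; the byte count below is an illustrative
# assumption only.
def _capacity_bytes_to_gb_example(capacity_bytes=2 * units.Gi + 123456789):
    return na_utils.round_down(float(capacity_bytes) / units.Gi, '0.01')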
|
Akrog/cinder
|
cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
|
Python
|
apache-2.0
| 24,731
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools import log
from kafka.tools.assigner.actions import ActionModule
from kafka.tools.exceptions import ConfigurationException
class ActionClone(ActionModule):
name = "clone"
helpstr = "Copy partitions from some brokers to a new broker (increasing RF)"
def __init__(self, args, cluster):
super(ActionClone, self).__init__(args, cluster)
self.check_brokers()
if args.to_broker not in self.cluster.brokers:
raise ConfigurationException("Target broker is not in the brokers list for this cluster")
self.sources = args.brokers
self.to_broker = self.cluster.brokers[args.to_broker]
@classmethod
def _add_args(cls, parser):
parser.add_argument('-b', '--brokers', help="List of source broker IDs", required=True, type=int, nargs='*')
parser.add_argument('-t', '--to_broker', help="Broker ID to copy partitions to", required=True, type=int)
def process_cluster(self):
source_set = set(self.sources)
for partition in self.cluster.partitions(self.args.exclude_topics):
if len(source_set & set([replica.id for replica in partition.replicas])) > 0:
if self.to_broker in partition.replicas:
log.warn("Target broker (ID {0}) is already in the replica list for {1}:{2}".format(self.to_broker.id, partition.topic.name, partition.num))
# If the broker is already in the replica list, it ALWAYS becomes the leader
if self.to_broker != partition.replicas[0]:
partition.swap_replica_positions(self.to_broker, partition.replicas[0])
else:
                    # If one of the source brokers is currently the leader, the target
                    # broker becomes the leader. Otherwise, the target broker is placed
                    # second in the replica list.
if partition.replicas[0].id in self.sources:
partition.add_replica(self.to_broker, 0)
else:
partition.add_replica(self.to_broker, 1)
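# Editor-added usage sketch, not part of the original module: the flags that
# ActionClone registers, exercised on a standalone argparse parser purely for
# illustration.
def _action_clone_args_example():
    import argparse
    parser = argparse.ArgumentParser()
    ActionClone._add_args(parser)
    return parser.parse_args(['--brokers', '1', '2', '--to_broker', '3'])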
|
toddpalino/kafka-tools
|
kafka/tools/assigner/actions/clone.py
|
Python
|
apache-2.0
| 2,839
|
import xbmcgui
import urllib
def download(url, dest, dp = None):
if not dp:
dp = xbmcgui.DialogProgress()
dp.create("XBMCHUB...","Downloading & Copying File",' ', ' ')
dp.update(0)
urllib.urlretrieve(url,dest,lambda nb, bs, fs, url=url: _pbhook(nb,bs,fs,url,dp))
def _pbhook(numblocks, blocksize, filesize, url, dp):
try:
percent = min((numblocks*blocksize*100)/filesize, 100)
dp.update(percent)
except:
percent = 100
dp.update(percent)
    if dp.iscanceled():
        dp.close()
        raise Exception("Canceled")
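# Editor-added usage sketch, not part of the original add-on: a typical call
# fetches a remote zip into a local path while the progress dialog updates.
# The URL and destination below are illustrative assumptions only.
def _download_example():
    download('http://example.com/repository.zip', '/tmp/repository.zip')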
|
aplicatii-romanesti/allinclusive-kodi-pi
|
.kodi/addons/plugin.video.familyfunflix/downloader.py
|
Python
|
apache-2.0
| 588
|
import os
import sys
import logging
import datetime
from future.utils import iteritems
from pandaharvester.harvesterconfig import harvester_config
try:
os.remove(harvester_config.db.database_filename)
except Exception:
pass
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
if loggerName.startswith('panda.log'):
if len(loggerObj.handlers) == 0:
continue
if loggerName.split('.')[-1] in ['db_proxy']:
continue
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
loggerObj.addHandler(stdoutHandler)
queueConfigMapper = QueueConfigMapper()
proxy = DBProxy()
proxy.make_tables(queueConfigMapper)
job = JobSpec()
job.PandaID = 1
job.modificationTime = datetime.datetime.now()
proxy.insert_jobs([job])
newJob = proxy.get_job(1)
a = CommunicatorPool()
a.get_jobs('siteName', 'nodeName', 'prodSourceLabel', 'computingElement', 1, {})
|
PanDAWMS/panda-harvester
|
pandaharvester/harvestertest/basicTest.py
|
Python
|
apache-2.0
| 1,286
|