| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class: Python) | license (15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
import numpy as np
import tensorflow as tf
from recognition.utils.rect import Rect
def intersection(box1, box2):
x1_max = tf.maximum(box1[:, 0], box2[:, 0])
y1_max = tf.maximum(box1[:, 1], box2[:, 1])
x2_min = tf.minimum(box1[:, 2], box2[:, 2])
y2_min = tf.minimum(box1[:, 3], box2[:, 3])
x_diff = tf.maximum(x2_min - x1_max, 0)
y_diff = tf.maximum(y2_min - y1_max, 0)
return x_diff * y_diff
def area(box):
x_diff = tf.maximum(box[:, 2] - box[:, 0], 0)
y_diff = tf.maximum(box[:, 3] - box[:, 1], 0)
return x_diff * y_diff
def union(box1, box2):
return area(box1) + area(box2) - intersection(box1, box2)
def iou(box1, box2):
return intersection(box1, box2) / union(box1, box2)
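# Hedged usage sketch (not part of the original file; the [x1, y1, x2, y2] box layout is
# inferred from intersection() above, and a TF 1.x-style session is assumed):
# b1 = tf.constant([[0., 0., 10., 10.]])
# b2 = tf.constant([[5., 5., 15., 15.]])
# with tf.Session() as sess:
#     print(sess.run(iou(b1, b2)))  # 25 / 175 ~= 0.143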
def to_idx(vec, w_shape):
'''
vec = (idn, idh, idw)
w_shape = [n, h, w, c]
'''
return vec[:, 2] + w_shape[2] * (vec[:, 1] + w_shape[1] * vec[:, 0])
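# Worked example (illustrative, not from the original file): for w_shape = [2, 4, 5, c],
# the row-major flat index of (idn, idh, idw) = (1, 2, 3) is 3 + 5 * (2 + 4 * 1) = 33,
# which matches the expression returned above.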
def interp(w, i, channel_dim):
'''
Input:
w: A 4D block tensor of shape (n, h, w, c)
i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
each having type (int, float, float)
The 4D block represents a batch of 3D image feature volumes with c channels.
The input i is a list of points to index into w via interpolation. Direct
indexing is not possible due to y_1 and z_1 being float values.
Output:
A list of the values: [
w[x_1, y_1, z_1, :]
w[x_2, y_2, z_2, :]
...
w[x_k, y_k, z_k, :]
]
of the same length == len(i)
'''
w_as_vector = tf.reshape(w, [-1, channel_dim]) # gather expects w to be 1-d
upper_l = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
upper_r = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
lower_l = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
lower_r = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
upper_l_idx = to_idx(upper_l, tf.shape(w))
upper_r_idx = to_idx(upper_r, tf.shape(w))
lower_l_idx = to_idx(lower_l, tf.shape(w))
lower_r_idx = to_idx(lower_r, tf.shape(w))
upper_l_value = tf.gather(w_as_vector, upper_l_idx)
upper_r_value = tf.gather(w_as_vector, upper_r_idx)
lower_l_value = tf.gather(w_as_vector, lower_l_idx)
lower_r_value = tf.gather(w_as_vector, lower_r_idx)
alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)
upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
return value
def bilinear_select(H, pred_boxes, early_feat, early_feat_channels, w_offset, h_offset):
'''
Function used for rezooming high level feature maps. Uses bilinear interpolation
to select all channels at index (x, y) for a high level feature map, where x and y are floats.
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
fine_stride = 8. # pixels per 60x80 grid cell in 480x640 image
coarse_stride = H['region_size'] # pixels per 15x20 grid cell in 480x640 image
batch_ids = []
x_offsets = []
y_offsets = []
for n in range(H['batch_size']):
for i in range(H['grid_height']):
for j in range(H['grid_width']):
for k in range(H['rnn_len']):
batch_ids.append([n])
x_offsets.append([coarse_stride / 2. + coarse_stride * j])
y_offsets.append([coarse_stride / 2. + coarse_stride * i])
batch_ids = tf.constant(batch_ids)
x_offsets = tf.constant(x_offsets)
y_offsets = tf.constant(y_offsets)
pred_boxes_r = tf.reshape(pred_boxes, [outer_size * H['rnn_len'], 4])
scale_factor = coarse_stride / fine_stride # scale difference between 15x20 and 60x80 features
pred_x_center = (pred_boxes_r[:, 0:1] + w_offset * pred_boxes_r[:, 2:3] + x_offsets) / fine_stride
pred_x_center_clip = tf.clip_by_value(pred_x_center,
0,
scale_factor * H['grid_width'] - 1)
pred_y_center = (pred_boxes_r[:, 1:2] + h_offset * pred_boxes_r[:, 3:4] + y_offsets) / fine_stride
pred_y_center_clip = tf.clip_by_value(pred_y_center,
0,
scale_factor * H['grid_height'] - 1)
interp_indices = tf.concat(1, [tf.to_float(batch_ids), pred_y_center_clip, pred_x_center_clip])
return interp_indices
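# Hedged usage sketch (names below are illustrative assumptions): the indices returned
# here are normally passed to interp() to pull interpolated channel vectors out of the
# finer feature map, e.g.
#     interp_indices = bilinear_select(H, pred_boxes, early_feat, early_feat_channels, 0., 0.)
#     rezoom_features = interp(early_feat, interp_indices, early_feat_channels)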
def filter_rectangles(H, confidences, boxes, use_stitching=False, rnn_len=1, tau=0.25, min_conf=0.1):
boxes_r = np.reshape(boxes, (-1,
H["grid_height"],
H["grid_width"],
rnn_len,
4))
confidences_r = np.reshape(confidences, (-1,
H["grid_height"],
H["grid_width"],
rnn_len,
H['num_classes']))
cell_pix_size = H['region_size']
all_rects = [[[] for _ in range(H["grid_width"])] for _ in range(H["grid_height"])]
for n in range(rnn_len):
for y in range(H["grid_height"]):
for x in range(H["grid_width"]):
bbox = boxes_r[0, y, x, n, :]
abs_cx = int(bbox[0]) + cell_pix_size / 2 + cell_pix_size * x
abs_cy = int(bbox[1]) + cell_pix_size / 2 + cell_pix_size * y
w = bbox[2]
h = bbox[3]
conf = np.max(confidences_r[0, y, x, n, 1:])
all_rects[y][x].append(Rect(abs_cx, abs_cy, w, h, conf))
all_rects_r = [r for row in all_rects for cell in row for r in cell]
if use_stitching:
from recognition.utils.stitch_wrapper import stitch_rects
acc_rects = stitch_rects(all_rects, tau)
else:
acc_rects = all_rects_r
filtered = []
for rect in acc_rects:
if rect.confidence > min_conf:
filtered.append(rect)
return filtered
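# Hedged usage sketch (the H values below are assumptions, not from the original file):
# H = {'grid_height': 15, 'grid_width': 20, 'region_size': 32, 'num_classes': 2}
# rects = filter_rectangles(H, confidences, boxes, min_conf=0.3)
# keeps only the Rect objects whose confidence exceeds min_conf, optionally stitched.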
| Queuer/queue-vision | recognition/utils/train_utils.py | Python | apache-2.0 | 6,412 |
"""
This module (mostly) uses the XenAPI to manage Xen virtual machines.
Big fat warning: the XenAPI used in this file is the one bundled with
Xen Source, NOT XenServer nor Xen Cloud Platform. As a matter of fact it
*will* fail under those platforms. From what I've read, little work is needed
to adapt this code to XS/XCP, mostly adjusting the XenAPI version, but as
XCP has not supplanted Xen Source on many platforms, please keep
compatibility in mind.
Useful documentation:
. http://downloads.xen.org/Wiki/XenAPI/xenapi-1.0.6.pdf
. http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/api/
. https://github.com/xapi-project/xen-api/tree/master/scripts/examples/python
. http://xenbits.xen.org/gitweb/?p=xen.git;a=tree;f=tools/python/xen/xm;hb=HEAD
"""
import contextlib
import os
import sys
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
try:
import importlib # pylint: disable=minimum-python-version
HAS_IMPORTLIB = True
except ImportError:
# Python < 2.7 does not have importlib
HAS_IMPORTLIB = False
# Define the module's virtual name
__virtualname__ = "virt"
# This module has only been tested on Debian GNU/Linux and NetBSD; it
# probably needs more path appending for other distributions.
# The path to append is the path to the Python Xen libraries, where the
# XenAPI module resides.
def _check_xenapi():
if __grains__["os"] == "Debian":
debian_xen_version = "/usr/lib/xen-common/bin/xen-version"
if os.path.isfile(debian_xen_version):
# __salt__ is not available in __virtual__
xenversion = salt.modules.cmdmod._run_quiet(debian_xen_version)
xapipath = "/usr/lib/xen-{}/lib/python".format(xenversion)
if os.path.isdir(xapipath):
sys.path.append(xapipath)
try:
if HAS_IMPORTLIB:
return importlib.import_module("xen.xm.XenAPI")
return __import__("xen.xm.XenAPI").xm.XenAPI
except (ImportError, AttributeError):
return False
def __virtual__():
if _check_xenapi() is not False:
return __virtualname__
return (False, "Module xapi: xenapi check failed")
@contextlib.contextmanager
def _get_xapi_session():
"""
Get a session to XenAPI. By default, use the local UNIX socket.
"""
_xenapi = _check_xenapi()
xapi_uri = __salt__["config.option"]("xapi.uri")
xapi_login = __salt__["config.option"]("xapi.login")
xapi_password = __salt__["config.option"]("xapi.password")
if not xapi_uri:
# xend local UNIX socket
xapi_uri = "httpu:///var/run/xend/xen-api.sock"
if not xapi_login:
xapi_login = ""
if not xapi_password:
xapi_password = ""
try:
session = _xenapi.Session(xapi_uri)
session.xenapi.login_with_password(xapi_login, xapi_password)
yield session.xenapi
except Exception: # pylint: disable=broad-except
raise CommandExecutionError("Failed to connect to XenAPI socket.")
finally:
session.xenapi.session.logout()
# Used rectypes (Record types):
#
# host
# host_cpu
# VM
# VIF
# VBD
def _get_xtool():
"""
Internal, returns xl or xm command line path
"""
for xtool in ["xl", "xm"]:
path = salt.utils.path.which(xtool)
if path is not None:
return path
def _get_all(xapi, rectype):
"""
Internal, returns all members of rectype
"""
return getattr(xapi, rectype).get_all()
def _get_label_uuid(xapi, rectype, label):
"""
Internal, returns label's uuid
"""
try:
return getattr(xapi, rectype).get_by_name_label(label)[0]
except Exception: # pylint: disable=broad-except
return False
def _get_record(xapi, rectype, uuid):
"""
Internal, returns a full record for uuid
"""
return getattr(xapi, rectype).get_record(uuid)
def _get_record_by_label(xapi, rectype, label):
"""
Internal, returns a full record for uuid
"""
uuid = _get_label_uuid(xapi, rectype, label)
if uuid is False:
return False
return getattr(xapi, rectype).get_record(uuid)
def _get_metrics_record(xapi, rectype, record):
"""
Internal, returns metrics record for a rectype
"""
metrics_id = record["metrics"]
return getattr(xapi, "{}_metrics".format(rectype)).get_record(metrics_id)
def _get_val(record, keys):
"""
Internal, get value from record
"""
data = record
for key in keys:
if key in data:
data = data[key]
else:
return None
return data
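# For illustration (assumption, mirroring how node_info() uses this helper below):
# _get_val(host_rec, ["software_version", "xen_major"]) walks
# host_rec["software_version"]["xen_major"] and returns None if any key is missing.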
def list_domains():
"""
Return a list of virtual machine names on the minion
CLI Example:
.. code-block:: bash
salt '*' virt.list_domains
"""
with _get_xapi_session() as xapi:
hosts = xapi.VM.get_all()
ret = []
for _host in hosts:
if xapi.VM.get_record(_host)["is_control_domain"] is False:
ret.append(xapi.VM.get_name_label(_host))
return ret
def vm_info(vm_=None):
"""
Return detailed information about the vms.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_info
"""
with _get_xapi_session() as xapi:
def _info(vm_):
vm_rec = _get_record_by_label(xapi, "VM", vm_)
if vm_rec is False:
return False
vm_metrics_rec = _get_metrics_record(xapi, "VM", vm_rec)
return {
"cpu": vm_metrics_rec["VCPUs_number"],
"maxCPU": _get_val(vm_rec, ["VCPUs_max"]),
"cputime": vm_metrics_rec["VCPUs_utilisation"],
"disks": get_disks(vm_),
"nics": get_nics(vm_),
"maxMem": int(_get_val(vm_rec, ["memory_dynamic_max"])),
"mem": int(vm_metrics_rec["memory_actual"]),
"state": _get_val(vm_rec, ["power_state"]),
}
info = {}
if vm_:
ret = _info(vm_)
if ret is not None:
info[vm_] = ret
else:
for vm_ in list_domains():
ret = _info(vm_)
if ret is not None:
info[vm_] = ret
return info
def vm_state(vm_=None):
"""
Return list of all the vms and their state.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_state <vm name>
"""
with _get_xapi_session() as xapi:
info = {}
if vm_:
info[vm_] = _get_record_by_label(xapi, "VM", vm_)["power_state"]
return info
for vm_ in list_domains():
info[vm_] = _get_record_by_label(xapi, "VM", vm_)["power_state"]
return info
def node_info():
"""
Return a dict with information about this node
CLI Example:
.. code-block:: bash
salt '*' virt.node_info
"""
with _get_xapi_session() as xapi:
# get node uuid
host_rec = _get_record(xapi, "host", _get_all(xapi, "host")[0])
# get first CPU (likely to be a core) uuid
host_cpu_rec = _get_record(xapi, "host_cpu", host_rec["host_CPUs"][0])
# get related metrics
host_metrics_rec = _get_metrics_record(xapi, "host", host_rec)
# adapted / cleaned up from Xen's xm
def getCpuMhz():
cpu_speeds = [
int(host_cpu_rec["speed"])
for host_cpu_it in host_cpu_rec
if "speed" in host_cpu_it
]
if cpu_speeds:
return sum(cpu_speeds) / len(cpu_speeds)
else:
return 0
def getCpuFeatures():
if host_cpu_rec:
return host_cpu_rec["features"]
def getFreeCpuCount():
cnt = 0
for host_cpu_it in host_cpu_rec:
if len(host_cpu_rec["cpu_pool"]) == 0:
cnt += 1
return cnt
info = {
"cpucores": _get_val(host_rec, ["cpu_configuration", "nr_cpus"]),
"cpufeatures": getCpuFeatures(),
"cpumhz": getCpuMhz(),
"cpuarch": _get_val(host_rec, ["software_version", "machine"]),
"cputhreads": _get_val(host_rec, ["cpu_configuration", "threads_per_core"]),
"phymemory": int(host_metrics_rec["memory_total"]) / 1024 / 1024,
"cores_per_sockets": _get_val(
host_rec, ["cpu_configuration", "cores_per_socket"]
),
"free_cpus": getFreeCpuCount(),
"free_memory": int(host_metrics_rec["memory_free"]) / 1024 / 1024,
"xen_major": _get_val(host_rec, ["software_version", "xen_major"]),
"xen_minor": _get_val(host_rec, ["software_version", "xen_minor"]),
"xen_extra": _get_val(host_rec, ["software_version", "xen_extra"]),
"xen_caps": " ".join(_get_val(host_rec, ["capabilities"])),
"xen_scheduler": _get_val(host_rec, ["sched_policy"]),
"xen_pagesize": _get_val(host_rec, ["other_config", "xen_pagesize"]),
"platform_params": _get_val(host_rec, ["other_config", "platform_params"]),
"xen_commandline": _get_val(host_rec, ["other_config", "xen_commandline"]),
"xen_changeset": _get_val(host_rec, ["software_version", "xen_changeset"]),
"cc_compiler": _get_val(host_rec, ["software_version", "cc_compiler"]),
"cc_compile_by": _get_val(host_rec, ["software_version", "cc_compile_by"]),
"cc_compile_domain": _get_val(
host_rec, ["software_version", "cc_compile_domain"]
),
"cc_compile_date": _get_val(
host_rec, ["software_version", "cc_compile_date"]
),
"xend_config_format": _get_val(
host_rec, ["software_version", "xend_config_format"]
),
}
return info
def get_nics(vm_):
"""
Return info about the network interfaces of a named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <vm name>
"""
with _get_xapi_session() as xapi:
nic = {}
vm_rec = _get_record_by_label(xapi, "VM", vm_)
if vm_rec is False:
return False
for vif in vm_rec["VIFs"]:
vif_rec = _get_record(xapi, "VIF", vif)
nic[vif_rec["MAC"]] = {
"mac": vif_rec["MAC"],
"device": vif_rec["device"],
"mtu": vif_rec["MTU"],
}
return nic
def get_macs(vm_):
"""
Return a list of MAC addresses from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <vm name>
"""
macs = []
nics = get_nics(vm_)
if nics is None:
return None
for nic in nics:
macs.append(nic)
return macs
def get_disks(vm_):
"""
Return the disks of a named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_disks <vm name>
"""
with _get_xapi_session() as xapi:
disk = {}
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
for vbd in xapi.VM.get_VBDs(vm_uuid):
dev = xapi.VBD.get_device(vbd)
if not dev:
continue
prop = xapi.VBD.get_runtime_properties(vbd)
disk[dev] = {
"backend": prop["backend"],
"type": prop["device-type"],
"protocol": prop["protocol"],
}
return disk
def setmem(vm_, memory):
"""
Changes the amount of memory allocated to the VM.
Memory is to be specified in MB
CLI Example:
.. code-block:: bash
salt '*' virt.setmem myvm 768
"""
with _get_xapi_session() as xapi:
mem_target = int(memory) * 1024 * 1024
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.set_memory_dynamic_max_live(vm_uuid, mem_target)
xapi.VM.set_memory_dynamic_min_live(vm_uuid, mem_target)
return True
except Exception: # pylint: disable=broad-except
return False
def setvcpus(vm_, vcpus):
"""
Changes the number of vcpus allocated to the VM.
vcpus is an int representing the number to be assigned
CLI Example:
.. code-block:: bash
salt '*' virt.setvcpus myvm 2
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.set_VCPUs_number_live(vm_uuid, vcpus)
return True
except Exception: # pylint: disable=broad-except
return False
def vcpu_pin(vm_, vcpu, cpus):
"""
Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
salt 'foo' virt.vcpu_pin domU-id 2 2-6
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
# from xm's main
def cpu_make_map(cpulist):
cpus = []
for c in cpulist.split(","):
if c == "":
continue
if "-" in c:
(x, y) = c.split("-")
for i in range(int(x), int(y) + 1):
cpus.append(int(i))
else:
# remove this element from the list
if c[0] == "^":
cpus = [x for x in cpus if x != int(c[1:])]
else:
cpus.append(int(c))
cpus.sort()
return ",".join(map(str, cpus))
if cpus == "all":
cpumap = cpu_make_map("0-63")
else:
cpumap = cpu_make_map("{}".format(cpus))
try:
xapi.VM.add_to_VCPUs_params_live(vm_uuid, "cpumap{}".format(vcpu), cpumap)
return True
# VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has
# a bug which makes the client call fail.
# That code is accurate for all others XenAPI implementations, but
# for that particular one, fallback to xm / xl instead.
except Exception: # pylint: disable=broad-except
return __salt__["cmd.run"](
"{} vcpu-pin {} {} {}".format(_get_xtool(), vm_, vcpu, cpus),
python_shell=False,
)
def freemem():
"""
Return an int representing the amount of memory that has not been given
to virtual machines on this node
CLI Example:
.. code-block:: bash
salt '*' virt.freemem
"""
return node_info()["free_memory"]
def freecpu():
"""
Return an int representing the number of unallocated cpus on this
hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.freecpu
"""
return node_info()["free_cpus"]
def full_info():
"""
Return the node_info, vm_info and freemem
CLI Example:
.. code-block:: bash
salt '*' virt.full_info
"""
return {"node_info": node_info(), "vm_info": vm_info()}
def shutdown(vm_):
"""
Send a soft shutdown signal to the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.shutdown <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.clean_shutdown(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def pause(vm_):
"""
Pause the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.pause <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.pause(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def resume(vm_):
"""
Resume the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.resume <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.unpause(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def start(config_):
"""
Start a defined domain
CLI Example:
.. code-block:: bash
salt '*' virt.start <path to Xen cfg file>
"""
# FIXME / TODO
# This function does NOT use the XenAPI. Instead, it uses good old xm / xl.
# On Xen Source, creating a virtual machine using XenAPI is really painful.
# XCP / XS make it really easy using xapi.Async.VM.start instead. Anyone?
return __salt__["cmd.run"](
"{} create {}".format(_get_xtool(), config_), python_shell=False
)
def reboot(vm_):
"""
Reboot a domain via ACPI request
CLI Example:
.. code-block:: bash
salt '*' virt.reboot <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.clean_reboot(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def reset(vm_):
"""
Reset a VM by emulating the reset button on a physical machine
CLI Example:
.. code-block:: bash
salt '*' virt.reset <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.hard_reboot(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def migrate(vm_, target, live=1, port=0, node=-1, ssl=None, change_home_server=0):
"""
Migrates the virtual machine to another hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.migrate <vm name> <target hypervisor> [live] [port] [node] [ssl] [change_home_server]
Optional values:
live
Use live migration
port
Use a specified port
node
Use specified NUMA node on target
ssl
use ssl connection for migration
change_home_server
change home server for managed domains
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
other_config = {
"port": port,
"node": node,
"ssl": ssl,
"change_home_server": change_home_server,
}
try:
xapi.VM.migrate(vm_uuid, target, bool(live), other_config)
return True
except Exception: # pylint: disable=broad-except
return False
def stop(vm_):
"""
Hard power down the virtual machine; this is equivalent to pulling the
power
CLI Example:
.. code-block:: bash
salt '*' virt.stop <vm name>
"""
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
try:
xapi.VM.hard_shutdown(vm_uuid)
return True
except Exception: # pylint: disable=broad-except
return False
def is_hyper():
"""
Returns a bool indicating whether this node is a hypervisor of any kind
CLI Example:
.. code-block:: bash
salt '*' virt.is_hyper
"""
try:
if __grains__["virtual_subtype"] != "Xen Dom0":
return False
except KeyError:
# virtual_subtype isn't set everywhere.
return False
try:
with salt.utils.files.fopen("/proc/modules") as fp_:
if "xen_" not in salt.utils.stringutils.to_unicode(fp_.read()):
return False
except OSError:
return False
# there must be a smarter way...
return "xenstore" in __salt__["cmd.run"](__grains__["ps"])
def vm_cputime(vm_=None):
"""
Return cputime used by the vms on this hyper in a
dict of dicts:
.. code-block:: python
{
'your-vm': {
'cputime': <int>,
'cputime_percent': <int>
},
...
}
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_cputime
"""
with _get_xapi_session() as xapi:
def _info(vm_):
host_rec = _get_record_by_label(xapi, "VM", vm_)
if host_rec is False:
return False
host_cpus = len(host_rec["host_CPUs"])
host_metrics = _get_metrics_record(xapi, "VM", host_rec)
vcpus = int(host_metrics["VCPUs_number"])
cputime = int(host_metrics["VCPUs_utilisation"]["0"])
cputime_percent = 0
if cputime:
# Divide by vcpus to always return a number between 0 and 100
cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
return {
"cputime": int(cputime),
"cputime_percent": int("{:.0f}".format(cputime_percent)),
}
info = {}
if vm_:
info[vm_] = _info(vm_)
return info
for vm_ in list_domains():
info[vm_] = _info(vm_)
return info
def vm_netstats(vm_=None):
"""
Return combined network counters used by the vms on this hyper in a
dict of dicts:
.. code-block:: python
{
'your-vm': {
'io_read_kbs': 0,
'io_total_read_kbs': 0,
'io_total_write_kbs': 0,
'io_write_kbs': 0
},
...
}
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_netstats
"""
with _get_xapi_session() as xapi:
def _info(vm_):
ret = {}
vm_rec = _get_record_by_label(xapi, "VM", vm_)
if vm_rec is False:
return False
for vif in vm_rec["VIFs"]:
vif_rec = _get_record(xapi, "VIF", vif)
ret[vif_rec["device"]] = _get_metrics_record(xapi, "VIF", vif_rec)
del ret[vif_rec["device"]]["last_updated"]
return ret
info = {}
if vm_:
info[vm_] = _info(vm_)
else:
for vm_ in list_domains():
info[vm_] = _info(vm_)
return info
def vm_diskstats(vm_=None):
"""
Return disk usage counters used by the vms on this hyper in a
dict of dicts:
.. code-block:: python
{
'your-vm': {
'io_read_kbs': 0,
'io_write_kbs': 0
},
...
}
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_diskstats
"""
with _get_xapi_session() as xapi:
def _info(vm_):
ret = {}
vm_uuid = _get_label_uuid(xapi, "VM", vm_)
if vm_uuid is False:
return False
for vbd in xapi.VM.get_VBDs(vm_uuid):
vbd_rec = _get_record(xapi, "VBD", vbd)
ret[vbd_rec["device"]] = _get_metrics_record(xapi, "VBD", vbd_rec)
del ret[vbd_rec["device"]]["last_updated"]
return ret
info = {}
if vm_:
info[vm_] = _info(vm_)
else:
for vm_ in list_domains():
info[vm_] = _info(vm_)
return info
| saltstack/salt | salt/modules/xapi_virt.py | Python | apache-2.0 | 24,657 |
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for a collection and its constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used.
"""
from core.domain import user_services
import utils
class Classroom(object):
def __init__(self, id, name, building, location, day_of_week, sections, seat_count,
author_id, last_updated):
self.id = id
self.name = name
self.building = building
self.location = location
self.day_of_week = day_of_week
self.sections = sections
self.seat_count = seat_count
self.author_id = author_id
self.last_updated = last_updated
def get_author_name(self):
return user_services.get_username(self.author_id)
def to_dict(self):
return {
'author_name': self.get_author_name(),
'id': self.id,
'name': self.name,
'building': self.building,
'location': self.location,
'day_of_week': self.day_of_week,
'seat_count': self.seat_count,
'sections': self.sections,
'author_id': self.author_id,
'last_updated': utils.get_time_in_millisecs(self.last_updated),
}
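# Hedged usage sketch (all values below are illustrative assumptions; datetime would
# need to be imported, and get_author_name() requires a working user_services backend):
# room = Classroom('c1', 'Algebra I', 'Main Hall', 'Room 101', 'Monday',
#                  ['Section A'], 30, author_id='uid123',
#                  last_updated=datetime.datetime.utcnow())
# room.to_dict()  # serializes the classroom, resolving author_id to a username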
| zgchizi/oppia-uc | core/domain/classroom_domain.py | Python | apache-2.0 | 1,980 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import os as _os
import site as _site
import six as _six
import sys as _sys
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.platform import tf_logging as _logging
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
if "dev" in __version__: # pylint: disable=undefined-variable
_logging.warning("""
TensorFlow's `tf-nightly` package will soon be updated to TensorFlow 2.0.
Please upgrade your code to TensorFlow 2.0:
* https://www.tensorflow.org/guide/migrate
Or install the latest stable TensorFlow 1.X release:
* `pip install -U "tensorflow==1.*"`
Otherwise your code may be broken by the change.
""")
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = _sys.modules[__name__].bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules. Only needed if using
# lazy loading.
_current_module.compat.v2 # pylint: disable=pointless-statement
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v1.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)
try:
from .python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if not _six.PY2:
import typing as _typing
if _typing.TYPE_CHECKING:
from tensorflow_estimator.python.estimator.api._v1 import estimator
# pylint: enable=g-import-not-at-top
from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
_CONTRIB_WARNING = """
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
"""
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib',
_CONTRIB_WARNING)
del LazyLoader
# The templated code that replaces the placeholder above sometimes
# sets the __all__ variable. If it does, we have to be sure to add
# "contrib".
if '__all__' in vars():
vars()['__all__'].append('contrib')
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# The 'app' module will be imported as part of the placeholder section above.
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)
_major_api_version = 1
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [] if _site.USER_SITE is None else [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
# TODO(gunan): Add sanity checks to loaded modules here.
for _s in _site_packages_dirs:
# Load first party dynamic kernels.
_main_dir = _os.path.join(_s, 'tensorflow/core/kernels')
if _os.path.exists(_main_dir):
_ll.load_library(_main_dir)
# Load third party dynamic kernels.
_plugin_dir = _os.path.join(_s, 'tensorflow-plugins')
if _os.path.exists(_plugin_dir):
_ll.load_library(_plugin_dir)
# Load Pluggable Device Library
_ll.load_pluggable_device_library(_plugin_dir)
# Delete modules that should be hidden from dir().
# Don't fail if these modules are not available.
# For e.g. this file will be originally placed under tensorflow/_api/v1 which
# does not have 'python', 'core' directories. Then, it will be copied
# to tensorflow/ which does have these two directories.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
try:
del core
except NameError:
pass
try:
del compiler
except NameError:
pass
# __all__ PLACEHOLDER
| cxxgtxy/tensorflow | tensorflow/api_template_v1.__init__.py | Python | apache-2.0 | 6,757 |
"""Apply the same action to the simulated and real A1 robot.
As a basic debug tool, this script allows you to execute the same action
(which you choose from the pybullet GUI) on the simulation and real robot
simultaneously. Make sure to put the real robot on a rack before testing.
"""
from absl import app
from absl import logging
import numpy as np
import time
from tqdm import tqdm
import pybullet # pytype:disable=import-error
import pybullet_data
from pybullet_utils import bullet_client
from motion_imitation.robots import a1_robot
from motion_imitation.robots import robot_config
FREQ = 0.5
def main(_):
logging.info(
"WARNING: this code executes low-level controller on the robot.")
logging.info("Make sure the robot is hang on rack before proceeding.")
input("Press enter to continue...")
# Construct sim env and real robot
p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = a1_robot.A1Robot(pybullet_client=p, action_repeat=1)
# Move the motors slowly to initial position
robot.ReceiveObservation()
current_motor_angle = np.array(robot.GetMotorAngles())
desired_motor_angle = np.array([0., 0.9, -1.8] * 4)
for t in tqdm(range(300)):
blend_ratio = np.minimum(t / 200., 1)
action = (1 - blend_ratio
) * current_motor_angle + blend_ratio * desired_motor_angle
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.005)
# Move the legs in a sinusoidal curve
for t in tqdm(range(1000)):
angle_hip = 0.9 + 0.2 * np.sin(2 * np.pi * FREQ * 0.01 * t)
angle_calf = -2 * angle_hip
action = np.array([0., angle_hip, angle_calf] * 4)
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.007)
# print(robot.GetFootContacts())
print(robot.GetBaseVelocity())
robot.Terminate()
if __name__ == '__main__':
app.run(main)
| google-research/motion_imitation | motion_imitation/examples/a1_robot_exercise.py | Python | apache-2.0 | 1,931 |
# Copyright (c) 2010, 2011 Arek Korbik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
_s_uchar = struct.Struct('B')
_s_double_uchar = struct.Struct('BB')
_s_ushort = struct.Struct('>H')
_s_ushort_l = struct.Struct('<H')
_s_ulong_b = struct.Struct('>L')
_s_ulong_l = struct.Struct('<L')
_s_double_ulong_b = struct.Struct('>LL')
_s_double = struct.Struct('>d')
_s_ext_csid = struct.Struct('<BH')
_s_time_size_type = struct.Struct('>HBHBB')
_s_time = struct.Struct('>HB')
_s_set_bw = struct.Struct('>LB')
_s_date_tz = struct.Struct('>dh')
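# For illustration (assumption, not part of the original module):
# _s_ushort.pack(258) == b'\x01\x02' and _s_ushort.unpack(b'\x01\x02') == (258,),
# i.e. these Structs encode and decode fixed-width fields with explicit endianness.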
| arkadini/twimp | twimp/primitives.py | Python | apache-2.0 | 1,077 |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
from neon.backends import gen_backend
from neon.backends.tests.utils import assert_tensors_allclose
def test_gpu_randomstate():
# run 1
be = gen_backend(backend='gpu', rng_seed=100)
a = be.empty((3, 3))
a[:] = be.rand() # gpu rand
x0 = a.get()
x1 = be.rng.rand(3, 3) # host rand
a[:] = be.rand() # gpu rand
x2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
x3 = a.get()
assert len(be.context_rand_state_map) == 1 and len(be.context_rand_state_alive) == 1
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is True
# run 2, using reset
be.rng_reset()
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is False
a[:] = be.rand()
y0 = a.get()
y1 = be.rng.rand(3, 3)
a[:] = be.rand()
y2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y3 = a.get()
assert len(be.context_rand_state_map) == 1 and len(be.context_rand_state_alive) == 1
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is True
del(be)
# run 3, using a new backend
be = gen_backend(backend='gpu', rng_seed=100)
a = be.empty((3, 3))
a[:] = be.rand() # gpu rand
z0 = a.get()
z1 = be.rng.rand(3, 3) # host rand
a[:] = be.rand() # gpu rand
z2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
z3 = a.get()
# check equality
assert_tensors_allclose([x0, x1, x2, x3], [y0, y1, y2, y3], rtol=0., atol=0.)
assert_tensors_allclose([x0, x1, x2, x3], [z0, z1, z2, z3], rtol=0., atol=0.)
del(be)
def test_cpu_randomstate():
# run 1
be = gen_backend(backend='cpu', rng_seed=100)
a = be.empty((3, 3))
be.make_binary_mask(a, keepthresh=be.rng.rand())
x0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
x1 = a.get()
# run 2, using reset
be.rng_reset()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y1 = a.get()
del(be)
# run 3, using a new backend
be = gen_backend(backend='cpu', rng_seed=100)
a = be.empty((3, 3))
be.make_binary_mask(a, keepthresh=be.rng.rand())
z0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
z1 = a.get()
# check equality
assert_tensors_allclose([x0, x1], [y0, y1], rtol=0., atol=0.)
assert_tensors_allclose([x0, x1], [z0, z1], rtol=0., atol=0.)
del(be)
| Bam4d/neon | neon/backends/tests/test_randomstate.py | Python | apache-2.0 | 3,295 |
import ConfigParser
import os
import re
import ldap
import asfgit.cfg as cfg
import asfgit.log as log
import asfgit.util as util
GROUP_DN="cn=%(group)s,ou=groups,dc=apache,dc=org"
DN_RE=re.compile("uid=([^,]+),ou=people,dc=apache,dc=org")
SUBPROJECT_RE=re.compile("-.+$")
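# Illustrative example (assumption): SUBPROJECT_RE.sub("", "whimsy-site") -> "whimsy",
# i.e. a "-subproject" suffix is stripped before building the LDAP group DN.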
def authorized_committers(repo_name):
writers = set()
# Read the static file to get admin and bot names.
parser = ConfigParser.SafeConfigParser()
with open(cfg.auth_file) as handle:
parser.readfp(handle)
for admin in parser.get("groups", "gitadmins").split(","):
writers.add(util.decode(admin.strip()))
if parser.has_option("groups", repo_name):
dn = parser.get("groups", repo_name).strip()
else:
# drop subproject name if present
repo_name = SUBPROJECT_RE.sub("", repo_name)
dn = GROUP_DN % {"group": repo_name}
# Individually granted access
if parser.has_option("individuals", repo_name):
for person in parser.get("individuals", repo_name).split(","):
writers.add(util.decode(person.strip()))
# Add the committers listed in ldap for the project.
lh = ldap.initialize("ldaps://ldap-lb-us.apache.org")
attrs = ["memberUid", "member"]
try:
for dn, attrs in lh.search_s(dn, ldap.SCOPE_BASE, attrlist=attrs):
for availid in attrs.get("memberUid", []):
writers.add(availid)
for dn in attrs.get("member", []):
writers.add(DN_RE.match(dn).group(1))
except:
log.exception()
# Add per-repository exceptions
map(writers.add, cfg.extra_writers)
return writers
| chtyim/infrastructure-puppet | modules/gitserver_dual/files/asfgit/auth.py | Python | apache-2.0 | 1,642 |
from armstrong.core.arm_sections import utils
from armstrong.core.arm_sections.models import Section
from ._utils import ArmSectionsTestCase, override_settings
from .support.models import SimpleCommon
def rel_field_names(rels):
return [rel.field.name for rel in rels]
class get_configured_item_modelTestCase(ArmSectionsTestCase):
def test_returns_configured_model(self):
m = "%s.FooBar" % self.__class__.__module__
with self.settings(ARMSTRONG_SECTION_ITEM_MODEL=m):
module, model = utils.get_module_and_model_names()
self.assertEqual(self.__class__.__module__, module)
self.assertEqual("FooBar", model)
def test_provides_default_value(self):
with self.settings(ARMSTRONG_SECTION_ITEM_MODEL=False):
module, model = utils.get_module_and_model_names()
self.assertEqual("armstrong.apps.content.models", module)
self.assertEqual("Content", model)
class get_item_model_classTestCase(ArmSectionsTestCase):
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon')
def test_returns_specified_class(self):
self.assertEqual(SimpleCommon, utils.get_item_model_class())
class get_section_relationsTestCase(ArmSectionsTestCase):
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon')
def test_returns_relation_for_foreign_key_only(self):
self.assertEqual(
['primary_section'],
rel_field_names(utils.get_section_relations(Section)))
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.ComplexCommon')
def test_returns_relations_for_foreign_key_and_many_to_many(self):
self.assertEqual(
['primary_section', 'related_sections'],
rel_field_names(utils.get_section_relations(Section)))
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.MultipleManyToManyModel')
def test_returns_relations_for_subclass_with_foreign_key_and_m2m(self):
self.assertEqual(
['primary_section', 'related_sections', 'more_sections'],
rel_field_names(utils.get_section_relations(Section)))
class get_section_many_to_many_relationsTestCase(ArmSectionsTestCase):
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon')
def test_returns_no_relations_for_foreign_key_only(self):
self.assertEqual(
[],
rel_field_names(utils.get_section_many_to_many_relations(Section)))
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.ComplexCommon')
def test_returns_relation_for_foreign_key_and_many_to_many(self):
self.assertEqual(
['related_sections'],
rel_field_names(utils.get_section_many_to_many_relations(Section)))
@override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.MultipleManyToManyModel')
def test_returns_relations_for_subclass_with_foreign_key_and_m2m(self):
self.assertEqual(
['related_sections', 'more_sections'],
rel_field_names(utils.get_section_many_to_many_relations(Section)))
| armstrong/armstrong.core.arm_sections | tests/utils.py | Python | apache-2.0 | 3,181 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal tilera driver."""
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import tilera
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.tilera.Tilera',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalTileraTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalTileraTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = tilera.Tilera(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info()
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class TileraClassMethodsTestCase(BareMetalTileraTestCase):
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
tilera.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
tilera.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_tilera_nfs_path(self):
self._create_node()
self.node['id'] = '123'
tilera_nfs_dir = "fs_" + self.node['id']
self.assertEqual(
tilera.get_tilera_nfs_path(self.node['id']),
os.path.join(CONF.baremetal.tftp_root,
tilera_nfs_dir))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
instance_type = utils.get_test_instance_type(self.context)
instance_type['swap'] = 0
self.instance = utils.get_test_instance(self.context, instance_type)
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
# Tilera case needs only kernel_id.
self.instance['kernel_id'] = 'aaaa'
self.instance['uuid'] = 'fake-uuid'
# Here, we confirm both that kernel_id was set
# and that the proper paths are getting set for all of them
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = tilera.get_tftp_image_info(self.instance)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
}
self.assertEqual(res, expected)
class TileraPrivateMethodsTestCase(BareMetalTileraTestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
image_info = tilera.get_tftp_image_info(self.instance)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(tilera.get_image_dir_path(self.instance)).\
AndReturn(True)
os.path.exists(tilera.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = tilera.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=tilera.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files,
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(tilera, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
tilera.get_tftp_image_info(self.instance).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(tilera.get_image_file_path(
self.instance))
bm_utils.rmtree_without_raise(tilera.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
image_info = {
'kernel': [None, 'cccc'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
tilera_config = 'this is a fake tilera config'
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(tilera, 'get_partition_sizes')
tilera.get_tftp_image_info(self.instance).AndReturn(image_info)
tilera.get_partition_sizes(self.instance).AndReturn((0, 0))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertIsNotNone(row['deploy_key'])
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.node['id'] = 'fake-node-id'
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
tilera_path = tilera.get_tilera_nfs_path(self.node['id'])
tilera.get_tftp_image_info(self.instance).\
AndRaise(exception.NovaException)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
| imsplitbit/nova | nova/tests/virt/baremetal/test_tilera.py | Python | apache-2.0 | 15,118 |
__all__ = ('WebSocketResponse', 'MsgType')
import asyncio
import warnings
from . import hdrs
from .errors import HttpProcessingError, ClientDisconnectedError
from .websocket import do_handshake, Message, WebSocketError
from .websocket_client import MsgType, closedMessage
from .web_exceptions import (
HTTPBadRequest, HTTPMethodNotAllowed, HTTPInternalServerError)
from aio2py.required.aiohttp.web_reqrep import StreamResponse
THRESHOLD_CONNLOST_ACCESS = 5
class WebSocketResponse(StreamResponse):
def __init__(self, *,
timeout=10.0, autoclose=True, autoping=True, protocols=()):
super().__init__(status=101)
self._protocols = protocols
self._protocol = None
self._writer = None
self._reader = None
self._closed = False
self._closing = False
self._conn_lost = 0
self._close_code = None
self._loop = None
self._waiting = False
self._exception = None
self._timeout = timeout
self._autoclose = autoclose
self._autoping = autoping
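# Hedged usage sketch for this class (old-style aiohttp API assumed; the handler name
# and echo logic are illustrative only, not part of the original module):
# @asyncio.coroutine
# def handler(request):
#     ws = WebSocketResponse()
#     ws.start(request)
#     while True:
#         msg = yield from ws.receive()
#         if msg.tp == MsgType.text:
#             ws.send_str(msg.data)
#         elif msg.tp in (MsgType.close, MsgType.error):
#             break
#     return ws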
def start(self, request):
# run the pre-check first so its errors are not hidden by do_handshake() exceptions
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
try:
status, headers, parser, writer, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError as err:
if err.code == 405:
raise HTTPMethodNotAllowed(
request.method, [hdrs.METH_GET], body=b'')
elif err.code == 400:
raise HTTPBadRequest(text=err.message, headers=err.headers)
else: # pragma: no cover
raise HTTPInternalServerError() from err
if self.status != status:
self.set_status(status)
for k, v in headers:
self.headers[k] = v
self.force_close()
resp_impl = super().start(request)
self._reader = request._reader.set_parser(parser)
self._writer = writer
self._protocol = protocol
self._loop = request.app.loop
return resp_impl
def can_start(self, request):
if self._writer is not None:
raise RuntimeError('Already started')
try:
_, _, _, _, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError:
return False, None
else:
return True, protocol
@property
def closed(self):
return self._closed
@property
def close_code(self):
return self._close_code
@property
def protocol(self):
return self._protocol
def exception(self):
return self._exception
    def ping(self, message=b''):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.ping(message)
    def pong(self, message=b''):
# unsolicited pong
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.pong(message)
def send_str(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, str):
raise TypeError('data argument must be str (%r)' % type(data))
self._writer.send(data, binary=False)
def send_bytes(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)' %
type(data))
self._writer.send(data, binary=True)
@asyncio.coroutine
def wait_closed(self): # pragma: no cover
warnings.warn(
'wait_closed() coroutine is deprecated. use close() instead',
DeprecationWarning)
return (yield from self.close())
@asyncio.coroutine
def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self.close()
self._eof_sent = True
@asyncio.coroutine
def close(self, *, code=1000, message=b''):
if self._writer is None:
raise RuntimeError('Call .start() first')
if not self._closed:
self._closed = True
try:
self._writer.close(code, message)
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if self._closing:
return True
while True:
try:
msg = yield from asyncio.wait_for(
self._reader.read(),
timeout=self._timeout, loop=self._loop)
except asyncio.CancelledError:
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if msg.tp == MsgType.close:
self._close_code = msg.data
return True
else:
return False
@asyncio.coroutine
def receive(self):
if self._reader is None:
raise RuntimeError('Call .start() first')
if self._waiting:
raise RuntimeError('Concurrent call to receive() is not allowed')
self._waiting = True
try:
while True:
if self._closed:
self._conn_lost += 1
if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
raise RuntimeError('WebSocket connection is closed.')
return closedMessage
try:
msg = yield from self._reader.read()
except (asyncio.CancelledError, asyncio.TimeoutError):
raise
except WebSocketError as exc:
self._close_code = exc.code
yield from self.close(code=exc.code)
return Message(MsgType.error, exc, None)
except ClientDisconnectedError:
self._closed = True
self._close_code = 1006
return Message(MsgType.close, None, None)
except Exception as exc:
self._exception = exc
self._closing = True
self._close_code = 1006
yield from self.close()
return Message(MsgType.error, exc, None)
if msg.tp == MsgType.close:
self._closing = True
self._close_code = msg.data
if not self._closed and self._autoclose:
yield from self.close()
return msg
elif not self._closed:
if msg.tp == MsgType.ping and self._autoping:
self._writer.pong(msg.data)
elif msg.tp == MsgType.pong and self._autoping:
continue
else:
return msg
finally:
self._waiting = False
@asyncio.coroutine
def receive_msg(self): # pragma: no cover
warnings.warn(
'receive_msg() coroutine is deprecated. use receive() instead',
DeprecationWarning)
return (yield from self.receive())
@asyncio.coroutine
def receive_str(self):
msg = yield from self.receive()
if msg.tp != MsgType.text:
raise TypeError(
"Received message {}:{!r} is not str".format(msg.tp, msg.data))
return msg.data
@asyncio.coroutine
def receive_bytes(self):
msg = yield from self.receive()
if msg.tp != MsgType.binary:
raise TypeError(
"Received message {}:{!r} is not bytes".format(msg.tp,
msg.data))
return msg.data
def write(self, data):
raise RuntimeError("Cannot call .write() for websocket")
|
lfblogs/aio2py
|
aio2py/required/aiohttp/web_ws.py
|
Python
|
apache-2.0
| 8,999
|
#!/usr/bin/python3
__author__ = 'nhumrich'
import logspit.runner
if __name__ == '__main__':
logspit.runner.run()
|
CanopyTax/logspit
|
run.py
|
Python
|
apache-2.0
| 118
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.events as events
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
@mock.patch.object(policy.Enforcer, 'enforce')
class EventControllerTest(tools.ControllerTest, common.HeatTestCase):
"""Tests the API class EventController.
    Tests the API class which acts as the WSGI controller,
    the endpoint that processes API requests after they are routed.
    """
def setUp(self):
super(EventControllerTest, self).setUp()
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8004
cfgopts = DummyConfig()
self.controller = events.EventController(options=cfgopts)
def test_resource_index_event_id_integer(self, mock_enforce):
self._test_resource_index('42', mock_enforce)
def test_resource_index_event_id_uuid(self, mock_enforce):
self._test_resource_index('a3455d8c-9f88-404d-a85b-5315293e67de',
mock_enforce)
def test_resource_index_nested_depth(self, mock_enforce):
self._test_resource_index('a3455d8c-9f88-404d-a85b-5315293e67de',
mock_enforce, nested_depth=1)
def _test_resource_index(self, event_id, mock_enforce, nested_depth=None):
self._mock_enforce_setup(mock_enforce, 'index', True)
res_name = 'WikiDatabase'
params = {}
if nested_depth:
params['nested_depth'] = nested_depth
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events',
params=params)
kwargs = {'stack_identity': stack_identity,
'nested_depth': nested_depth,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': {'resource_name': res_name}}
engine_resp = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_type': u'AWS::EC2::Instance',
}
]
if nested_depth:
engine_resp[0]['root_stack_id'] = dict(stack_identity)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name)
expected = {
'events': [
{
'id': event_id,
'links': [
{'href': self._url(ev_identity), 'rel': 'self'},
{'href': self._url(res_identity), 'rel': 'resource'},
{'href': self._url(stack_identity), 'rel': 'stack'},
],
u'resource_name': res_name,
u'logical_resource_id': res_name,
u'resource_status_reason': u'state changed',
u'event_time': u'2012-07-23T13:05:39Z',
u'resource_status': u'CREATE_IN_PROGRESS',
u'physical_resource_id': None,
}
]
}
if nested_depth:
expected['events'][0]['links'].append(
{'href': self._url(stack_identity), 'rel': 'root_stack'}
)
self.assertEqual(expected, result)
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_multiple_resource_names(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
res_name = 'resource3'
event_id = '42'
params = {
'resource_name': ['resource1', 'resource2']
}
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() + '/events',
params=params)
mock_call.return_value = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_type': u'AWS::EC2::Instance',
}
]
self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(7, len(engine_args))
self.assertIn('filters', engine_args)
self.assertIn('resource_name', engine_args['filters'])
self.assertEqual(res_name, engine_args['filters']['resource_name'])
self.assertNotIn('resource1', engine_args['filters']['resource_name'])
self.assertNotIn('resource2', engine_args['filters']['resource_name'])
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_multiple_resource_names_no_resource(self, mock_call,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
res_name = 'resource3'
event_id = '42'
params = {
'resource_name': ['resource1', 'resource2']
}
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() + '/events',
params=params)
mock_call.return_value = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_type': u'AWS::EC2::Instance',
}
]
self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(7, len(engine_args))
self.assertIn('filters', engine_args)
self.assertIn('resource_name', engine_args['filters'])
self.assertIn('resource1', engine_args['filters']['resource_name'])
self.assertIn('resource2', engine_args['filters']['resource_name'])
def test_stack_index_event_id_integer(self, mock_enforce):
self._test_stack_index('42', mock_enforce)
def test_stack_index_event_id_uuid(self, mock_enforce):
self._test_stack_index('a3455d8c-9f88-404d-a85b-5315293e67de',
mock_enforce)
def _test_stack_index(self, event_id, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() + '/events')
kwargs = {'stack_identity': stack_identity, 'nested_depth': None,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': {'resource_name': res_name}}
engine_resp = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_type': u'AWS::EC2::Instance',
}
]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name)
expected = {
'events': [
{
'id': event_id,
'links': [
{'href': self._url(ev_identity), 'rel': 'self'},
{'href': self._url(res_identity), 'rel': 'resource'},
{'href': self._url(stack_identity), 'rel': 'stack'},
],
u'resource_name': res_name,
u'logical_resource_id': res_name,
u'resource_status_reason': u'state changed',
u'event_time': u'2012-07-23T13:05:39Z',
u'resource_status': u'CREATE_IN_PROGRESS',
u'physical_resource_id': None,
}
]
}
self.assertEqual(expected, result)
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
def test_index_stack_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() + '/events')
kwargs = {'stack_identity': stack_identity, 'nested_depth': None,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None}
error = heat_exc.EntityNotFound(entity='Stack', name='a')
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=tools.to_remote_error(error))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('EntityNotFound', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() + '/events')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_index_resource_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '6')
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events')
kwargs = {'stack_identity': stack_identity, 'nested_depth': None,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': {'resource_name': res_name}}
engine_resp = []
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name)
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
'sort_keys': 'fake sort keys',
'marker': 'fake marker',
'sort_dir': 'fake sort dir',
'balrog': 'you shall not pass!'
}
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() + '/events',
params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(7, len(engine_args))
self.assertIn('limit', engine_args)
self.assertEqual(10, engine_args['limit'])
self.assertIn('sort_keys', engine_args)
self.assertEqual(['fake sort keys'], engine_args['sort_keys'])
self.assertIn('marker', engine_args)
self.assertEqual('fake marker', engine_args['marker'])
self.assertIn('sort_dir', engine_args)
self.assertEqual('fake sort dir', engine_args['sort_dir'])
self.assertIn('filters', engine_args)
self.assertIsNone(engine_args['filters'])
self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
sid = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
req = self._get(sid._tenant_path() + '/events',
params={'limit': 'not-an-int'})
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant,
stack_name=sid.stack_name,
stack_id=sid.stack_id)
self.assertEqual("Only integer is acceptable by 'limit'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'resource_status': 'COMPLETE',
'resource_action': 'CREATE',
'resource_name': 'my_server',
'resource_type': 'OS::Nova::Server',
'balrog': 'you shall not pass!'
}
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() + '/events',
params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(4, len(filters))
self.assertIn('resource_status', filters)
self.assertEqual('COMPLETE', filters['resource_status'])
self.assertIn('resource_action', filters)
self.assertEqual('CREATE', filters['resource_action'])
self.assertIn('resource_name', filters)
self.assertEqual('my_server', filters['resource_name'])
self.assertIn('resource_type', filters)
self.assertEqual('OS::Nova::Server', filters['resource_type'])
self.assertNotIn('balrog', filters)
def test_show_event_id_integer(self, mock_enforce):
self._test_show('42', mock_enforce)
def test_show_event_id_uuid(self, mock_enforce):
self._test_show('a3455d8c-9f88-404d-a85b-5315293e67de', mock_enforce)
def _test_show(self, event_id, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events/' + event_id)
kwargs = {'stack_identity': stack_identity,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'nested_depth': None,
'filters': {'resource_name': res_name, 'uuid': event_id}}
engine_resp = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:06:00Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_properties': {u'UserData': u'blah'},
u'resource_type': u'AWS::EC2::Instance',
}
]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.show(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name,
event_id=event_id)
expected = {
'event': {
'id': event_id,
'links': [
{'href': self._url(ev_identity), 'rel': 'self'},
{'href': self._url(res_identity), 'rel': 'resource'},
{'href': self._url(stack_identity), 'rel': 'stack'},
],
u'resource_name': res_name,
u'logical_resource_id': res_name,
u'resource_status_reason': u'state changed',
u'event_time': u'2012-07-23T13:06:00Z',
u'resource_status': u'CREATE_COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_type': u'AWS::EC2::Instance',
u'resource_properties': {u'UserData': u'blah'},
}
}
self.assertEqual(expected, result)
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
def test_show_bad_resource(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
event_id = '42'
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '6')
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events/' + event_id)
kwargs = {'stack_identity': stack_identity,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'nested_depth': None,
'filters': {'resource_name': res_name, 'uuid': '42'}}
engine_resp = []
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name, event_id=event_id)
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
def test_show_stack_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
event_id = '42'
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events/' + event_id)
kwargs = {'stack_identity': stack_identity,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'nested_depth': None,
'filters': {'resource_name': res_name, 'uuid': '42'}}
error = heat_exc.EntityNotFound(entity='Stack', name='a')
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=tools.to_remote_error(error))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.show,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name,
event_id=event_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('EntityNotFound', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context,
('list_events', kwargs),
version='1.31'
)
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
event_id = '42'
res_name = 'WikiDatabase'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events/' + event_id)
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.show,
req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name,
event_id=event_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_show_multiple_resource_names(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
res_name = 'resource3'
event_id = '42'
stack_identity = identifier.HeatIdentifier(self.tenant,
'wibble', '6')
res_identity = identifier.ResourceIdentifier(resource_name=res_name,
**stack_identity)
ev_identity = identifier.EventIdentifier(event_id=event_id,
**res_identity)
req = self._get(stack_identity._tenant_path() +
'/resources/' + res_name + '/events/' + event_id)
mock_call.return_value = [
{
u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': dict(stack_identity),
u'resource_name': res_name,
u'resource_status_reason': u'state changed',
u'event_identity': dict(ev_identity),
u'resource_action': u'CREATE',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_type': u'AWS::EC2::Instance',
}
]
self.controller.show(req, tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id,
resource_name=res_name, event_id=event_id)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(7, len(engine_args))
self.assertIn('filters', engine_args)
self.assertIn('resource_name', engine_args['filters'])
self.assertIn(res_name, engine_args['filters']['resource_name'])
|
noironetworks/heat
|
heat/tests/api/openstack_v1/test_events.py
|
Python
|
apache-2.0
| 28,895
|
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Ensure nova configs that conflict with ironic configs are unregistered for
# the tests
from oslo_config import cfg
from nova.api import auth
from nova import exception
from nova import netconf
from nova.network.neutronv2 import api
from nova import paths
from nova import utils
from nova.virt import images
CONF = cfg.CONF
CONF.unregister_opts(exception.exc_log_opts)
CONF.unregister_opt(utils.utils_opts[3])
CONF.unregister_opt(utils.utils_opts[4])
CONF.unregister_opt(netconf.netconf_opts[0])
CONF.unregister_opt(netconf.netconf_opts[2])
CONF.unregister_opts(paths.path_opts)
CONF.unregister_opt(auth.auth_opts[1])
CONF.unregister_opts(api.neutron_opts, group='neutron')
CONF.unregister_opts(images.image_opts)
|
Tehsmash/cisco-ironic-contrib
|
cisco_ironic_contrib/tests/__init__.py
|
Python
|
apache-2.0
| 1,348
|
# Copyright 2015 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dogpile.cache import region as dp_region
from oslo_cache import core
from oslo_cache.tests import test_cache
from oslo_config import fixture as config_fixture
from oslo_utils import fixture as time_fixture
NO_VALUE = core.NO_VALUE
KEY = 'test_key'
VALUE = 'test_value'
class CacheDictBackendTest(test_cache.BaseTestCase):
def setUp(self):
super(CacheDictBackendTest, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config())
self.config_fixture.config(group='cache', backend='oslo_cache.dict')
self.time_fixture = self.useFixture(time_fixture.TimeFixture())
self.region = dp_region.make_region()
self.region.configure(
'oslo_cache.dict', arguments={'expiration_time': 0.5})
def test_dict_backend(self):
self.assertIs(NO_VALUE, self.region.get(KEY))
self.region.set(KEY, VALUE)
self.assertEqual(VALUE, self.region.get(KEY))
self.region.delete(KEY)
self.assertIs(NO_VALUE, self.region.get(KEY))
def test_dict_backend_expiration_time(self):
self.region.set(KEY, VALUE)
self.assertEqual(VALUE, self.region.get(KEY))
self.time_fixture.advance_time_seconds(1)
self.assertIs(NO_VALUE, self.region.get(KEY))
def test_dict_backend_clear_cache(self):
self.region.set(KEY, VALUE)
self.time_fixture.advance_time_seconds(1)
self.assertEqual(1, len(self.region.backend.cache))
self.region.backend._clear()
self.assertEqual(0, len(self.region.backend.cache))
def test_dict_backend_zero_expiration_time(self):
self.region = dp_region.make_region()
self.region.configure(
'oslo_cache.dict', arguments={'expiration_time': 0})
self.region.set(KEY, VALUE)
self.time_fixture.advance_time_seconds(1)
self.assertEqual(VALUE, self.region.get(KEY))
self.assertEqual(1, len(self.region.backend.cache))
self.region.backend._clear()
self.assertEqual(VALUE, self.region.get(KEY))
self.assertEqual(1, len(self.region.backend.cache))
def test_dict_backend_multi_keys(self):
self.region.set('key1', 'value1')
self.region.set('key2', 'value2')
self.time_fixture.advance_time_seconds(1)
self.region.set('key3', 'value3')
self.assertEqual(1, len(self.region.backend.cache))
self.assertIs(NO_VALUE, self.region.get('key1'))
self.assertIs(NO_VALUE, self.region.get('key2'))
self.assertEqual('value3', self.region.get('key3'))
def test_dict_backend_multi_keys_in_one_call(self):
single_value = 'Test Value'
single_key = 'testkey'
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
self.region.set(single_key, single_value)
self.assertEqual(single_value, self.region.get(single_key))
self.region.delete(single_key)
self.assertEqual(NO_VALUE, self.region.get(single_key))
self.region.set_multi(multi_values)
cached_values = self.region.get_multi(multi_values.keys())
for value in multi_values.values():
self.assertIn(value, cached_values)
self.assertEqual(len(multi_values.values()), len(cached_values))
self.region.delete_multi(multi_values.keys())
for value in self.region.get_multi(multi_values.keys()):
self.assertEqual(NO_VALUE, value)
def test_dict_backend_rewrite_value(self):
self.region.set(KEY, 'value1')
self.region.set(KEY, 'value2')
self.assertEqual('value2', self.region.get(KEY))
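# --- Usage sketch (illustrative, not part of the original test module) ---
# A minimal standalone example of configuring the dict backend exercised by
# the tests above. The 30-second expiration time is an arbitrary value chosen
# for illustration; everything else mirrors the setUp() configuration.
def _example_dict_region():
    region = dp_region.make_region()
    region.configure('oslo_cache.dict', arguments={'expiration_time': 30})
    region.set('greeting', 'hello')
    assert region.get('greeting') == 'hello'
    region.delete('greeting')
    assert region.get('greeting') is NO_VALUE
    return region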
|
openstack/oslo.cache
|
oslo_cache/tests/unit/test_dict_backend.py
|
Python
|
apache-2.0
| 4,186
|
"""The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for XY Version with brightness:
light:
platform: mqtt
name: "Office Light XY"
state_topic: "office/xy1/light/status"
command_topic: "office/xy1/light/switch"
brightness_state_topic: "office/xy1/brightness/status"
brightness_command_topic: "office/xy1/brightness/set"
xy_state_topic: "office/xy1/xy/status"
xy_command_topic: "office/xy1/xy/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and color temp:
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
color_temp_state_topic: "office/rgb1/color_temp/status"
color_temp_command_topic: "office/rgb1/color_temp/set"
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and effect:
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
effect_state_topic: "office/rgb1/effect/status"
effect_command_topic: "office/rgb1/effect/set"
effect_list:
- rainbow
- colorloop
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with white value and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
white_value_state_topic: "office/rgb1/white_value/status"
white_value_command_topic: "office/rgb1/white_value/set"
white_value_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with RGB command template:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_command_template: "{{ '#%02x%02x%02x' | format(red, green, blue)}}"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for HS Version with brightness:
light:
platform: mqtt
name: "Office Light HS"
state_topic: "office/hs1/light/status"
command_topic: "office/hs1/light/switch"
brightness_state_topic: "office/hs1/brightness/status"
brightness_command_topic: "office/hs1/brightness/set"
hs_state_topic: "office/hs1/hs/status"
hs_command_topic: "office/hs1/hs/set"
qos: 0
payload_on: "on"
payload_off: "off"
"""
import json
from os import path
from unittest.mock import call, patch
import pytest
from homeassistant import config as hass_config
from homeassistant.components import light
from homeassistant.const import ATTR_ASSUMED_STATE, SERVICE_RELOAD, STATE_OFF, STATE_ON
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"}
}
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
"""Test if command fails with command topic."""
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {"platform": "mqtt", "name": "test"}}
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_hs_white_xy_if_no_topics(hass, mqtt_mock):
"""Test if there is no color and brightness if no topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling of the state via topic."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_state_topic": "test_light_rgb/rgb/status",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_state_topic": "test_light_rgb/effect/status",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_state_topic": "test_light_rgb/hs/status",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_state_topic": "test_light_rgb/white_value/status",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_state_topic": "test_light_rgb/xy/status",
"xy_command_topic": "test_light_rgb/xy/set",
"qos": "0",
"payload_on": 1,
"payload_off": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "0")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "100")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "300")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("color_temp") is None
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "100")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 100
assert light_state.attributes["color_temp"] == 300
async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "rainbow")
light_state = hass.states.get("light.test")
assert light_state.attributes["effect"] == "rainbow"
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "125,125,125")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "0")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "200,50")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (200, 50)
async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "0.675,0.322")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.672, 0.324)
async def test_invalid_state_via_topic(hass, mqtt_mock, caplog):
"""Test handling of empty data via topic."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_state_topic": "test_light_rgb/rgb/status",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_state_topic": "test_light_rgb/effect/status",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_state_topic": "test_light_rgb/hs/status",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_state_topic": "test_light_rgb/white_value/status",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_state_topic": "test_light_rgb/xy/status",
"xy_command_topic": "test_light_rgb/xy/set",
"qos": "0",
"payload_on": 1,
"payload_off": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "255,255,255")
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "255")
async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "none")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") == "none"
assert state.attributes.get("hs_color") == (0, 0)
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") == (0.323, 0.329)
async_fire_mqtt_message(hass, "test_light_rgb/status", "")
assert "Ignoring empty state message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.state == STATE_ON
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "")
assert "Ignoring empty brightness message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 255
async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "")
assert "Ignoring empty effect message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["effect"] == "none"
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "")
assert "Ignoring empty rgb message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "")
assert "Ignoring empty hs message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (0, 0)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "bad,bad")
assert "Failed to parse hs state update" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (0, 0)
async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "")
assert "Ignoring empty xy-color message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.323, 0.329)
async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "153")
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "255")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 153
assert state.attributes.get("effect") == "none"
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") == 255
assert state.attributes.get("xy_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "")
assert "Ignoring empty color temp message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 153
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "")
assert "Ignoring empty white value message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 255
async def test_brightness_controlling_scale(hass, mqtt_mock):
"""Test the brightness controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale/status",
"command_topic": "test_scale/set",
"brightness_state_topic": "test_scale/brightness/status",
"brightness_command_topic": "test_scale/brightness/set",
"brightness_scale": "99",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_scale/status", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") is None
async_fire_mqtt_message(hass, "test_scale/status", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_scale/status", "on")
async_fire_mqtt_message(hass, "test_scale/brightness/status", "99")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 255
async def test_brightness_from_rgb_controlling_scale(hass, mqtt_mock):
"""Test the brightness controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale_rgb/status",
"command_topic": "test_scale_rgb/set",
"rgb_state_topic": "test_scale_rgb/rgb/status",
"rgb_command_topic": "test_scale_rgb/rgb/set",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_scale_rgb/status", "on")
async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "255,0,0")
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 255
async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "127,0,0")
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 127
async def test_white_value_controlling_scale(hass, mqtt_mock):
"""Test the white_value controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale/status",
"command_topic": "test_scale/set",
"white_value_state_topic": "test_scale/white_value/status",
"white_value_command_topic": "test_scale/white_value/set",
"white_value_scale": "99",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_scale/status", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") is None
async_fire_mqtt_message(hass, "test_scale/status", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_scale/status", "on")
async_fire_mqtt_message(hass, "test_scale/white_value/status", "99")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 255
async def test_controlling_state_via_topic_with_templates(hass, mqtt_mock):
"""Test the setting of the state with a template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"effect_state_topic": "test_light_rgb/effect/status",
"hs_state_topic": "test_light_rgb/hs/status",
"rgb_state_topic": "test_light_rgb/rgb/status",
"white_value_state_topic": "test_light_rgb/white_value/status",
"xy_state_topic": "test_light_rgb/xy/status",
"state_value_template": "{{ value_json.hello }}",
"brightness_value_template": "{{ value_json.hello }}",
"color_temp_value_template": "{{ value_json.hello }}",
"effect_value_template": "{{ value_json.hello }}",
"hs_value_template": '{{ value_json.hello | join(",") }}',
"rgb_value_template": '{{ value_json.hello | join(",") }}',
"white_value_template": "{{ value_json.hello }}",
"xy_value_template": '{{ value_json.hello | join(",") }}',
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert state.attributes.get("rgb_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", '{"hello": [1, 2, 3]}')
async_fire_mqtt_message(hass, "test_light_rgb/status", '{"hello": "ON"}')
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", '{"hello": "50"}')
async_fire_mqtt_message(
hass, "test_light_rgb/color_temp/status", '{"hello": "300"}'
)
async_fire_mqtt_message(
hass, "test_light_rgb/effect/status", '{"hello": "rainbow"}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 50
assert state.attributes.get("rgb_color") == (84, 169, 255)
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") == "rainbow"
assert state.attributes.get("white_value") is None
async_fire_mqtt_message(
hass, "test_light_rgb/white_value/status", '{"hello": "75"}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 50
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("effect") == "rainbow"
assert state.attributes.get("white_value") == 75
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", '{"hello": [100,50]}')
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", '{"hello": "0"}')
state = hass.states.get("light.test")
assert state.attributes.get("hs_color") == (100, 50)
async_fire_mqtt_message(
hass, "test_light_rgb/xy/status", '{"hello": [0.123,0.123]}'
)
state = hass.states.get("light.test")
assert state.attributes.get("xy_color") == (0.14, 0.131)
async def test_controlling_state_via_topic_with_value_template(hass, mqtt_mock):
"""Test the setting of the state with undocumented value_template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"value_template": "{{ value_json.hello }}",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_light_rgb/status", '{"hello": "ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "test_light_rgb/status", '{"hello": "OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending of command in optimistic mode."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"effect_list": ["colorloop", "random"],
"qos": 2,
"payload_on": "on",
"payload_off": "off",
}
}
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
# TODO: Test restoring state with white_value
"white_value": 0,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
), assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 95
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
mqtt_mock.reset_mock()
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_rgb/set", "on", 2, False),
call("test_light_rgb/rgb/set", "255,128,0", 2, False),
call("test_light_rgb/brightness/set", "50", 2, False),
call("test_light_rgb/hs/set", "359.0,78.0", 2, False),
call("test_light_rgb/xy/set", "0.14,0.131", 2, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["rgb_color"] == (255, 128, 0)
assert state.attributes["brightness"] == 50
assert state.attributes["hs_color"] == (30.118, 100)
assert state.attributes.get("white_value") is None
assert state.attributes["xy_color"] == (0.611, 0.375)
assert state.attributes.get("color_temp") is None
await common.async_turn_on(hass, "light.test", white_value=80, color_temp=125)
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_rgb/white_value/set", "80", 2, False),
call("test_light_rgb/color_temp/set", "125", 2, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes["brightness"] == 50
assert state.attributes.get("hs_color") is None
assert state.attributes["white_value"] == 80
assert state.attributes.get("xy_color") is None
assert state.attributes["color_temp"] == 125
async def test_sending_mqtt_rgb_command_with_template(hass, mqtt_mock):
"""Test the sending of RGB command with template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_rgb/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"rgb_command_template": '{{ "#%02x%02x%02x" | '
"format(red, green, blue)}}",
"payload_on": "on",
"payload_off": "off",
"qos": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 64])
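    # The rgb_command_template renders the color as a hex string; blue comes
    # through as 0x3f (63) rather than 64, presumably due to the internal
    # hs_color round trip.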
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_rgb/set", "on", 0, False),
call("test_light_rgb/rgb/set", "#ff803f", 0, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["rgb_color"] == (255, 128, 63)
async def test_sending_mqtt_color_temp_command_with_template(hass, mqtt_mock):
"""Test the sending of Color Temp command with template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_color_temp/set",
"color_temp_command_topic": "test_light_color_temp/color_temp/set",
"color_temp_command_template": "{{ (1000 / value) | round(0) }}",
"payload_on": "on",
"payload_off": "off",
"qos": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", color_temp=100)
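    # The color_temp_command_template converts the requested color_temp of 100
    # into 1000 / 100 = 10 before publishing.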
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_color_temp/set", "on", 0, False),
call("test_light_color_temp/color_temp/set", "10", 0, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["color_temp"] == 100
async def test_on_command_first(hass, mqtt_mock):
"""Test on command being sent before brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"on_command_type": "first",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=50)
# Should get the following MQTT messages.
# test_light/set: 'ON'
# test_light/bright: 50
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/set", "ON", 0, False),
call("test_light/bright", "50", 0, False),
],
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_last(hass, mqtt_mock):
"""Test on command being sent after brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=50)
# Should get the following MQTT messages.
# test_light/bright: 50
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/bright", "50", 0, False),
call("test_light/set", "ON", 0, False),
],
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_brightness(hass, mqtt_mock):
"""Test on command being sent as only brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"rgb_command_topic": "test_light/rgb",
"on_command_type": "brightness",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn on w/ no brightness - should set to max
await common.async_turn_on(hass, "light.test")
# Should get the following MQTT messages.
# test_light/bright: 255
mqtt_mock.async_publish.assert_called_once_with(
"test_light/bright", "255", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ brightness
await common.async_turn_on(hass, "light.test", brightness=50)
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "50", 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
# Turn on w/ just a color to ensure brightness gets
# added and sent.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/bright", "50", 0, False),
],
any_order=True,
)
async def test_on_command_brightness_scaled(hass, mqtt_mock):
"""Test brightness scale."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"brightness_scale": 100,
"rgb_command_topic": "test_light/rgb",
"on_command_type": "brightness",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn on w/ no brightness - should set to max
await common.async_turn_on(hass, "light.test")
# Should get the following MQTT messages.
# test_light/bright: 100
mqtt_mock.async_publish.assert_called_once_with(
"test_light/bright", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ brightness
await common.async_turn_on(hass, "light.test", brightness=50)
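    # brightness 50 on the 0-255 scale maps to roughly 50 * 100 / 255 ~= 20 on
    # the configured 0-100 brightness_scale.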
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "20", 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ max brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with(
"test_light/bright", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ min brightness
await common.async_turn_on(hass, "light.test", brightness=1)
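    # brightness 1 is published as "1" rather than scaling down to 0,
    # presumably so a non-zero request never turns the light fully off.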
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "1", 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
# Turn on w/ just a color to ensure brightness gets
# added and sent.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/bright", "1", 0, False),
],
any_order=True,
)
async def test_on_command_rgb(hass, mqtt_mock):
"""Test on command in RGB brightness mode."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"rgb_command_topic": "test_light/rgb",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=127)
# Should get the following MQTT messages.
# test_light/rgb: '127,127,127'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "127,127,127", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=255)
# Should get the following MQTT messages.
# test_light/rgb: '255,255,255'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,255,255", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=1)
# Should get the following MQTT messages.
# test_light/rgb: '1,1,1'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "1,1,1", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
# Ensure color gets scaled with brightness.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
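    # The last requested brightness was 1, so each channel is scaled by 1/255:
    # 255 -> 1, 128 -> 0, 0 -> 0.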
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "1,0,0", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=255)
# Should get the following MQTT messages.
# test_light/rgb: '255,128,0'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
async def test_on_command_rgb_template(hass, mqtt_mock):
"""Test on command in RGB brightness mode with RGB template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"rgb_command_topic": "test_light/rgb",
"rgb_command_template": "{{ red }}/{{ green }}/{{ blue }}",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=127)
# Should get the following MQTT messages.
# test_light/rgb: '127,127,127'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "127/127/127", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_effect(hass, mqtt_mock):
"""Test effect."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"effect_command_topic": "test_light/effect/set",
"effect_list": ["rainbow", "colorloop"],
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", effect="rainbow")
# Should get the following MQTT messages.
# test_light/effect/set: 'rainbow'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/effect/set", "rainbow", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one light per unique_id."""
config = {
light.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, light.DOMAIN, config)
async def test_discovery_removal_light(hass, mqtt_mock, caplog):
"""Test removal of discovered light."""
data = (
'{ "name": "test",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_deprecated(hass, mqtt_mock, caplog):
"""Test discovery of mqtt light with deprecated platform option."""
data = (
'{ "name": "Beer",' ' "platform": "mqtt",' ' "command_topic": "test_topic"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async def test_discovery_update_light_topic_and_template(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = json.dumps(
{
"name": "Beer",
"state_topic": "test_light_rgb/state1",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/state1",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/state1",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/state1",
"color_temp_state_topic": "test_light_rgb/state1",
"effect_state_topic": "test_light_rgb/state1",
"hs_state_topic": "test_light_rgb/state1",
"rgb_state_topic": "test_light_rgb/state1",
"white_value_state_topic": "test_light_rgb/state1",
"xy_state_topic": "test_light_rgb/state1",
"state_value_template": "{{ value_json.state1.state }}",
"brightness_value_template": "{{ value_json.state1.brightness }}",
"color_temp_value_template": "{{ value_json.state1.ct }}",
"effect_value_template": "{{ value_json.state1.fx }}",
"hs_value_template": "{{ value_json.state1.hs }}",
"rgb_value_template": "{{ value_json.state1.rgb }}",
"white_value_template": "{{ value_json.state1.white }}",
"xy_value_template": "{{ value_json.state1.xy }}",
}
)
data2 = json.dumps(
{
"name": "Milk",
"state_topic": "test_light_rgb/state2",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/state2",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/state2",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/state2",
"color_temp_state_topic": "test_light_rgb/state2",
"effect_state_topic": "test_light_rgb/state2",
"hs_state_topic": "test_light_rgb/state2",
"rgb_state_topic": "test_light_rgb/state2",
"white_value_state_topic": "test_light_rgb/state2",
"xy_state_topic": "test_light_rgb/state2",
"state_value_template": "{{ value_json.state2.state }}",
"brightness_value_template": "{{ value_json.state2.brightness }}",
"color_temp_value_template": "{{ value_json.state2.ct }}",
"effect_value_template": "{{ value_json.state2.fx }}",
"hs_value_template": "{{ value_json.state2.hs }}",
"rgb_value_template": "{{ value_json.state2.rgb }}",
"white_value_template": "{{ value_json.state2.white }}",
"xy_value_template": "{{ value_json.state2.xy }}",
}
)
state_data1 = [
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "brightness":100, "ct":123, "white":100, "fx":"cycle"}}',
)
],
"on",
[
("brightness", 100),
("color_temp", 123),
("white_value", 100),
("effect", "cycle"),
],
),
(
[("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
"off",
None,
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "hs":"1,2", "white":0}}',
)
],
"on",
[("hs_color", (1, 2)), ("white_value", None)],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"rgb":"255,127,63"}}',
)
],
"on",
[("rgb_color", (255, 127, 63))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"xy":"0.3, 0.4"}}',
)
],
"on",
[("xy_color", (0.3, 0.401))],
),
]
state_data2 = [
(
[
(
"test_light_rgb/state2",
'{"state2":{"state":"ON", "brightness":50, "ct":200, "white":50, "fx":"loop"}}',
)
],
"on",
[
("brightness", 50),
("color_temp", 200),
("white_value", 50),
("effect", "loop"),
],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
),
(
"test_light_rgb/state1",
'{"state2":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
),
(
"test_light_rgb/state2",
'{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
),
],
"on",
[("brightness", 50), ("color_temp", 200), ("effect", "loop")],
),
(
[("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
"on",
None,
),
(
[("test_light_rgb/state1", '{"state2":{"state":"OFF"}}')],
"on",
None,
),
(
[("test_light_rgb/state2", '{"state1":{"state":"OFF"}}')],
"on",
None,
),
(
[("test_light_rgb/state2", '{"state2":{"state":"OFF"}}')],
"off",
None,
),
(
[
(
"test_light_rgb/state2",
'{"state2":{"state":"ON", "hs":"1.2,2.2", "white":0}}',
)
],
"on",
[("hs_color", (1.2, 2.2)), ("white_value", None)],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "hs":"1,2"}}',
),
(
"test_light_rgb/state1",
'{"state2":{"state":"ON", "hs":"1,2"}}',
),
(
"test_light_rgb/state2",
'{"state1":{"state":"ON", "hs":"1,2"}}',
),
],
"on",
[("hs_color", (1.2, 2.2))],
),
(
[
(
"test_light_rgb/state2",
'{"state2":{"rgb":"63,127,255"}}',
)
],
"on",
[("rgb_color", (63, 127, 255))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"rgb":"255,127,63"}}',
),
(
"test_light_rgb/state1",
'{"state2":{"rgb":"255,127,63"}}',
),
(
"test_light_rgb/state2",
'{"state1":{"rgb":"255,127,63"}}',
),
],
"on",
[("rgb_color", (63, 127, 255))],
),
(
[
(
"test_light_rgb/state2",
'{"state2":{"xy":"0.4, 0.3"}}',
)
],
"on",
[("xy_color", (0.4, 0.3))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"white":50, "xy":"0.3, 0.4"}}',
),
(
"test_light_rgb/state1",
'{"state2":{"white":50, "xy":"0.3, 0.4"}}',
),
(
"test_light_rgb/state2",
'{"state1":{"white":50, "xy":"0.3, 0.4"}}',
),
],
"on",
[("xy_color", (0.4, 0.3))],
),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
light.DOMAIN,
data1,
data2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_light_template(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = json.dumps(
{
"name": "Beer",
"state_topic": "test_light_rgb/state1",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/state1",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/state1",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/state1",
"color_temp_state_topic": "test_light_rgb/state1",
"effect_state_topic": "test_light_rgb/state1",
"hs_state_topic": "test_light_rgb/state1",
"rgb_state_topic": "test_light_rgb/state1",
"white_value_state_topic": "test_light_rgb/state1",
"xy_state_topic": "test_light_rgb/state1",
"state_value_template": "{{ value_json.state1.state }}",
"brightness_value_template": "{{ value_json.state1.brightness }}",
"color_temp_value_template": "{{ value_json.state1.ct }}",
"effect_value_template": "{{ value_json.state1.fx }}",
"hs_value_template": "{{ value_json.state1.hs }}",
"rgb_value_template": "{{ value_json.state1.rgb }}",
"white_value_template": "{{ value_json.state1.white }}",
"xy_value_template": "{{ value_json.state1.xy }}",
}
)
data2 = json.dumps(
{
"name": "Milk",
"state_topic": "test_light_rgb/state1",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/state1",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/state1",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/state1",
"color_temp_state_topic": "test_light_rgb/state1",
"effect_state_topic": "test_light_rgb/state1",
"hs_state_topic": "test_light_rgb/state1",
"rgb_state_topic": "test_light_rgb/state1",
"white_value_state_topic": "test_light_rgb/state1",
"xy_state_topic": "test_light_rgb/state1",
"state_value_template": "{{ value_json.state2.state }}",
"brightness_value_template": "{{ value_json.state2.brightness }}",
"color_temp_value_template": "{{ value_json.state2.ct }}",
"effect_value_template": "{{ value_json.state2.fx }}",
"hs_value_template": "{{ value_json.state2.hs }}",
"rgb_value_template": "{{ value_json.state2.rgb }}",
"white_value_template": "{{ value_json.state2.white }}",
"xy_value_template": "{{ value_json.state2.xy }}",
}
)
state_data1 = [
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "brightness":100, "ct":123, "white":100, "fx":"cycle"}}',
)
],
"on",
[
("brightness", 100),
("color_temp", 123),
("white_value", 100),
("effect", "cycle"),
],
),
(
[("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
"off",
None,
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "hs":"1,2", "white":0}}',
)
],
"on",
[("hs_color", (1, 2))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"rgb":"255,127,63"}}',
)
],
"on",
[("rgb_color", (255, 127, 63))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"white":0, "xy":"0.3, 0.4"}}',
)
],
"on",
[("white_value", None), ("xy_color", (0.3, 0.401))],
),
]
state_data2 = [
(
[
(
"test_light_rgb/state1",
'{"state2":{"state":"ON", "brightness":50, "ct":200, "white":50, "fx":"loop"}}',
)
],
"on",
[
("brightness", 50),
("color_temp", 200),
("white_value", 50),
("effect", "loop"),
],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
),
],
"on",
[("brightness", 50), ("color_temp", 200), ("effect", "loop")],
),
(
[("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
"on",
None,
),
(
[("test_light_rgb/state1", '{"state2":{"state":"OFF"}}')],
"off",
None,
),
(
[
(
"test_light_rgb/state1",
'{"state2":{"state":"ON", "hs":"1.2,2.2", "white":0}}',
)
],
"on",
[("hs_color", (1.2, 2.2))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"state":"ON", "hs":"1,2"}}',
)
],
"on",
[("hs_color", (1.2, 2.2))],
),
(
[
(
"test_light_rgb/state1",
'{"state2":{"rgb":"63,127,255"}}',
)
],
"on",
[("rgb_color", (63, 127, 255))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"rgb":"255,127,63"}}',
)
],
"on",
[("rgb_color", (63, 127, 255))],
),
(
[
(
"test_light_rgb/state1",
'{"state2":{"xy":"0.4, 0.3"}}',
)
],
"on",
[("white_value", None), ("xy_color", (0.4, 0.3))],
),
(
[
(
"test_light_rgb/state1",
'{"state1":{"white":50, "xy":"0.3, 0.4"}}',
)
],
"on",
[("white_value", None), ("xy_color", (0.4, 0.3))],
),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
light.DOMAIN,
data1,
data2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
with patch(
"homeassistant.components.mqtt.light.schema_basic.MqttLight.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, light.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_max_mireds(hass, mqtt_mock):
"""Test setting min_mireds and max_mireds."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_max_mireds/set",
"color_temp_command_topic": "test_max_mireds/color_temp/set",
"max_mireds": 370,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 370
async def test_reloadable(hass, mqtt_mock):
"""Test reloading an mqtt light."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test/set",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
assert hass.states.get("light.test")
assert len(hass.states.async_all()) == 1
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"mqtt/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"mqtt",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("light.test") is None
assert hass.states.get("light.reload")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
adrienbrault/home-assistant
|
tests/components/mqtt/test_light.py
|
Python
|
apache-2.0
| 68,757
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiment_utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from language.common.utils import experiment_utils
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
FLAGS.set_default("num_train_steps", 2)
FLAGS.set_default("num_eval_steps", 2)
class ExperimentUtilsTest(tf.test.TestCase):
def _simple_model_fn(self, features, labels, mode, params):
logits = tf.squeeze(tf.layers.dense(features, 1))
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.to_float(labels), logits=logits))
optimizer = tf.train.GradientDescentOptimizer(0.1)
if params["use_tpu"]:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_or_create_global_step())
if params["use_tpu"]:
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def _simple_input_function(self, params):
features = [[1.0, 0.0, -1.0, 2.5],
[0.5, 1.1, -0.8, 1.5]]
labels = [0, 1]
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.repeat()
dataset = dataset.batch(params["batch_size"], drop_remainder=True)
return dataset
def test_run_experiment(self):
experiment_utils.run_experiment(
model_fn=self._simple_model_fn,
train_input_fn=self._simple_input_function,
eval_input_fn=self._simple_input_function)
def test_run_experiment_tpu(self):
params = dict(use_tpu=True)
experiment_utils.run_experiment(
model_fn=self._simple_model_fn,
train_input_fn=self._simple_input_function,
eval_input_fn=self._simple_input_function,
params=params)
if __name__ == "__main__":
tf.test.main()
|
google-research/language
|
language/common/utils/experiment_utils_test.py
|
Python
|
apache-2.0
| 2,572
|
import hashlib
import logging
from lxml import etree
from mock import Mock, patch
import os
import requests
import unittest
from six.moves.urllib.request import url2pathname
from eulfedora.models import DigitalObject
from eulfedora.server import Repository
from eulfedora.syncutil import ArchiveExport, endswith_partial, \
binarycontent_sections
from eulfedora.util import md5sum
from test.test_fedora.base import FIXTURE_ROOT
logger = logging.getLogger(__name__)
FIXTURES = {
'sync1_export': os.path.join(FIXTURE_ROOT, 'synctest1-export.xml'),
'sync2_export': os.path.join(FIXTURE_ROOT, 'synctest2-export.xml')
}
class ArchiveExportTest(unittest.TestCase):
def setUp(self):
# todo: use mocks?
self.repo = Mock(spec=Repository)
        self.obj = Mock()  # spec=DigitalObject
self.obj.pid = 'synctest:1'
self.archex = ArchiveExport(self.obj, self.repo)
# set up a request session that can load file uris, so
# fixtures can be used as export data
self.session = requests.session()
self.session.mount('file://', LocalFileAdapter())
def test_get_datastream_info(self):
dsinfo = self.archex.get_datastream_info('''<foxml:datastreamVersion ID="DC.2" LABEL="Dublin Core" CREATED="2012-10-11T14:13:03.658Z" MIMETYPE="text/xml" FORMAT_URI="http://www.openarchives.org/OAI/2.0/oai_dc/" SIZE="771">
<foxml:contentDigest TYPE="MD5" DIGEST="f53aec07f2607f536bac7ee03dbbfe7c"/>''')
self.assertEqual('DC.2', dsinfo['id'])
self.assertEqual('text/xml', dsinfo['mimetype'])
self.assertEqual('771', dsinfo['size'])
self.assertEqual('MD5', dsinfo['type'])
self.assertEqual('f53aec07f2607f536bac7ee03dbbfe7c', dsinfo['digest'])
self.assertEqual('2012-10-11T14:13:03.658Z', dsinfo['created'])
# datastream info split across chunks
self.archex.end_of_last_chunk = '''<foxml:datastreamVersion ID="DC.2" LABEL="Dublin Core" CREATED="2012-10-11T14:13:03.658Z" MIMETYPE="te'''
dsinfo = self.archex.get_datastream_info('''xt/xml" FORMAT_URI="http://www.openarchives.org/OAI/2.0/oai_dc/" SIZE="771">
<foxml:contentDigest TYPE="MD5" DIGEST="f53aec07f2607f536bac7ee03dbbfe7c"/>''')
self.assertEqual('DC.2', dsinfo['id'])
self.assertEqual('text/xml', dsinfo['mimetype'])
self.assertEqual('f53aec07f2607f536bac7ee03dbbfe7c', dsinfo['digest'])
def test_object_data(self):
        # mock api to read export data from a local fixture file
response = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi = Mock()
def mock_upload(data, *args, **kwargs):
list(data) # consume the generator so datastream processing happens
return 'uploaded://1'
mockapi.upload = mock_upload
mockapi.export.return_value = response
mockapi.base_url = 'http://fedora.example.co/fedora'
self.obj.api = self.repo.api = mockapi
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
# other tests?
# set read block size artificially low to test chunked handling
self.archex = ArchiveExport(self.obj, self.repo)
self.archex.read_block_size = 1024
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
# test with second fixture - multiple small encoded datastreams
self.archex = ArchiveExport(self.obj, self.repo)
self.archex.read_block_size = 1024
response = self.session.get('file://%s' % FIXTURES['sync2_export'])
mockapi.export.return_value = response
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
def test_object_data_split_bincontent(self):
        # explicitly test handling of binary content tag split over
# chunk boundaries
response = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi = Mock()
def mock_upload(data, *args, **kwargs):
list(data) # consume the generator so datastream processing happens
return 'uploaded://1'
mockapi.upload = mock_upload
mockapi.export.return_value = response
self.obj.api = self.repo.api = mockapi
# test binary content tag split across chunks
self.archex = ArchiveExport(self.obj, self.repo)
# use a block size that will split the fixture in the middle of
# the first binary content tag
self.archex.read_block_size = 2688
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.archex = ArchiveExport(self.obj, self.repo)
# this blocksize ends with just the < in foxml:binaryContent
self.archex.read_block_size = 2680
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.archex = ArchiveExport(self.obj, self.repo)
# this blocksize ends with an unrelated close tag </
self.archex.read_block_size = 1526
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
def test_encoded_datastream(self):
# data content within a single chunk of data
mockapi = Mock()
mockapi.export.return_value = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi.upload.return_value = 'uploaded://1'
self.obj.api = self.repo.api = mockapi
section = self.archex.get_next_section()
# get binary datastream info from first section
dsinfo = self.archex.get_datastream_info(section)
# fixture only has one binary content block
# get binarycontent tag out of the way
self.archex.get_next_section()
# next section will be file contents
self.archex.within_file = True
dscontent = b''.join(self.archex.encoded_datastream())
# check decoded size and MD5 match data from fixture
self.assertEqual(int(dsinfo['size']), len(dscontent))
self.assertEqual(dsinfo['digest'], md5sum(dscontent))
# data content across multiple chunks
mockapi.export.return_value = self.session.get('file://%s' % FIXTURES['sync1_export'])
self.obj.api = self.repo.api = mockapi
# set read block size artificially low to ensure
# datastream content is spread across multiple chunks
self.archex.read_block_size = 1024
finished = False
# iterate through the data, similar to object_data method,
# but only handle binary content
while not finished:
try:
section = self.archex.get_next_section()
except StopIteration:
finished = True
# find the section with starting binary content
if section == '<foxml:binaryContent>':
# then decode the subsequent content
self.archex.within_file = True
dscontent = ''.join(self.archex.encoded_datastream())
self.assertEqual(int(dsinfo['size']), len(dscontent))
self.assertEqual(dsinfo['digest'], md5sum(dscontent))
# stop processing
finished = True
class UtilsTest(unittest.TestCase):
def test_endswith_partial(self):
test_string = '<foxml:binaryContent>'
test_len = 19
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
test_len = 5
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
test_len = 1
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
# no overlap
self.assertFalse(endswith_partial('some content', test_string))
def test_binarycontent_sections(self):
with open(FIXTURES['sync1_export'], 'rb') as sync1data:
sections = list(binarycontent_sections(sync1data.read()))
self.assertEqual(5, len(sections))
self.assertEqual(b'<foxml:binaryContent>', sections[1])
self.assertEqual(b'</foxml:binaryContent>', sections[3])
with open(FIXTURES['sync2_export'], 'rb') as sync1data:
sections = list(binarycontent_sections(sync1data.read()))
# second fixture should break into 17 sections
self.assertEqual(17, len(sections))
self.assertEqual(b'<foxml:binaryContent>', sections[1])
self.assertEqual(b'</foxml:binaryContent>', sections[3])
self.assertEqual(b'<foxml:binaryContent>', sections[5])
self.assertEqual(b'</foxml:binaryContent>', sections[7])
self.assertEqual(b'<foxml:binaryContent>', sections[9])
self.assertEqual(b'</foxml:binaryContent>', sections[11])
self.assertEqual(b'<foxml:binaryContent>', sections[13])
self.assertEqual(b'</foxml:binaryContent>', sections[15])
# requests file uri adapter, thanks to
# http://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""Protocol Adapter to allow Requests to GET file:// URLs
@todo: Properly handle non-empty hostname portions.
"""
@staticmethod
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path Not A File"
elif not os.path.isfile(path):
return 404, "File Not Found"
elif not os.access(path, os.R_OK):
return 403, "Access Denied"
else:
return 200, "OK"
def send(self, req, **kwargs): # pylint: disable=unused-argument
"""Return the file specified by the given request
@type req: C{PreparedRequest}
@todo: Should I bother filling `response.headers` and processing
If-Modified-Since and friends using `os.stat`?
"""
path = os.path.normcase(os.path.normpath(url2pathname(req.path_url)))
response = requests.Response()
response.status_code, response.reason = self._chkpath(req.method, path)
if response.status_code == 200 and req.method.lower() != 'head':
try:
response.raw = open(path, 'rb')
except (OSError, IOError) as err:
response.status_code = 500
response.reason = str(err)
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
response.request = req
response.connection = self
return response
def close(self):
pass
|
WSULib/eulfedora
|
test/test_fedora/test_syncutil.py
|
Python
|
apache-2.0
| 12,920
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# __future__ imports must precede all other imports
from __future__ import division
import csv
import logging
import pickle
import numpy as np
# <codecell>
# Logging definition
logger = logging.getLogger('analyzing')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log/process_ngrams.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
# <codecell>
def normalize(value):
"""
Normalize % value to 0.0<=value<=1.0
"""
value = float(value)
if value > 1.0:
return value / 100
else:
return value
# Load Sentiwordnet
sentiwordnet = {}
with open('data/sentiwordnet/sentiwordnet.tsv', 'rb') as ifile:
    reader = csv.reader(ifile, delimiter='\t')
headers = reader.next()
for row in reader:
# Upload only adjectives and with a specific objectivity threshold
cond1 = True
if cond1:
sentiwordnet["%s" % (row[5])] = {"pos": normalize(row[6]), "neg": normalize(row[7]), "obj": 1.0}
logger.info(' %s sentiwords loaded' % (len(sentiwordnet)))
# <codecell>
# Process each review
total_bands = {"1.0": 0, "2.0": 0, "3.0": 0, "4.0": 0, "5.0": 0}
with open('data/output/words_labeled.p', 'rb') as ifile:
words = pickle.loads(ifile.read())
logger.info(' %s labeled words loaded' % (len(words)))
with open('data/output/results.tsv', 'wb') as ofile:
writer = csv.writer(ofile, delimiter='\t')
writer.writerow(["word", "pos", "neg", "1.0", "2.0", "3.0", "4.0", "5.0"])
for word in words:
for band in words[word]:
total_bands[band] += words[word][band]
n = sum(words[word][i] for i in words[word])
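        # n = total occurrences of this word across all rating bands; the
        # per-band counts are divided by n below to get probabilities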
# Random experiment
random_experiment = False
if not random_experiment:
sw_pos = sentiwordnet[word]["pos"]
sw_neg = sentiwordnet[word]["neg"]
else:
if np.random.uniform(0,1) > 0.5:
sw_pos = 0.0
sw_neg = np.random.uniform(0,1)
else:
sw_neg = 0.0
sw_pos = np.random.uniform(0,1)
# Write row to file
writer.writerow([word, sw_pos, sw_neg,
words[word]["1.0"]/n, words[word]["2.0"]/n, words[word]["3.0"]/n,
words[word]["4.0"]/n, words[word]["5.0"]/n])
logger.info(' Bands distribution %s' % (total_bands))
# <headingcell level=3>
# Bands probability Analysis
# <codecell>
def acum_probability(data, bands):
"""
Return accum probability
"""
n = sum(data[band] for band in data)
if n == 0:
return 0.0
else:
return sum([data[band]/n for band in bands])
with open('data/output/words_labeled.p', 'rb') as ifile:
words = pickle.load(ifile)
for word in words:
value = acum_probability(words[word], ["3.0", "4.0", "5.0"])
if value >= 0.9:
pass
print(word," ",words[word])
# <codecell>
total_bands = {"1.0": 0, "2.0": 0, "3.0": 0, "4.0": 0, "5.0": 0}
with open('data/amazon/Cell_Phones_&_Accessories.txt', 'rb') as ifile:
for line in ifile.readlines():
if "review/score" in line:
score = float(line[len(line)-4:len(line)])
total_bands[str(score)] += 1
print(total_bands)
# <codecell>
|
rmaestre/amazon-sentiwordnet
|
Analysis.py
|
Python
|
apache-2.0
| 3,539
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Versions_DeleteVersion_sync]
from google.cloud import dialogflow_v2
def sample_delete_version():
# Create a client
client = dialogflow_v2.VersionsClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteVersionRequest(
name="name_value",
)
# Make the request
client.delete_version(request=request)
# [END dialogflow_v2_generated_Versions_DeleteVersion_sync]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2_generated_versions_delete_version_sync.py
|
Python
|
apache-2.0
| 1,386
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .api import MatrixHttpApi, MatrixRequestError, MatrixUnexpectedResponse
from threading import Thread
from time import sleep
import logging
import sys
class MatrixClient(object):
"""
The client API for Matrix. For the raw HTTP calls, see MatrixHttpApi.
Usage (new user):
client = MatrixClient("https://matrix.org")
token = client.register_with_password(username="foobar",
password="monkey")
room = client.create_room("myroom")
room.send_image(file_like_object)
Usage (logged in):
client = MatrixClient("https://matrix.org", token="foobar",
user_id="@foobar:matrix.org")
rooms = client.get_rooms() # NB: From initial sync
client.add_listener(func) # NB: event stream callback
rooms[0].add_listener(func) # NB: callbacks just for this room.
room = client.join_room("#matrix:matrix.org")
response = room.send_text("Hello!")
response = room.kick("@bob:matrix.org")
Incoming event callbacks (scopes):
def user_callback(user, incoming_event):
pass
def room_callback(room, incoming_event):
pass
def global_callback(incoming_event):
pass
"""
def __init__(self, base_url, token=None, user_id=None, valid_cert_check=True):
""" Create a new Matrix Client object.
Args:
base_url (str): The url of the HS preceding /_matrix.
                e.g. https://localhost:8008
token (Optional[str]): If you have an access token
supply it here.
user_id (Optional[str]): You must supply the user_id
(as obtained when initially logging in to obtain
the token) if supplying a token; otherwise, ignored.
            valid_cert_check (bool): Check the homeserver's
certificate on connections?
Returns:
MatrixClient
Raises:
MatrixRequestError, ValueError
"""
if token is not None and user_id is None:
raise ValueError("must supply user_id along with token")
self.api = MatrixHttpApi(base_url, token)
self.api.validate_certificate(valid_cert_check)
self.listeners = []
self.sync_token = None
self.sync_filter = None
self.logger = logging.getLogger("matrix_client")
""" Time to wait before attempting a /sync request after failing."""
self.bad_sync_timeout_limit = 60 * 60
self.rooms = {
# room_id: Room
}
if token:
self.user_id = user_id
self._sync()
def get_sync_token(self):
return self.sync_token
def set_sync_token(self, token):
self.sync_token = token
def register_with_password(self, username, password, limit=1):
""" Register for a new account on this HS.
Args:
username (str): Account username
password (str): Account password
limit (int): Deprecated. How many messages to return when syncing.
Returns:
str: Access Token
Raises:
MatrixRequestError
"""
response = self.api.register(
"m.login.password", user=username, password=password
)
self.user_id = response["user_id"]
self.token = response["access_token"]
self.hs = response["home_server"]
self.api.token = self.token
self._sync()
return self.token
def login_with_password(self, username, password, limit=1):
""" Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
limit (int): Deprecated. How many messages to return when syncing.
This will be replaced by a filter API in a later release.
Returns:
str: Access token
Raises:
MatrixRequestError
"""
response = self.api.login(
"m.login.password", user=username, password=password
)
self.user_id = response["user_id"]
self.token = response["access_token"]
self.hs = response["home_server"]
self.api.token = self.token
""" Limit Filter """
self.sync_filter = '{ "room": { "timeline" : { "limit" : %i } } }' % limit
self._sync()
return self.token
def create_room(self, alias=None, is_public=False, invitees=()):
""" Create a new room on the homeserver.
Args:
alias (str): The canonical_alias of the room.
is_public (bool): The public/private visibility of the room.
invitees (str[]): A set of user ids to invite into the room.
Returns:
Room
Raises:
MatrixRequestError
"""
response = self.api.create_room(alias, is_public, invitees)
return self._mkroom(response["room_id"])
def join_room(self, room_id_or_alias):
""" Join a room.
Args:
room_id_or_alias (str): Room ID or an alias.
Returns:
Room
Raises:
MatrixRequestError
"""
response = self.api.join_room(room_id_or_alias)
room_id = (
response["room_id"] if "room_id" in response else room_id_or_alias
)
return self._mkroom(room_id)
def get_rooms(self):
""" Return a list of Room objects that the user has joined.
Returns:
Room[]: Rooms the user has joined.
"""
return self.rooms
def add_listener(self, callback):
""" Add a listener that will send a callback when the client recieves
an event.
Args:
callback (func(roomchunk)): Callback called when an event arrives.
"""
self.listeners.append(callback)
def listen_for_events(self, timeout_ms=30000):
"""Deprecated. sync now pulls events from the request.
This function just calls _sync()
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
"""
self._sync(timeout_ms)
def listen_forever(self, timeout_ms=30000):
""" Keep listening for events forever.
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
"""
bad_sync_timeout = 5000
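        # back-off doubles after each failed sync, capped at
        # self.bad_sync_timeout_limit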
while(True):
try:
self._sync(timeout_ms)
bad_sync_timeout = 5
except MatrixRequestError as e:
self.logger.warning("A MatrixRequestError occured during sync.")
if e.code >= 500:
self.logger.warning("Problem occured serverside. Waiting %i seconds",
bad_sync_timeout)
sleep(bad_sync_timeout)
bad_sync_timeout = min(bad_sync_timeout * 2,
self.bad_sync_timeout_limit)
else:
raise e
except Exception as e:
self.logger.error("Exception thrown during sync\n %s", str(e))
def start_listener_thread(self, timeout_ms=30000):
""" Start a listener thread to listen for events in the background.
Args:
            timeout_ms (int): How long to poll the Home Server for before
retrying.
"""
try:
thread = Thread(target=self.listen_forever, args=(timeout_ms, ))
thread.daemon = True
thread.start()
except:
e = sys.exc_info()[0]
self.logger.error("Error: unable to start thread. %s", str(e))
def upload(self, content, content_type):
""" Upload content to the home server and recieve a MXC url.
Args:
content (bytes): The data of the content.
content_type (str): The mimetype of the content.
Raises:
MatrixUnexpectedResponse: If the homeserver gave a strange response
MatrixRequestError: If the upload failed for some reason.
"""
try:
response = self.api.media_upload(content, content_type)
if "content_uri" in response:
return response["content_uri"]
else:
raise MatrixUnexpectedResponse(
"The upload was successful, but content_uri wasn't found."
)
except MatrixRequestError as e:
raise MatrixRequestError(
code=e.code,
content="Upload failed: %s" % e
)
def _mkroom(self, room_id):
self.rooms[room_id] = Room(self, room_id)
return self.rooms[room_id]
def _process_state_event(self, state_event, current_room):
if "type" not in state_event:
return # Ignore event
etype = state_event["type"]
if etype == "m.room.name":
current_room.name = state_event["content"].get("name", None)
elif etype == "m.room.topic":
current_room.topic = state_event["content"].get("topic", None)
elif etype == "m.room.aliases":
current_room.aliases = state_event["content"].get("aliases", None)
def _sync(self, timeout_ms=30000):
# TODO: Deal with presence
# TODO: Deal with left rooms
response = self.api.sync(self.sync_token, timeout_ms, filter=self.sync_filter)
self.sync_token = response["next_batch"]
for room_id, sync_room in response['rooms']['join'].items():
if room_id not in self.rooms:
self._mkroom(room_id)
room = self.rooms[room_id]
for event in sync_room["state"]["events"]:
self._process_state_event(event, room)
for event in sync_room["timeline"]["events"]:
room._put_event(event)
def get_user(self, user_id):
""" Return a User by their id.
NOTE: This function only returns a user object, it does not verify
the user with the Home Server.
Args:
user_id (str): The matrix user id of a user.
"""
return User(self.api, user_id)
class Room(object):
""" The Room class can be used to call room specific functions
after joining a room from the Client.
"""
def __init__(self, client, room_id):
""" Create a blank Room object.
NOTE: This should ideally be called from within the Client.
NOTE: This does not verify the room with the Home Server.
"""
if not room_id.startswith("!"):
raise ValueError("RoomIDs start with !")
if ":" not in room_id:
raise ValueError("RoomIDs must have a domain component, seperated by a :")
self.room_id = room_id
self.client = client
self.listeners = []
self.events = []
self.event_history_limit = 20
self.name = None
self.aliases = []
self.topic = None
def send_text(self, text):
""" Send a plain text message to the room.
Args:
text (str): The message to send
"""
return self.client.api.send_message(self.room_id, text)
def send_emote(self, text):
""" Send a emote (/me style) message to the room.
Args:
text (str): The message to send
"""
return self.client.api.send_emote(self.room_id, text)
def send_notice(self, text):
return self.client.api.send_notice(self.room_id, text)
# See http://matrix.org/docs/spec/r0.0.1/client_server.html#m-image for the
# imageinfo args.
def send_image(self, url, name, **imageinfo):
""" Send a pre-uploaded image to the room.
See http://matrix.org/docs/spec/r0.0.1/client_server.html#m-image
for imageinfo
Args:
url (str): The mxc url of the image.
name (str): The filename of the image.
imageinfo (dict): Extra information about the image (see the m.image spec linked above).
"""
return self.client.api.send_content(
self.room_id, url, name, "m.image",
extra_information=imageinfo
)
def add_listener(self, callback):
""" Add a callback handler for events going to this room.
Args:
callback (func(roomchunk)): Callback called when an event arrives.
"""
self.listeners.append(callback)
def _put_event(self, event):
self.events.append(event)
if len(self.events) > self.event_history_limit:
self.events.pop(0)
for listener in self.listeners:
listener(event)
def get_events(self):
""" Get the most recent events for this room.
Returns:
list: The most recent events, up to event_history_limit.
"""
return self.events
def invite_user(self, user_id):
""" Invite a user to this room
Args:
user_id (str): The matrix user id of a user.
Returns:
boolean: The invitation was sent.
"""
try:
self.client.api.invite_user(self.room_id, user_id)
return True
except MatrixRequestError:
return False
def kick_user(self, user_id, reason=""):
""" Kick a user from this room
Args:
user_id (str): The matrix user id of a user.
reason (str): An optional reason for kicking the user.
Returns:
boolean: The user was kicked.
"""
try:
self.client.api.kick_user(self.room_id, user_id, reason)
return True
except MatrixRequestError:
return False
def ban_user(self, user_id, reason):
""" Ban a user from this room
Args:
user_id (str): The matrix user id of a user.
reason (str): A reason for banning the user.
Returns:
boolean: The user was banned.
"""
try:
self.client.api.ban_user(self.room_id, user_id, reason)
return True
except MatrixRequestError:
return False
def leave(self):
""" Leave the room.
Returns:
boolean: Leaving the room was successful.
"""
try:
self.client.api.leave_room(self.room_id)
del self.client.rooms[self.room_id]
return True
except MatrixRequestError:
return False
def update_room_name(self):
""" Get room name
Returns:
boolean: True if the room name changed, False if not
"""
try:
response = self.client.api.get_room_name(self.room_id)
if "name" in response and response["name"] != self.name:
self.name = response["name"]
return True
else:
return False
except MatrixRequestError:
return False
def update_room_topic(self):
""" Get room topic
Returns:
boolean: True if the topic changed, False if not
"""
try:
response = self.client.api.get_room_topic(self.room_id)
if "topic" in response and response["topic"] != self.topic:
self.topic = response["topic"]
return True
else:
return False
except MatrixRequestError:
return False
def update_aliases(self):
""" Get aliases information from room state
Returns:
boolean: True if the aliases changed, False if not
"""
try:
response = self.client.api.get_room_state(self.room_id)
for chunk in response:
if "content" in chunk and "aliases" in chunk["content"]:
if chunk["content"]["aliases"] != self.aliases:
self.aliases = chunk["content"]["aliases"]
return True
else:
return False
except MatrixRequestError:
return False
class User(object):
""" The User class can be used to call user specific functions.
"""
def __init__(self, api, user_id):
if not user_id.startswith("@"):
raise ValueError("UserIDs start with @")
if ":" not in user_id:
raise ValueError("UserIDs must have a domain component, seperated by a :")
self.user_id = user_id
self.api = api
def get_display_name(self):
""" Get this users display name.
See also get_friendly_name()
Returns:
str: Display Name
"""
return self.api.get_display_name(self.user_id)
def get_friendly_name(self):
display_name = self.api.get_display_name(self.user_id)
return display_name if display_name is not None else self.user_id
def set_display_name(self, display_name):
""" Set this users display name.
Args:
display_name (str): Display Name
"""
return self.api.set_display_name(self.user_id, display_name)
def get_avatar_url(self):
mxcurl = self.api.get_avatar_url(self.user_id)
url = self.api.get_download_url(mxcurl)
return url
def set_avatar_url(self, avatar_url):
""" Set this users avatar.
Args:
avatar_url (str): mxc url from previously uploaded
"""
return self.api.set_avatar_url(self.user_id, avatar_url)
|
dovf/matrix-python-sdk
|
matrix_client/client.py
|
Python
|
apache-2.0
| 18,097
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
APIRequest class
"""
import datetime
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from ec2api.api import cloud
from ec2api.api import ec2utils
from ec2api import exception
from ec2api.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _underscore_to_camelcase(st):
return ''.join([x[:1].upper() + x[1:] for x in st.split('_')])
def _underscore_to_xmlcase(st):
res = _underscore_to_camelcase(st)
return res[:1].lower() + res[1:]
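# Illustrative transformations (examples added here, not from the original module):
#   _underscore_to_camelcase('instance_id') -> 'InstanceId'
#   _underscore_to_xmlcase('instance_id')   -> 'instanceId'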
def _database_to_isoformat(datetimeobj):
"""Return a xs:dateTime parsable string from datatime."""
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
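# e.g. datetime.datetime(2015, 1, 2, 3, 4, 5, 678901) -> '2015-01-02T03:04:05.678Z'
# (illustrative value)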
class APIRequest(object):
def __init__(self, action, version, args):
self.action = action
self.version = version
self.args = args
if CONF.full_vpc_support:
self.controller = cloud.VpcCloudController()
else:
self.controller = cloud.CloudController()
def invoke(self, context):
try:
method = getattr(self.controller,
ec2utils.camelcase_to_underscore(self.action))
except AttributeError:
LOG.exception(_('Unsupported API request: action = %(action)s'),
{'action': self.action})
raise exception.InvalidRequest()
args = ec2utils.dict_from_dotted_str(self.args.items())
def convert_dicts_to_lists(args):
if not isinstance(args, dict):
return args
for key in args.keys():
# NOTE(vish): Turn numeric dict keys into lists
# NOTE(Alex): Turn "value"-only dict keys into values
if isinstance(args[key], dict):
if args[key] == {}:
continue
if args[key].keys()[0].isdigit():
s = args[key].items()
s.sort()
args[key] = [convert_dicts_to_lists(v) for k, v in s]
elif (args[key].keys()[0] == 'value' and
len(args[key]) == 1):
args[key] = args[key]['value']
return args
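# Illustrative conversion (hypothetical request args): {'SecurityGroup': {'1': 'default', '2': 'web'}}
# becomes {'SecurityGroup': ['default', 'web']}, and {'Attribute': {'value': 'x'}}
# becomes {'Attribute': 'x'}.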
args = convert_dicts_to_lists(args)
result = method(context, **args)
return self._render_response(result, context.request_id)
def _render_response(self, response_data, request_id):
xml = minidom.Document()
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
'http://vpc.ind-west-1.jiocloudservices.com/doc/%s/'
% self.version)
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
if response_data is True:
self._render_dict(xml, response_el, {'return': 'true'})
else:
self._render_dict(xml, response_el, response_data)
xml.appendChild(response_el)
response = xml.toxml()
root = etree.fromstring(response)
response = etree.tostring(root, pretty_print=True)
xml.unlink()
# Don't write private key to log
if self.action != "CreateKeyPair":
LOG.debug(response)
else:
LOG.debug("CreateKeyPair: Return Private Key")
return response
def _render_dict(self, xml, el, data):
try:
for key in data.keys():
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except Exception:
LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):
el_name = _underscore_to_xmlcase(el_name)
data_el = xml.createElement(el_name)
if isinstance(data, list):
for item in data:
data_el.appendChild(self._render_data(xml, 'item', item))
elif isinstance(data, dict):
self._render_dict(xml, data_el, data)
elif hasattr(data, '__dict__'):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif isinstance(data, datetime.datetime):
data_el.appendChild(
xml.createTextNode(_database_to_isoformat(data)))
elif data is not None:
data_el.appendChild(xml.createTextNode(
encodeutils.safe_encode(six.text_type(data))))
return data_el
|
MayankGo/ec2-api
|
ec2api/api/apirequest.py
|
Python
|
apache-2.0
| 5,353
|
from nose.tools import eq_
from .. import OnExp, OnState, OnTime
import datetime
from collections import defaultdict
def dt2s(dt):
return dt.strftime('%Y-%m-%d %H:%M:%S')
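# The OnExp expressions below appear to follow a cron-like field order of
# "minute hour day-of-month month day-of-week"; this is inferred from the
# expected results in these tests rather than from OnExp documentation.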
def test_one_date():
exp = OnExp("0 6 1,31 * tue,wed")
st = exp.state(datetime.datetime(2014,6,13,0,0,0))
#rules = exp.rules[schepy._MONTH_DAY][2014]
eq_(str(st), "2014-07-01 06:00:00")
def test_timez_in_middle():
exp = OnExp("5,7 6 1,31 * tue,wed")
st = exp.state(datetime.datetime(2014,7,1,6,6,0))
#rules = exp.rules[schepy._MONTH_DAY][2014]
eq_(str(st), "2014-07-01 06:07:00")
def test_all_dates():
exp = OnExp("0 6 1,31 * tue,wed")
s = exp.state(datetime.datetime(1000,1,1,0,0,0))
dd = defaultdict(list)
ls = []
loop=True
while loop:
try:
dt = s.toDateTime()
dd[dt.year].append(dt)
ls.append(dt)
s = s.forward()
except ValueError as e:
loop = False
eq_(len(dd),2000)
eq_(len(ls)/len(dd),5)
def test_ontime():
ont = OnTime("0 6 1,31 * tue,wed","Australia/Sydney")
s = ont.state(datetime.datetime(2014,7,4,0,0,0))
eq_("2014-09-30 20:00:00", dt2s(ont.toUtc(s)))
eq_("2014-06-30 20:00:00", dt2s(ont.toUtc(s.back())) )
eq_("2014-12-30 19:00:00", dt2s(ont.toUtc(s.forward())) )
|
walnutgeek/OnTimer
|
ontimer/tests/test_ontimer.py
|
Python
|
apache-2.0
| 1,348
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from clicrud.device.generic.ver.base import telnet as baseTelnet
from clicrud.device.generic.ver.base import ssh as baseSSH
class generic(object):
def __init__(self, **kwargs):
METHOD_ATTRS = ['telnet', 'ssh']
_args = kwargs
if _args.get('method') == 'telnet' and \
_args.get('method') in METHOD_ATTRS:
self._transport = baseTelnet(**_args)
if _args.get('method') == 'ssh' and \
_args.get('method') in METHOD_ATTRS:
self._transport = baseSSH(**_args)
@property
def connected(self):
if self._transport.connected:
return True
def read(self, command, **kwargs):
_args = kwargs
return self._transport.read(command, **_args)
def configure(self, commands, **kwargs):
_args = kwargs
return self._transport.configure(commands, **_args)
@property
def protocol(self):
return self._transport.protocol
def close(self):
self._transport.close()
@property
def hostname(self):
return self._transport.hostname
@property
def err(self):
return self._transport._error
|
DavidJohnGee/clicrud
|
clicrud/device/generic/__init__.py
|
Python
|
apache-2.0
| 1,768
|
#$language = "python"
#$interface = "1.0"
# Document_Device.py
#
# Description:
# Sends a series of Cisco Show commands one by one as listed in the
# COMMANDS array. The results of each command are captured into a
# variable, and then written to an individual log file (one log file
# for each command).
#
# Filename format is:
# ~/$savepath/<Host Name>-<Command Name>-<Date Format>.txt
import os
import subprocess
import datetime
import sys
# Adjust these to your environment
savepath = 'Configs/'
mydatestr = '%Y-%m-%d-%H-%M-%S'
COMMANDS = [
"show access-list",
"show call active voice brief",
"show call history voice brief",
"show cdp neighbors detail",
"show cdp neighbors",
"show clock",
"show controllers E1",
"show controllers T1",
"show crypto ipsec sa",
"show crypto isakmp sa",
"show crypto map",
"show debug",
"show dial-peer voice summary",
"show environment power"
"show etherchannel summary",
"show interface counters error",
"show interface description",
"show interface stats",
"show interface status",
"show interface summary",
"show interface transceiver detail",
"show interface transceiver",
"show interfaces",
"show inventory",
"show ip arp",
"show ip eigrp neighbor",
"show ip interface brief",
"show ip ospf neighbor",
"show ip protocols",
"show ip route 0.0.0.0",
"show ip route",
"show ipv6 interface brief",
"show ipv6 protocols",
"show ipv6 protocols",
"show ipv6 route",
"show log",
"show mac address-table dynamic",
"show mac address-table",
"show module",
"show policy-map interface"
"show policy-map",
"show port-channel summary",
"show power",
"show route-map",
"show running",
"show spanning-tree",
"show version",
"show vtp status",
"write"
]
def GetHostname(tab):
'''
This function will capture the prompt of the device. The script will capture the
text that is sent back from the remote device, which includes what we typed being
echoed back to us, so we have to account for that while we parse data.
'''
#Send two line feeds
tab.Send("\n\n")
tab.WaitForString("\n") # Waits for first linefeed to be echoed back to us
prompt = tab.ReadString("\n") #Read the text up to the next linefeed.
prompt = prompt.strip() #Remove any trailing control characters
# Check for non-enable mode (prompt ends with ">" instead of "#")
if prompt[-1] == ">":
return None
# Get out of config mode if that is the active mode when the script was launched
elif "(conf" in prompt:
tab.Send("end\n")
hostname = prompt.split("(")[0]
tab.WaitForString(hostname + "#")
# Return the hostname (everything before the first "(")
return hostname
# Else, Return the hostname (all of the prompt except the last character)
else:
return prompt[:-1]
def CaptureOutput(command, prompt, tab):
'''
This function captures the raw output of the command supplied and returns it.
The prompt variable is used to signal the end of the command output, and
the "tab" variable is object that specifies which tab the commands are
written to.
'''
#Send command
tab.Send(command)
#Ignore the echo of the command we typed
tab.WaitForString(command.strip())
#Capture the output until we get our prompt back and write it to the file
result = tab.ReadString(prompt)
return result
def WriteFile(raw, filename):
'''
This function simply writes the contents of the "raw" variable to a
file with the name passed to the function; the caller is expected to
include the file suffix (".txt" in this script) in that name.
'''
newfile = open(filename, 'wb')
newfile.write(raw)
newfile.close()
def main():
#Create a "Tab" object, so that all the output goes into the correct Tab.
objTab = crt.GetScriptTab()
tab = objTab.Screen #Allows us to type "tab.xxx" instead of "objTab.Screen.xxx"
tab.IgnoreEscape = True
tab.Synchronous = True
#Get the prompt of the device
hostname = GetHostname(tab)
if hostname is None:
crt.Dialog.MessageBox("You must be in enable mode to run this script.")
else:
prompt = hostname + "#"
now = datetime.datetime.now()
mydate = now.strftime(mydatestr)
#Send term length command and wait for prompt to return
tab.Send('term length 0\n')
tab.Send('term width 0\n')
tab.WaitForString(prompt)
for (index, SendCmd) in enumerate(COMMANDS):
SendCmd = SendCmd.strip()
# Save command without spaces to use in output filename.
CmdName = SendCmd.replace(" ", "_")
# Add a newline to command before sending it to the remote device.
SendCmd = SendCmd + "\n"
#Create Filename
filebits = [hostname, CmdName, mydate + ".txt"]
filename = '-'.join(filebits)
#Create path to save configuration file and open file
fullFileName = os.path.join(os.path.expanduser('~'), savepath + filename)
CmdResult = CaptureOutput(SendCmd, prompt, tab)
if "% Invalid input" not in CmdResult:
WriteFile(CmdResult, fullFileName)
CmdResult = ''
#Send term length back to default
tab.Send('term length 24\n')
tab.Send('term width 80\n')
tab.WaitForString(prompt)
tab.Synchronous = False
tab.IgnoreEscape = False
crt.Dialog.MessageBox("Device Documentation Script Complete", "Script Complete", 0)
main()
|
DN0000/SecureCRT
|
Document_Device.py
|
Python
|
apache-2.0
| 5,211
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common job operators useful in the framework and 3P-libraries."""
import copy
import itertools
from typing import Any, Callable, List, Sequence, Tuple
import attr
from xmanager.xm import job_blocks
from xmanager.xm import pattern_matching
def shallow_copy_job_type(
job_type: job_blocks.JobTypeVar) -> job_blocks.JobTypeVar:
"""Creates a shallow copy of the job structure."""
def apply_to_job_group(job_group: job_blocks.JobGroup) -> job_blocks.JobGroup:
job_group = copy.copy(job_group)
job_group.jobs = {key: matcher(job) for key, job in job_group.jobs.items()}
return job_group
matcher = pattern_matching.match(
pattern_matching.Case([job_blocks.Job], copy.copy),
apply_to_job_group,
pattern_matching.Case([job_blocks.JobGeneratorType],
lambda generator: generator),
)
return matcher(job_type)
def populate_job_names(job_type: job_blocks.JobTypeVar) -> None:
"""Assigns default names to the given jobs."""
def apply_to_job(prefix: Sequence[str], target: job_blocks.Job) -> None:
if target.name is None:
target.name = '_'.join(prefix) if prefix else target.executable.name
def apply_to_job_group(prefix: Sequence[str],
target: job_blocks.JobGroup) -> None:
for key, job in target.jobs.items():
matcher([*prefix, key], job)
def ignore_unknown(_: Sequence[str], target: Any) -> None:
return target
matcher = pattern_matching.match(
apply_to_job,
apply_to_job_group,
ignore_unknown,
)
return matcher([], job_type)
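# Illustrative naming (hypothetical structure): jobs reached via the nested keys
# ['exp', 'train'] and ['exp', 'eval'] get the names 'exp_train' and 'exp_eval',
# while an unnamed top-level Job falls back to its executable's name.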
def collect_jobs_by_filter(
job_group: job_blocks.JobGroup,
predicate: Callable[[job_blocks.Job], bool],
) -> List[job_blocks.Job]:
"""Flattens a given job group and filters the result."""
def match_job(job: job_blocks.Job) -> List[job_blocks.Job]:
return [job] if predicate(job) else []
def match_job_group(job_group: job_blocks.JobGroup) -> List[job_blocks.Job]:
return list(
itertools.chain.from_iterable(
[job_collector(job) for job in job_group.jobs.values()]))
job_collector = pattern_matching.match(match_job_group, match_job)
return job_collector(job_group)
@attr.s(auto_attribs=True)
class ConstraintClique:
"""A constraint with the list of jobs it applies to."""
constraint: job_blocks.Constraint
jobs: List[job_blocks.Job]
def aggregate_constraint_cliques(
job_group: job_blocks.JobGroup) -> List[ConstraintClique]:
"""Forms constraint cliques.
For each constraint met, collects all jobs it applies to.
Args:
job_group: A job group to aggregate on.
Returns:
A set of cliques.
"""
def match_job(
job: job_blocks.Job
) -> Tuple[List[ConstraintClique], List[job_blocks.Job]]:
return [], [job]
def match_job_group(
job_group: job_blocks.JobGroup
) -> Tuple[List[ConstraintClique], List[job_blocks.Job]]:
cliques: List[ConstraintClique] = []
jobs: List[job_blocks.Job] = []
for job in job_group.jobs.values():
subcliques, subjobs = matcher(job)
cliques += subcliques
jobs += subjobs
cliques = [
ConstraintClique(constraint, jobs)
for constraint in job_group.constraints
] + cliques
return cliques, jobs
matcher = pattern_matching.match(match_job, match_job_group)
result, _ = matcher(job_group)
return result
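# Illustrative result (hypothetical group): a JobGroup carrying one constraint C and
# containing jobs a and b yields [ConstraintClique(constraint=C, jobs=[a, b])].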
def flatten_jobs(job_group: job_blocks.JobGroup) -> List[job_blocks.Job]:
return collect_jobs_by_filter(job_group, lambda _: True)
|
deepmind/xmanager
|
xmanager/xm/job_operators.py
|
Python
|
apache-2.0
| 4,103
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import signal
import time
from pathlib import Path
from typing import List, Tuple
import pytest
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.testutil.pants_integration_test import run_pants_with_workdir
from pants.util.dirutil import read_file
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
pytestmark = pytest.mark.platform_specific_behavior
def lifecycle_stub_cmdline() -> List[str]:
# Load the testprojects pants-plugins to get some testing tasks and subsystems.
testproject_backend_src_dir = os.path.join(
get_buildroot(), "testprojects/pants-plugins/src/python"
)
testproject_backend_pkg_name = "test_pants_plugin"
lifecycle_stub_cmdline = [
"--no-pantsd",
f"--pythonpath=+['{testproject_backend_src_dir}']",
f"--backend-packages=+['{testproject_backend_pkg_name}']",
# This task will always raise an exception.
"lifecycle-stub-goal",
]
return lifecycle_stub_cmdline
def get_log_file_paths(workdir: str, pid: int) -> Tuple[str, str]:
pid_specific_log_file = ExceptionSink.exceptions_log_path(for_pid=pid, in_dir=workdir)
assert os.path.isfile(pid_specific_log_file)
shared_log_file = ExceptionSink.exceptions_log_path(in_dir=workdir)
assert os.path.isfile(shared_log_file)
assert pid_specific_log_file != shared_log_file
return (pid_specific_log_file, shared_log_file)
def assert_unhandled_exception_log_matches(pid: int, file_contents: str) -> None:
regex_str = f"""\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Exception caught: \\([^)]*\\)
(.|\n)*
Exception message:.*
"""
assert re.match(regex_str, file_contents)
def assert_graceful_signal_log_matches(pid: int, signum, signame, contents: str) -> None:
regex_str = """\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Signal {signum} \\({signame}\\) was raised\\. Exiting with failure\\.
""".format(
pid=pid, signum=signum, signame=signame
)
assert re.search(regex_str, contents)
def test_logs_unhandled_exception(tmp_path: Path) -> None:
pants_run = run_pants_with_workdir(
lifecycle_stub_cmdline(),
workdir=tmp_path.as_posix(),
# The backtrace should be omitted when --print-stacktrace=False.
print_stacktrace=False,
extra_env={"_RAISE_EXCEPTION_ON_IMPORT": "True"},
)
pants_run.assert_failure()
regex = "exception during import!"
assert re.search(regex, pants_run.stderr)
pid_specific_log_file, shared_log_file = get_log_file_paths(tmp_path.as_posix(), pants_run.pid)
assert_unhandled_exception_log_matches(pants_run.pid, read_file(pid_specific_log_file))
assert_unhandled_exception_log_matches(pants_run.pid, read_file(shared_log_file))
class ExceptionSinkIntegrationTest(PantsDaemonIntegrationTestBase):
hermetic = False
def test_dumps_logs_on_signal(self):
"""Send signals which are handled, but don't get converted into a KeyboardInterrupt."""
signal_names = {
signal.SIGQUIT: "SIGQUIT",
signal.SIGTERM: "SIGTERM",
}
for (signum, signame) in signal_names.items():
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
pid = ctx.checker.assert_started()
os.kill(pid, signum)
time.sleep(5)
# Check that the logs show a graceful exit by signal.
pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
assert_graceful_signal_log_matches(
pid, signum, signame, read_file(pid_specific_log_file)
)
assert_graceful_signal_log_matches(pid, signum, signame, read_file(shared_log_file))
def test_dumps_traceback_on_sigabrt(self):
# SIGABRT sends a traceback to the log file for the current process thanks to
# faulthandler.enable().
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
pid = ctx.checker.assert_started()
os.kill(pid, signal.SIGABRT)
time.sleep(5)
# Check that the logs show an abort signal and the beginning of a traceback.
pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
regex_str = """\
Fatal Python error: Aborted
Thread [^\n]+ \\(most recent call first\\):
"""
assert re.search(regex_str, read_file(pid_specific_log_file))
# faulthandler.enable() only allows use of a single logging file at once for fatal tracebacks.
assert "" == read_file(shared_log_file)
|
pantsbuild/pants
|
src/python/pants/base/exception_sink_integration_test.py
|
Python
|
apache-2.0
| 4,970
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetBigQueryExport
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1_generated_SecurityCenter_GetBigQueryExport_sync]
from google.cloud import securitycenter_v1
def sample_get_big_query_export():
# Create a client
client = securitycenter_v1.SecurityCenterClient()
# Initialize request argument(s)
request = securitycenter_v1.GetBigQueryExportRequest(
name="name_value",
)
# Make the request
response = client.get_big_query_export(request=request)
# Handle the response
print(response)
# [END securitycenter_v1_generated_SecurityCenter_GetBigQueryExport_sync]
|
googleapis/python-securitycenter
|
samples/generated_samples/securitycenter_v1_generated_security_center_get_big_query_export_sync.py
|
Python
|
apache-2.0
| 1,513
|
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import itertools
import os
import shutil
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import ConfigParams, submit
from streamsx import rest
import test_functions
import test_vers
def s4():
return ['one', 'two', 'three', 'four']
def removeArtifacts(submissionResult):
if 'bundlePath' in submissionResult:
os.remove(submissionResult['bundlePath'])
if 'toolkitRoot' in submissionResult:
shutil.rmtree(submissionResult['toolkitRoot'])
if 'archivePath' in submissionResult:
os.remove(submissionResult['archivePath'])
if 'jobConfigPath' in submissionResult:
os.remove(submissionResult['jobConfigPath'])
def assertBundlePath(test, submissionResult):
test.assertIn('bundlePath', submissionResult)
test.assertTrue(os.path.isfile(submissionResult['bundlePath']))
if ((test.test_ctxtype == 'BUNDLE' or
test.test_ctxtype != 'STANDALONE' and
test.test_config.get('topology.keepArtifacts', False))):
test.assertIn('jobConfigPath', submissionResult)
test.assertTrue(os.path.isfile(submissionResult['jobConfigPath']))
else:
test.assertNotIn('jobConfigPath', test.result)
def assertToolkitRoot(test, submissionResult):
test.assertIn('toolkitRoot', submissionResult)
test.assertTrue(os.path.isdir(submissionResult['toolkitRoot']))
def assertArchivePath(test, submissionResult):
test.assertIn('archivePath', submissionResult)
test.assertTrue(os.path.isfile(submissionResult['archivePath']))
def verifyArtifacts(test):
if test.test_config.get('topology.keepArtifacts', False):
# KeepArtifacts is True
assertToolkitRoot(test, test.result)
if 'TOOLKIT' == test.test_ctxtype:
test.assertNotIn('bundlePath', test.result)
test.assertNotIn('archivePath', test.result)
elif (test.test_config.get('topology.forceRemoteBuild', False) or
'STREAMS_INSTALL' not in os.environ or
'BUILD_ARCHIVE' == test.test_ctxtype):
assertArchivePath(test, test.result)
test.assertNotIn('bundlePath', test.result)
else:
assertBundlePath(test, test.result)
test.assertNotIn('archivePath', test.result)
else:
# KeepArtifacts is False
if 'TOOLKIT' == test.test_ctxtype:
assertToolkitRoot(test, test.result)
else:
test.assertNotIn('toolkitRoot', test.result)
if 'BUNDLE' == test.test_ctxtype:
assertBundlePath(test, test.result)
test.assertNotIn('archivePath', test.result)
elif 'BUILD_ARCHIVE' == test.test_ctxtype:
assertArchivePath(test, test.result)
test.assertNotIn('bundlePath', test.result)
else:
test.assertNotIn('bundlePath', test.result)
test.assertNotIn('archivePath', test.result)
@unittest.skipIf(not test_vers.tester_supported(), "Tester not supported")
class TestToolkitMethodsNew(unittest.TestCase):
def setUp(self):
self.topo = Topology('test_ToolkitSource')
#self.topo.source(['Hello', 'Toolkit'])
self.topo.source('Toolkit')
self.test_ctxtype = 'TOOLKIT'
self.test_config = {}
self.result = {}
def tearDown(self):
removeArtifacts(self.result)
def test_NoKeepArtifacts(self):
self.result = submit(self.test_ctxtype, self.topo, self.test_config)
verifyArtifacts(self)
def test_KeepArtifacts(self):
self.test_config['topology.keepArtifacts'] = True
self.result = submit(self.test_ctxtype, self.topo, self.test_config)
verifyArtifacts(self)
@unittest.skipIf(not test_vers.tester_supported(), "Tester not supported")
class TestBuildArchiveMethodsNew(TestToolkitMethodsNew):
def setUp(self):
self.topo = Topology('test_BuildArchiveSource')
self.topo.source(['Hello', 'BuildArchive'])
self.test_ctxtype = 'BUILD_ARCHIVE'
self.test_config = {}
self.result = {}
@unittest.skipIf(not test_vers.tester_supported(), "Tester not supported")
@unittest.skipUnless('STREAMS_INSTALL' in os.environ, "requires STREAMS_INSTALL")
class TestBundleMethodsNew(TestToolkitMethodsNew):
def setUp(self):
self.topo = Topology('test_BundleSource')
self.topo.source(['Hello', 'Bundle'])
self.test_ctxtype = 'BUNDLE'
self.test_config = {}
self.result = {}
@unittest.skipIf(not test_vers.tester_supported(), "Tester not supported")
@unittest.skipUnless('STREAMS_INSTALL' in os.environ, "requires STREAMS_INSTALL")
class TestDistributedSubmitMethodsNew(unittest.TestCase):
def setUp(self):
self.topo = Topology('test_DistributedSubmit')
self.topo.source(['Hello', 'DistributedSubmit'])
self.test_ctxtype = 'DISTRIBUTED'
self.test_config = {}
def test_DifferentUsername(self):
sc = rest.StreamsConnection('user1', 'pass1')
self.test_config[ConfigParams.STREAMS_CONNECTION] = sc
with self.assertRaises(RuntimeError):
submit(self.test_ctxtype, self.topo, self.test_config, username='user2', password='pass1')
def test_DifferentPassword(self):
sc = rest.StreamsConnection('user1', 'pass1')
self.test_config[ConfigParams.STREAMS_CONNECTION] = sc
with self.assertRaises(RuntimeError):
submit(self.test_ctxtype, self.topo, self.test_config, username='user1', password='pass2')
@unittest.skipIf(not test_vers.tester_supported(), "Tester not supported")
@unittest.skipUnless('VCAP_SERVICES' in os.environ, "requires VCAP_SERVICES")
@unittest.skipUnless('STREAMING_ANALYTICS_SERVICE_NAME' in os.environ, "requires STREAMING_ANALYTICS_SERVICE_NAME")
class TestBluemixSubmitMethodsNew(unittest.TestCase):
def setUp(self):
self.topo = Topology('test_BluemixSubmit')
self.topo.source(['Hello', 'BluemixSubmit'])
self.test_ctxtype = 'STREAMING_ANALYTICS_SERVICE'
self.test_config = {}
def test_StreamsConnection(self):
sc = rest.StreamsConnection('user1', 'pass1')
self.test_config[ConfigParams.STREAMS_CONNECTION] = sc
with self.assertRaises(ValueError):
submit(self.test_ctxtype, self.topo, self.test_config)
def test_StreamingAnalyticsConnection(self):
sc = rest.StreamingAnalyticsConnection()
self.test_config[ConfigParams.STREAMS_CONNECTION] = sc
result = submit(self.test_ctxtype, self.topo, self.test_config)
self.assertEqual(result.return_code, 0)
result.job.cancel()
@unittest.skipIf(not test_vers.tester_supported() , "Tester not supported")
class TestTopologyMethodsNew(unittest.TestCase):
def setUp(self):
Tester.setup_standalone(self)
self.result = {}
def tearDown(self):
removeArtifacts(self.result)
def test_TopologySourceList(self):
topo = Topology('test_TopologySourceList')
hw = topo.source(['Hello', 'Tester'])
tester = Tester(topo)
tester.contents(hw, ['Hello', 'Tester'])
tester.test(self.test_ctxtype, self.test_config)
self.result = tester.result['submission_result']
verifyArtifacts(self)
def test_TopologySourceFn(self):
topo = Topology('test_TopologySourceFn')
hw = topo.source(s4)
tester = Tester(topo)
tester.contents(hw, s4())
tester.tuple_count(hw, len(s4()))
self.test_config['topology.keepArtifacts'] = True
tester.test(self.test_ctxtype, self.test_config)
self.result = tester.result['submission_result']
verifyArtifacts(self)
def test_TopologySourceItertools(self):
topo = Topology('test_TopologySourceItertools')
if sys.version_info.major == 2:
# Iterators not serializable in 2.7
hw = topo.source(lambda : itertools.repeat(9, 3))
else:
hw = topo.source(itertools.repeat(9, 3))
if sys.version_info.major == 2:
# Disabling assertions not supported on Python 2.7
# See splpy_setup.h
pass
else:
hw = hw.filter(test_functions.check_asserts_disabled)
tester = Tester(topo)
tester.contents(hw, [9, 9, 9])
tester.test(self.test_ctxtype, self.test_config)
@unittest.skipIf(not test_vers.tester_supported() , "Tester not supported")
class TestDistributedTopologyMethodsNew(TestTopologyMethodsNew):
def setUp(self):
Tester.setup_distributed(self)
self.result = {}
@unittest.skipIf(not test_vers.tester_supported() , "Tester not supported")
class TestBluemixTopologyMethodsNew(TestTopologyMethodsNew):
def setUp(self):
Tester.setup_streaming_analytics(self, force_remote_build=True)
self.result = {}
|
wmarshall484/streamsx.topology
|
test/python/topology/test2.py
|
Python
|
apache-2.0
| 8,958
|
import unittest
import mock
from kafka_influxdb.reader import kafka_python
from kafka.common import Message
class TestKafkaPython(unittest.TestCase):
def setUp(self):
self.host = "myhost"
self.port = 1234
self.group = "mygroup"
self.topic = "mytopic"
self.offset = "largest"
self.reconnect_wait_time = 0.01
self.reader = self.create_reader()
def create_reader(self):
reader = kafka_python.Reader(self.host,
self.port,
self.group,
self.topic,
self.offset)
reader.consumer = mock.MagicMock()
return reader
def sample_messages(self, payload, count):
return count * [Message(0, 0, None, payload)], count * [payload]
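# e.g. sample_messages("hello", 3) returns three Message objects carrying "hello"
# together with ["hello", "hello", "hello"] (illustrative values).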
def test_handle_read(self):
sample_messages, extracted_messages = self.sample_messages("hello", 3)
self.reader.consumer.__iter__.return_value = sample_messages
self.reader._connect = mock.MagicMock()
received_messages = list(self.reader._handle_read())
self.assertEqual(received_messages, extracted_messages)
def receive_messages(self):
for message in self.reader.read():
yield message
|
mre/kafka-influxdb
|
kafka_influxdb/tests/reader_test/test_kafka_python.py
|
Python
|
apache-2.0
| 1,320
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import psutil
from builtins import input
from past.builtins import basestring
from datetime import datetime
import getpass
import imp
import os
import re
import signal
import subprocess
import sys
import warnings
from airflow.exceptions import AirflowException
# When killing processes, time to wait after issuing a SIGTERM before issuing a
# SIGKILL.
TIME_TO_WAIT_AFTER_SIGTERM = 5
def validate_key(k, max_length=250):
if not isinstance(k, basestring):
raise TypeError("The key has to be a string")
elif len(k) > max_length:
raise AirflowException(
"The key has to be less than {0} characters".format(max_length))
elif not re.match(r'^[A-Za-z0-9_\-\.]+$', k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes, "
"dots and underscores exclusively".format(**locals()))
else:
return True
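# Illustrative checks (hypothetical keys): validate_key('my_task-1.v2') returns True,
# while validate_key('bad key!') raises AirflowException because of the space and '!'.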
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d
def ask_yesno(question):
yes = set(['yes', 'y'])
no = set(['no', 'n'])
done = False
print(question)
while not done:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please respond by yes or no.")
def is_in(obj, l):
"""
Checks whether an object is one of the items in the list.
This is different from ``in`` because ``in`` uses __eq__/__cmp__ when
present, whereas here the check is based on object identity.
"""
for item in l:
if item is obj:
return True
return False
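# Illustrative difference from ``in`` (hypothetical values): with a = [], the
# expression ``[] in [a]`` is True (equality), but is_in([], [a]) is False and
# is_in(a, [[], a]) is True, because identity rather than equality is checked.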
def is_container(obj):
"""
Test if an object is a container (iterable) but not a string
"""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def as_tuple(obj):
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def as_flattened_list(iterable):
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task)
def pprinttable(rows):
"""Returns a pretty ascii table from tuples
If namedtuples are used, the table will have headers.
"""
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
slength = len("{}".format(row[i]))
if slength > lens[i]:
lens[i] = slength
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s
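# Illustrative usage (hypothetical data, not part of the original module):
#   from collections import namedtuple
#   Row = namedtuple('Row', ['name', 'count'])
#   print(pprinttable([Row('alpha', 1), Row('beta', 22)]))
# renders a two-column ascii table with 'name' and 'count' as headers.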
def kill_using_shell(pid, signal=signal.SIGTERM):
process = psutil.Process(pid)
# Use sudo only when necessary - consider SubDagOperator and SequentialExecutor case.
if process.username() != getpass.getuser():
args = ["sudo", "kill", "-{}".format(int(signal)), str(pid)]
else:
args = ["kill", "-{}".format(int(signal)), str(pid)]
# PID may not exist and return a non-zero error code
subprocess.call(args)
def kill_process_tree(logger, pid):
"""
Kills the process and all of the descendants. Kills using the `kill`
shell command so that it can change users. Note: killing via PIDs
has the potential to kill the wrong process if the process dies and the
PID gets recycled in a narrow time window.
:param logger: logger
:type logger: logging.Logger
"""
try:
root_process = psutil.Process(pid)
except psutil.NoSuchProcess:
logger.warn("PID: {} does not exist".format(pid))
return
# Check child processes to reduce cases where a child process died but
# the PID got reused.
descendant_processes = [x for x in root_process.children(recursive=True)
if x.is_running()]
if len(descendant_processes) != 0:
logger.warn("Terminating descendant processes of {} PID: {}"
.format(root_process.cmdline(),
root_process.pid))
temp_processes = descendant_processes[:]
for descendant in temp_processes:
logger.warn("Terminating descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
kill_using_shell(descendant.pid, signal.SIGTERM)
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Waiting up to {}s for processes to exit..."
.format(TIME_TO_WAIT_AFTER_SIGTERM))
try:
psutil.wait_procs(descendant_processes, TIME_TO_WAIT_AFTER_SIGTERM)
logger.warn("Done waiting")
except psutil.TimeoutExpired:
logger.warn("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
descendant_processes = [x for x in root_process.children(recursive=True)
if x.is_running()]
if len(descendant_processes) > 0:
temp_processes = descendant_processes[:]
for descendant in temp_processes:
logger.warn("Killing descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
kill_using_shell(descendant.pid, signal.SIGTERM)
descendant.wait()
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Killed all descendant processes of {} PID: {}"
.format(root_process.cmdline(),
root_process.pid))
else:
logger.debug("There are no descendant processes to kill")
def kill_descendant_processes(logger, pids_to_kill=None):
"""
Kills all descendant processes of this process.
:param logger: logger
:type logger: logging.Logger
:param pids_to_kill: if specified, kill only these PIDs
:type pids_to_kill: list[int]
"""
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where a child process died but the PID got reused.
descendant_processes = [x for x in this_process.children(recursive=True)
if x.is_running()]
if pids_to_kill:
descendant_processes = [x for x in descendant_processes
if x.pid in pids_to_kill]
if len(descendant_processes) == 0:
logger.debug("There are no descendant processes that can be killed")
return
logger.warn("Terminating descendant processes of {} PID: {}"
.format(this_process.cmdline(),
this_process.pid))
temp_processes = descendant_processes[:]
for descendant in temp_processes:
try:
logger.warn("Terminating descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
descendant.terminate()
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Waiting up to {}s for processes to exit..."
.format(TIME_TO_WAIT_AFTER_SIGTERM))
try:
psutil.wait_procs(descendant_processes, TIME_TO_WAIT_AFTER_SIGTERM)
logger.warn("Done waiting")
except psutil.TimeoutExpired:
logger.warn("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
descendant_processes = [x for x in this_process.children(recursive=True)
if x.is_running()]
if pids_to_kill:
descendant_processes = [x for x in descendant_processes
if x.pid in pids_to_kill]
if len(descendant_processes) > 0:
for descendant in descendant_processes:
logger.warn("Killing descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
descendant.kill()
descendant.wait()
except psutil.NoSuchProcess:
pass
logger.warn("Killed all descendant processes of {} PID: {}"
.format(this_process.cmdline(),
this_process.pid))
class AirflowImporter(object):
"""
Importer that dynamically loads a class and module from its parent. This
allows Airflow to support ``from airflow.operators import BashOperator``
even though BashOperator is actually in
``airflow.operators.bash_operator``.
The importer also takes over for the parent_module by wrapping it. This is
required to support attribute-based usage:
.. code:: python
from airflow import operators
operators.BashOperator(...)
"""
def __init__(self, parent_module, module_attributes):
"""
:param parent_module: The string package name of the parent module. For
example, 'airflow.operators'
:type parent_module: string
:param module_attributes: The file to class mappings for all importable
classes.
:type module_attributes: string
"""
self._parent_module = parent_module
self._attribute_modules = self._build_attribute_modules(module_attributes)
self._loaded_modules = {}
# Wrap the module so we can take over __getattr__.
sys.modules[parent_module.__name__] = self
@staticmethod
def _build_attribute_modules(module_attributes):
"""
Flips and flattens the module_attributes dictionary from:
module => [Attribute, ...]
To:
Attribute => module
This is useful so that we can find the module to use, given an
attribute.
"""
attribute_modules = {}
for module, attributes in list(module_attributes.items()):
for attribute in attributes:
attribute_modules[attribute] = module
return attribute_modules
def _load_attribute(self, attribute):
"""
Load the class attribute if it hasn't been loaded yet, and return it.
"""
module = self._attribute_modules.get(attribute, False)
if not module:
# This shouldn't happen. The check happens in find_modules, too.
raise ImportError(attribute)
elif module not in self._loaded_modules:
# Note that it's very important to only load a given module once.
# If they are loaded more than once, the memory reference to the
# class objects changes, and Python thinks that an object of type
# Foo that was declared before Foo's module was reloaded is no
# longer the same type as Foo after it's reloaded.
path = os.path.realpath(self._parent_module.__file__)
folder = os.path.dirname(path)
f, filename, description = imp.find_module(module, [folder])
self._loaded_modules[module] = imp.load_module(module, f, filename, description)
# This functionality is deprecated, and AirflowImporter should be
# removed in 2.0.
warnings.warn(
"Importing {i} directly from {m} has been "
"deprecated. Please import from "
"'{m}.[operator_module]' instead. Support for direct "
"imports will be dropped entirely in Airflow 2.0.".format(
i=attribute, m=self._parent_module),
DeprecationWarning)
loaded_module = self._loaded_modules[module]
return getattr(loaded_module, attribute)
def __getattr__(self, attribute):
"""
Get an attribute from the wrapped module. If the attribute doesn't
exist, try and import it as a class from a submodule.
This is a Python trick that allows the class to pretend it's a module,
so that attribute-based usage works:
from airflow import operators
operators.BashOperator(...)
It also allows normal from imports to work:
from airflow.operators.bash_operator import BashOperator
"""
if hasattr(self._parent_module, attribute):
# Always default to the parent module if the attribute exists.
return getattr(self._parent_module, attribute)
elif attribute in self._attribute_modules:
# Try and import the attribute if it's got a module defined.
loaded_attribute = self._load_attribute(attribute)
setattr(self, attribute, loaded_attribute)
return loaded_attribute
raise AttributeError
|
gritlogic/incubator-airflow
|
airflow/utils/helpers.py
|
Python
|
apache-2.0
| 15,004
|
#!/usr/bin/env python
#
# Copyright 2016 Medoly
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from medoly.kanon import bloom
@bloom("model")
class User(object):
def __init__(self, uid, name):
self.uid = uid
self.name = name
def __json__(self):
return {"uid": self.uid, "name": self.name}
|
whiteclover/Medoly
|
examples/demo/app/user/model.py
|
Python
|
apache-2.0
| 825
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.vledashboard.stacks import views
urlpatterns = patterns(
'',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^select_template$',
views.SelectTemplateView.as_view(),
name='select_template'),
url(r'^launch$', views.CreateStackView.as_view(), name='launch'),
url(r'^stack/(?P<stack_id>[^/]+)/$',
views.DetailView.as_view(), name='detail'),
url(r'^(?P<stack_id>[^/]+)/change_template$',
views.ChangeTemplateView.as_view(), name='change_template'),
url(r'^(?P<stack_id>[^/]+)/edit_stack$',
views.EditStackView.as_view(), name='edit_stack'),
url(r'^stack/(?P<stack_id>[^/]+)/(?P<resource_name>[^/]+)/$',
views.ResourceView.as_view(), name='resource'),
url(r'^get_d3_data/(?P<stack_id>[^/]+)/$',
views.JSONView.as_view(), name='d3_data'),
)
|
flochaz/horizon
|
openstack_dashboard/dashboards/vledashboard/stacks/urls.py
|
Python
|
apache-2.0
| 1,491
|
#!/usr/bin/env python3
import os
from litmus.core.util import load_yaml
from litmus.core.manager import manager
from litmus.helper.helper import tizen_snapshot_downloader as downloader
from litmus.helper.tests import add_test_helper
def main(*args, **kwargs):
# init manager instance
mgr = manager(*args, **kwargs)
# init working directory
mgr.init_workingdir()
# get projectinfo
project_info = load_yaml('conf_mobile.yaml')
username = project_info['username']
password = project_info['password']
binary_urls = project_info['binary_urls']
# get version from parameter
# ex) 20160923.3
# you can customize params from litmus (adhoc|run) -p option
# Nth arg : kwargs['param'][N]
try:
version = kwargs['param'][0]
except (IndexError, TypeError):
version = None
# download binaries from snapshot download server
filenames = []
for url in binary_urls:
filenames.extend(downloader(url=url,
username=username,
password=password,
version=version))
# get an available device for testing.
# Please set up topology before acquiring device.
# Example)
# ~/.litmus/topology
# [TM2_001]
# dev_type = standalone_tm2
# serialno = 01234TEST
dut = mgr.acquire_dut('standalone_tm2', max_retry_times=180)
# flashing binaries to device.
dut.flash(filenames)
# turn on dut.
dut.on()
# run helper functions for testing.
if not os.path.exists('result'):
os.mkdir('result')
testcases = load_yaml('tc_mobile.yaml')
add_test_helper(dut, testcases)
dut.run_tests()
# turn off dut.
dut.off()
# release a device
mgr.release_dut(dut)
|
dhs-shine/litmus
|
litmus/templates/standalone_tm2/userscript.py
|
Python
|
apache-2.0
| 1,815
|
#!/usr/bin/python
#! -*- Encoding: Latin-1 -*-
import ConfigParser
import os
import threading
import logging
import logging.handlers
import eftepede_globals
import re
import socket
import pyftpdlib.servers
import pyftpdlib.handlers
import eftepede_authorizer
try:
import _winreg
except:
_winreg = None
class config(object):
pass
class eftepede_server(object):
def __init__(self):
self.read_configuration()
self.logger.info("eftepede! Server %s starting up..." % (eftepede_globals.CURRENT_VERSION, ))
def read_configuration(self):
# read configuration file
self.config_file = ConfigParser.SafeConfigParser()
self.config_file.read(os.path.join(os.getcwd(), "eftepede_config.ini"))
self.logger = logging.getLogger('pyftpdlib')
self.logger.setLevel(logging.ERROR)
logging.basicConfig(level = logging.ERROR)
if self.get_setting("Enabled", True, bool, "Trace"):
LOG_FILENAME = self.get_setting("Filename", os.path.join(os.getcwd(), "eftepede.log"), None, "Trace")
logging.basicConfig(filename=os.path.join(os.getcwd(), "pyftpdlib.log"), level=logging.DEBUG)
# Set up a specific logger with our desired output level
self.logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME,
maxBytes = self.get_setting("MaxSize", 1024*1024*10, int, "Trace"),
backupCount = self.get_setting("BackupCount", 3, int, "Trace"))
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.info("_" * 90);
self.logger.debug("just to see if debug comes on")
self.config = config()
self.config.database_provider = self.get_setting("database_provider", "")
self.config.ftp_portnum = self.get_setting("portnum", 21, int)
self.config.ftp_address = self.get_setting("address", "")
self.config.max_cons = self.get_setting("max_cons", 256, int)
self.config.max_cons_per_ip = self.get_setting("max_cons_per_ip", 5, int)
self.config.anonymous_enabled = self.get_setting("anonymous_enabled", False, bool)
if self.config.anonymous_enabled:
self.config.anonymous_homedir = self.get_setting("anonymous_homedir", os.getcwd())
self.config.masquerade_address = self.get_setting("masquerade_address", "")
for index, item in enumerate(dir(self.config)):
if not item.startswith("_"):
self.logger.info("%30s: %r" % (item, getattr(self.config, item), ))
self.logger.info("_" * 90);
if self.config.database_provider == "sqlite":
import eftepede_using_sqlite
eftepede_globals.Database = eftepede_using_sqlite.SQLite3Database(self, self.logger)
elif self.config.database_provider == "postgresql":
import eftepede_using_postgresql
eftepede_globals.Database = eftepede_using_postgresql.PostgresqlDatabase(self, self.logger)
elif self.config.database_provider == "mysql":
import eftepede_using_mysql
eftepede_globals.Database = eftepede_using_mysql.MySQLDatabase(self, self.logger)
else:
raise Exception("No valid database provider configured, please check eftepede_config.ini")
self.logger.info("eftepede_globals.Database: %r" % (eftepede_globals.Database, ))
self.authorizer = eftepede_authorizer.Authorizer(self.config, self.logger)
#pyftpdlib.servers.log = self.logger.info
#pyftpdlib.servers.logline = self.logger.info
#pyftpdlib.servers.logerror = self.logger.error
if self.config.anonymous_enabled:
self.authorizer.add_anonymous(self.config.anonymous_homedir)
def get_setting(self, variable, default, mapfunc = None, section = "Settings"):
try:
result = self.config_file.get(section, variable)
if mapfunc is not None:
if mapfunc == bool:
if result.lower() == "true":
result = True
elif result.lower() == "false":
result = False
else:
result = mapfunc(result)
else:
result = mapfunc(result)
return result
except Exception, e:
return default
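# Illustrative call (hypothetical INI contents): self.get_setting("portnum", 21, int)
# reads the "portnum" key from the [Settings] section of eftepede_config.ini and
# falls back to 21 when the key is missing or unreadable.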
def run(self):
# Instantiate FTP handler class
self.ftp_handler = pyftpdlib.handlers.FTPHandler
self.ftp_handler.authorizer = self.authorizer
# Define a customized banner (string returned when client connects)
self.ftp_handler.banner = "eftepede %s ready, thanks to Python, pyftpdlib, sqlite and a host of others..." % (pyftpdlib.__ver__, )
# Specify a masquerade address and the range of ports to use for
# passive connections. Decomment in case you're behind a NAT.
if self.config.masquerade_address:
self.ftp_handler.masquerade_address = masquerade_address
#if self.config.passive_ports:
# self.ftp_handler.passive_ports = range(60000, 65535)
# Instantiate FTP server class and listen to 0.0.0.0:21
address = (self.config.ftp_address, self.config.ftp_portnum)
self.ftpd = pyftpdlib.servers.FTPServer(address, self.ftp_handler)
# set a limit for connections
self.ftpd.max_cons = self.config.max_cons
self.ftpd.max_cons_per_ip = self.config.max_cons_per_ip
self.ftpd.serve_forever()
def main():
eftepede_server().run()
if __name__ == "__main__":
main()
|
gersonkurz/eftepede
|
eftepede_server.py
|
Python
|
apache-2.0
| 6,104
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.create_data_vol_template_from_volume, 'volume1', 'volume1-image2'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'vm1-snapshot5'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.stop_vm, 'vm1'],
[TestAction.ps_migrate_vm, 'vm1'],
[TestAction.start_vm, 'vm1'],
[TestAction.migrate_volume, 'volume2'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'vm1-snapshot9'],
[TestAction.start_vm, 'vm1'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'vm1-image1', 'volume1-image2']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5']---vm1volume1_volume2_volume3
vm_snap3:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9']---vm1volume1_volume2_volume3
'''
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/ceph_path24.py
|
Python
|
apache-2.0
| 1,940
|
"""Component to interface with cameras."""
import asyncio
import base64
import collections
from contextlib import suppress
from datetime import timedelta
import logging
import hashlib
from random import SystemRandom
import attr
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
CONF_FILENAME,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
SERVICE_PLAY_MEDIA,
DOMAIN as DOMAIN_MP,
)
from homeassistant.components.stream import request_stream
from homeassistant.components.stream.const import (
OUTPUT_FORMATS,
FORMAT_CONTENT_TYPE,
CONF_STREAM_SOURCE,
CONF_LOOKBACK,
CONF_DURATION,
SERVICE_RECORD,
DOMAIN as DOMAIN_STREAM,
)
from homeassistant.components import websocket_api
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_when_setup
from .const import DOMAIN, DATA_CAMERA_PREFS
from .prefs import CameraPreferences
# mypy: allow-untyped-calls, allow-untyped-defs
_LOGGER = logging.getLogger(__name__)
SERVICE_ENABLE_MOTION = "enable_motion_detection"
SERVICE_DISABLE_MOTION = "disable_motion_detection"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_PLAY_STREAM = "play_stream"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ATTR_FILENAME = "filename"
ATTR_MEDIA_PLAYER = "media_player"
ATTR_FORMAT = "format"
STATE_RECORDING = "recording"
STATE_STREAMING = "streaming"
STATE_IDLE = "idle"
# Bitfield of features supported by the camera entity
SUPPORT_ON_OFF = 1
SUPPORT_STREAM = 2
DEFAULT_CONTENT_TYPE = "image/jpeg"
ENTITY_IMAGE_URL = "/api/camera_proxy/{0}?token={1}"
TOKEN_CHANGE_INTERVAL = timedelta(minutes=5)
_RND = SystemRandom()
MIN_STREAM_INTERVAL = 0.5 # seconds
CAMERA_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids})
CAMERA_SERVICE_SNAPSHOT = CAMERA_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_FILENAME): cv.template}
)
CAMERA_SERVICE_PLAY_STREAM = CAMERA_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_MEDIA_PLAYER): cv.entities_domain(DOMAIN_MP),
vol.Optional(ATTR_FORMAT, default="hls"): vol.In(OUTPUT_FORMATS),
}
)
CAMERA_SERVICE_RECORD = CAMERA_SERVICE_SCHEMA.extend(
{
vol.Required(CONF_FILENAME): cv.template,
vol.Optional(CONF_DURATION, default=30): vol.Coerce(int),
vol.Optional(CONF_LOOKBACK, default=0): vol.Coerce(int),
}
)
WS_TYPE_CAMERA_THUMBNAIL = "camera_thumbnail"
SCHEMA_WS_CAMERA_THUMBNAIL = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_CAMERA_THUMBNAIL,
vol.Required("entity_id"): cv.entity_id,
}
)
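# Illustrative message matching SCHEMA_WS_CAMERA_THUMBNAIL (values are assumed,
# not from the original source):
#   {"id": 5, "type": "camera_thumbnail", "entity_id": "camera.front_door"}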
@attr.s
class Image:
"""Represent an image."""
content_type = attr.ib(type=str)
content = attr.ib(type=bytes)
@bind_hass
async def async_request_stream(hass, entity_id, fmt):
"""Request a stream for a camera entity."""
camera = _get_camera_from_entity_id(hass, entity_id)
camera_prefs = hass.data[DATA_CAMERA_PREFS].get(entity_id)
async with async_timeout.timeout(10):
source = await camera.stream_source()
if not source:
raise HomeAssistantError(
f"{camera.entity_id} does not support play stream service"
)
return request_stream(hass, source, fmt=fmt, keepalive=camera_prefs.preload_stream)
@bind_hass
async def async_get_image(hass, entity_id, timeout=10):
"""Fetch an image from a camera entity."""
camera = _get_camera_from_entity_id(hass, entity_id)
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
async with async_timeout.timeout(timeout):
image = await camera.async_camera_image()
if image:
return Image(camera.content_type, image)
raise HomeAssistantError("Unable to get image")
@bind_hass
async def async_get_mjpeg_stream(hass, request, entity_id):
"""Fetch an mjpeg stream from a camera entity."""
camera = _get_camera_from_entity_id(hass, entity_id)
return await camera.handle_async_mjpeg_stream(request)
async def async_get_still_stream(request, image_cb, content_type, interval):
"""Generate an HTTP MJPEG stream from camera images.
This method must be run in the event loop.
"""
response = web.StreamResponse()
response.content_type = "multipart/x-mixed-replace; " "boundary=--frameboundary"
await response.prepare(request)
async def write_to_mjpeg_stream(img_bytes):
"""Write image to stream."""
await response.write(
bytes(
"--frameboundary\r\n"
"Content-Type: {}\r\n"
"Content-Length: {}\r\n\r\n".format(content_type, len(img_bytes)),
"utf-8",
)
+ img_bytes
+ b"\r\n"
)
last_image = None
while True:
img_bytes = await image_cb()
if not img_bytes:
break
if img_bytes != last_image:
await write_to_mjpeg_stream(img_bytes)
            # Chrome seems to always ignore the first picture,
            # so send it twice.
if last_image is None:
await write_to_mjpeg_stream(img_bytes)
last_image = img_bytes
await asyncio.sleep(interval)
return response
def _get_camera_from_entity_id(hass, entity_id):
"""Get camera component from entity_id."""
component = hass.data.get(DOMAIN)
if component is None:
raise HomeAssistantError("Camera integration not set up")
camera = component.get_entity(entity_id)
if camera is None:
raise HomeAssistantError("Camera not found")
if not camera.is_on:
raise HomeAssistantError("Camera is off")
return camera
async def async_setup(hass, config):
"""Set up the camera component."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
prefs = CameraPreferences(hass)
await prefs.async_initialize()
hass.data[DATA_CAMERA_PREFS] = prefs
hass.http.register_view(CameraImageView(component))
hass.http.register_view(CameraMjpegStream(component))
hass.components.websocket_api.async_register_command(
WS_TYPE_CAMERA_THUMBNAIL, websocket_camera_thumbnail, SCHEMA_WS_CAMERA_THUMBNAIL
)
hass.components.websocket_api.async_register_command(ws_camera_stream)
hass.components.websocket_api.async_register_command(websocket_get_prefs)
hass.components.websocket_api.async_register_command(websocket_update_prefs)
await component.async_setup(config)
async def preload_stream(hass, _):
for camera in component.entities:
camera_prefs = prefs.get(camera.entity_id)
if not camera_prefs.preload_stream:
continue
async with async_timeout.timeout(10):
source = await camera.stream_source()
if not source:
continue
request_stream(hass, source, keepalive=True)
async_when_setup(hass, DOMAIN_STREAM, preload_stream)
@callback
def update_tokens(time):
"""Update tokens of the entities."""
for entity in component.entities:
entity.async_update_token()
hass.async_create_task(entity.async_update_ha_state())
hass.helpers.event.async_track_time_interval(update_tokens, TOKEN_CHANGE_INTERVAL)
component.async_register_entity_service(
SERVICE_ENABLE_MOTION, CAMERA_SERVICE_SCHEMA, "async_enable_motion_detection"
)
component.async_register_entity_service(
SERVICE_DISABLE_MOTION, CAMERA_SERVICE_SCHEMA, "async_disable_motion_detection"
)
component.async_register_entity_service(
SERVICE_TURN_OFF, CAMERA_SERVICE_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_TURN_ON, CAMERA_SERVICE_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_SNAPSHOT, CAMERA_SERVICE_SNAPSHOT, async_handle_snapshot_service
)
component.async_register_entity_service(
SERVICE_PLAY_STREAM,
CAMERA_SERVICE_PLAY_STREAM,
async_handle_play_stream_service,
)
component.async_register_entity_service(
SERVICE_RECORD, CAMERA_SERVICE_RECORD, async_handle_record_service
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class Camera(Entity):
"""The base class for camera entities."""
def __init__(self):
"""Initialize a camera."""
self.is_streaming = False
self.content_type = DEFAULT_CONTENT_TYPE
self.access_tokens: collections.deque = collections.deque([], 2)
self.async_update_token()
@property
def should_poll(self):
"""No need to poll cameras."""
return False
@property
def entity_picture(self):
"""Return a link to the camera feed as entity picture."""
return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])
@property
def supported_features(self):
"""Flag supported features."""
return 0
@property
def is_recording(self):
"""Return true if the device is recording."""
return False
@property
def brand(self):
"""Return the camera brand."""
return None
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return None
@property
def model(self):
"""Return the camera model."""
return None
@property
def frame_interval(self):
"""Return the interval between frames of the mjpeg stream."""
return 0.5
async def stream_source(self):
"""Return the source of the stream."""
return None
def camera_image(self):
"""Return bytes of camera image."""
raise NotImplementedError()
@callback
def async_camera_image(self):
"""Return bytes of camera image.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.camera_image)
async def handle_async_still_stream(self, request, interval):
"""Generate an HTTP MJPEG stream from camera images.
This method must be run in the event loop.
"""
return await async_get_still_stream(
request, self.async_camera_image, self.content_type, interval
)
async def handle_async_mjpeg_stream(self, request):
"""Serve an HTTP MJPEG stream from the camera.
        This method can be overridden by camera platforms to proxy
a direct stream from the camera.
This method must be run in the event loop.
"""
return await self.handle_async_still_stream(request, self.frame_interval)
@property
def state(self):
"""Return the camera state."""
if self.is_recording:
return STATE_RECORDING
if self.is_streaming:
return STATE_STREAMING
return STATE_IDLE
@property
def is_on(self):
"""Return true if on."""
return True
def turn_off(self):
"""Turn off camera."""
raise NotImplementedError()
@callback
def async_turn_off(self):
"""Turn off camera."""
return self.hass.async_add_job(self.turn_off)
def turn_on(self):
"""Turn off camera."""
raise NotImplementedError()
@callback
def async_turn_on(self):
"""Turn off camera."""
return self.hass.async_add_job(self.turn_on)
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
raise NotImplementedError()
@callback
def async_enable_motion_detection(self):
"""Call the job and enable motion detection."""
return self.hass.async_add_job(self.enable_motion_detection)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
raise NotImplementedError()
@callback
def async_disable_motion_detection(self):
"""Call the job and disable motion detection."""
return self.hass.async_add_job(self.disable_motion_detection)
@property
def state_attributes(self):
"""Return the camera state attributes."""
attrs = {"access_token": self.access_tokens[-1]}
if self.model:
attrs["model_name"] = self.model
if self.brand:
attrs["brand"] = self.brand
if self.motion_detection_enabled:
attrs["motion_detection"] = self.motion_detection_enabled
return attrs
@callback
def async_update_token(self):
"""Update the used token."""
self.access_tokens.append(
hashlib.sha256(_RND.getrandbits(256).to_bytes(32, "little")).hexdigest()
)
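    # Illustrative note (values assumed, not from the original source): each
    # access token is a 64-character SHA-256 hex digest, so entity_picture for
    # 'camera.demo' renders as
    # '/api/camera_proxy/camera.demo?token=<latest token>'.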
class CameraView(HomeAssistantView):
"""Base CameraView."""
requires_auth = False
def __init__(self, component):
"""Initialize a basic camera view."""
self.component = component
async def get(self, request, entity_id):
"""Start a GET request."""
camera = self.component.get_entity(entity_id)
if camera is None:
raise web.HTTPNotFound()
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") in camera.access_tokens
)
if not authenticated:
raise web.HTTPUnauthorized()
if not camera.is_on:
_LOGGER.debug("Camera is off.")
raise web.HTTPServiceUnavailable()
return await self.handle(request, camera)
async def handle(self, request, camera):
"""Handle the camera request."""
raise NotImplementedError()
class CameraImageView(CameraView):
"""Camera view to serve an image."""
url = "/api/camera_proxy/{entity_id}"
name = "api:camera:image"
async def handle(self, request, camera):
"""Serve camera image."""
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
async with async_timeout.timeout(10):
image = await camera.async_camera_image()
if image:
return web.Response(body=image, content_type=camera.content_type)
raise web.HTTPInternalServerError()
class CameraMjpegStream(CameraView):
"""Camera View to serve an MJPEG stream."""
url = "/api/camera_proxy_stream/{entity_id}"
name = "api:camera:stream"
async def handle(self, request, camera):
"""Serve camera stream, possibly with interval."""
interval = request.query.get("interval")
if interval is None:
return await camera.handle_async_mjpeg_stream(request)
try:
# Compose camera stream from stills
interval = float(request.query.get("interval"))
if interval < MIN_STREAM_INTERVAL:
raise ValueError(f"Stream interval must be be > {MIN_STREAM_INTERVAL}")
return await camera.handle_async_still_stream(request, interval)
except ValueError:
raise web.HTTPBadRequest()
@websocket_api.async_response
async def websocket_camera_thumbnail(hass, connection, msg):
"""Handle get camera thumbnail websocket command.
Async friendly.
"""
try:
image = await async_get_image(hass, msg["entity_id"])
await connection.send_big_result(
msg["id"],
{
"content_type": image.content_type,
"content": base64.b64encode(image.content).decode("utf-8"),
},
)
except HomeAssistantError:
connection.send_message(
websocket_api.error_message(
msg["id"], "image_fetch_failed", "Unable to fetch image"
)
)
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "camera/stream",
vol.Required("entity_id"): cv.entity_id,
vol.Optional("format", default="hls"): vol.In(OUTPUT_FORMATS),
}
)
async def ws_camera_stream(hass, connection, msg):
"""Handle get camera stream websocket command.
Async friendly.
"""
try:
entity_id = msg["entity_id"]
camera = _get_camera_from_entity_id(hass, entity_id)
camera_prefs = hass.data[DATA_CAMERA_PREFS].get(entity_id)
async with async_timeout.timeout(10):
source = await camera.stream_source()
if not source:
raise HomeAssistantError(
f"{camera.entity_id} does not support play stream service"
)
fmt = msg["format"]
url = request_stream(
hass, source, fmt=fmt, keepalive=camera_prefs.preload_stream
)
connection.send_result(msg["id"], {"url": url})
except HomeAssistantError as ex:
_LOGGER.error("Error requesting stream: %s", ex)
connection.send_error(msg["id"], "start_stream_failed", str(ex))
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting stream source")
connection.send_error(
msg["id"], "start_stream_failed", "Timeout getting stream source"
)
@websocket_api.async_response
@websocket_api.websocket_command(
{vol.Required("type"): "camera/get_prefs", vol.Required("entity_id"): cv.entity_id}
)
async def websocket_get_prefs(hass, connection, msg):
"""Handle request for account info."""
prefs = hass.data[DATA_CAMERA_PREFS].get(msg["entity_id"])
connection.send_result(msg["id"], prefs.as_dict())
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "camera/update_prefs",
vol.Required("entity_id"): cv.entity_id,
vol.Optional("preload_stream"): bool,
}
)
async def websocket_update_prefs(hass, connection, msg):
"""Handle request for account info."""
prefs = hass.data[DATA_CAMERA_PREFS]
changes = dict(msg)
changes.pop("id")
changes.pop("type")
entity_id = changes.pop("entity_id")
await prefs.async_update(entity_id, **changes)
connection.send_result(msg["id"], prefs.get(entity_id).as_dict())
async def async_handle_snapshot_service(camera, service):
"""Handle snapshot services calls."""
hass = camera.hass
filename = service.data[ATTR_FILENAME]
filename.hass = hass
snapshot_file = filename.async_render(variables={ATTR_ENTITY_ID: camera})
    # check that we are allowed to access that file
if not hass.config.is_allowed_path(snapshot_file):
_LOGGER.error("Can't write %s, no access to path!", snapshot_file)
return
image = await camera.async_camera_image()
def _write_image(to_file, image_data):
"""Executor helper to write image."""
with open(to_file, "wb") as img_file:
img_file.write(image_data)
try:
await hass.async_add_executor_job(_write_image, snapshot_file, image)
except OSError as err:
_LOGGER.error("Can't write image to file: %s", err)
async def async_handle_play_stream_service(camera, service_call):
"""Handle play stream services calls."""
async with async_timeout.timeout(10):
source = await camera.stream_source()
if not source:
raise HomeAssistantError(
f"{camera.entity_id} does not support play stream service"
)
hass = camera.hass
camera_prefs = hass.data[DATA_CAMERA_PREFS].get(camera.entity_id)
fmt = service_call.data[ATTR_FORMAT]
entity_ids = service_call.data[ATTR_MEDIA_PLAYER]
url = request_stream(hass, source, fmt=fmt, keepalive=camera_prefs.preload_stream)
data = {
ATTR_ENTITY_ID: entity_ids,
ATTR_MEDIA_CONTENT_ID: f"{hass.config.api.base_url}{url}",
ATTR_MEDIA_CONTENT_TYPE: FORMAT_CONTENT_TYPE[fmt],
}
await hass.services.async_call(
DOMAIN_MP, SERVICE_PLAY_MEDIA, data, blocking=True, context=service_call.context
)
async def async_handle_record_service(camera, call):
"""Handle stream recording service calls."""
async with async_timeout.timeout(10):
source = await camera.stream_source()
if not source:
raise HomeAssistantError(f"{camera.entity_id} does not support record service")
hass = camera.hass
filename = call.data[CONF_FILENAME]
filename.hass = hass
video_path = filename.async_render(variables={ATTR_ENTITY_ID: camera})
data = {
CONF_STREAM_SOURCE: source,
CONF_FILENAME: video_path,
CONF_DURATION: call.data[CONF_DURATION],
CONF_LOOKBACK: call.data[CONF_LOOKBACK],
}
await hass.services.async_call(
DOMAIN_STREAM, SERVICE_RECORD, data, blocking=True, context=call.context
)
|
Cinntax/home-assistant
|
homeassistant/components/camera/__init__.py
|
Python
|
apache-2.0
| 21,499
|
import random
import string
from six.moves.urllib.parse import urlparse
def region_from_glacier_url(url):
domain = urlparse(url).netloc
if "." in domain:
return domain.split(".")[1]
else:
return "us-east-1"
def vault_from_glacier_url(full_url):
return full_url.split("/")[-1]
def get_job_id():
return "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(92)
)
|
william-richard/moto
|
moto/glacier/utils.py
|
Python
|
apache-2.0
| 441
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import unittest
import sys
from xml.etree import ElementTree as ET
import xmlrpclib
from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer
from libcloud.compute.types import NodeState
from test import MockHttp
from test.compute import TestCaseMixin
from test.file_fixtures import ComputeFileFixtures
from test.secrets import SOFTLAYER_USER, SOFTLAYER_APIKEY
class MockSoftLayerTransport(xmlrpclib.Transport):
def request(self, host, handler, request_body, verbose=0):
self.verbose = 0
method = ET.XML(request_body).find('methodName').text
mock = SoftLayerMockHttp(host, 80)
mock.request('POST', "%s/%s" % (handler, method))
resp = mock.getresponse()
return self._parse_response(resp.body, None)
class SoftLayerTests(unittest.TestCase):
def setUp(self):
SoftLayer.connectionCls.proxyCls.transportCls = [MockSoftLayerTransport, MockSoftLayerTransport]
self.driver = SoftLayer(SOFTLAYER_USER, SOFTLAYER_APIKEY)
def test_list_nodes(self):
node = self.driver.list_nodes()[0]
self.assertEqual(node.name, 'test1')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.extra['password'], 'TEST')
def test_list_locations(self):
locations = self.driver.list_locations()
seattle = (l for l in locations if l.name == 'sea01').next()
self.assertEqual(seattle.country, 'US')
self.assertEqual(seattle.id, '18171')
def test_list_images(self):
images = self.driver.list_images()
image = images[0]
self.assertEqual(image.id, '1684')
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 2)
self.assertEqual(sizes[0].id, 'sl1')
class SoftLayerMockHttp(MockHttp):
fixtures = ComputeFileFixtures('softlayer')
def _xmlrpc_v3_SoftLayer_Account_getVirtualGuests(self, method, url, body, headers):
body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters(self, method, url, body, headers):
body = self.fixtures.load('v3_SoftLayer_Location_Datacenter_getDatacenters.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
cloudkick/libcloud
|
test/compute/test_softlayer.py
|
Python
|
apache-2.0
| 3,233
|
'''
Created on 2017-03-05
@author: 张志远
'''
import pandas as pd
class Menu():
'''
    Menu structure for the whole application.
'''
def __init__(self, cur):
'''
Constructor
'''
self.menu_dict = {}
self.cur = cur
sql_str = 'SELECT t.id, t.name, nvl(t.parent_id,0) parent_id, t.module_name, t.class_name, t.method_name FROM menu_list t '
self.cur.execute(sql_str)
rs = self.cur.fetchall()
        self.df = pd.DataFrame(
rs, index=range(1, len(rs) + 1), columns=['id', 'name', 'parent_id', 'module_name', 'class_name', 'method_name'])
# print(self.df)
def get_menu_list(self, parent_id=0):
'''
        Return the list of child menus for the given parent_id; when it is 0, return the root (top-level) menus.
'''
menu_level = self.df[self.df['parent_id'] == parent_id]
return menu_level
# print(menu_level0['name'])
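# Hypothetical usage sketch (cursor and ids are assumptions, not from the
# original module):
#   menu = Menu(cur)                       # cur: an open DB-API cursor
#   roots = menu.get_menu_list()           # rows whose parent_id == 0
#   children = menu.get_menu_list(parent_id=int(roots.iloc[0]['id']))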
|
zzy93421/HFOrder
|
source/Menu.py
|
Python
|
apache-2.0
| 968
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA, Junki MIZUSHIMA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from subprocess import Popen, PIPE
import re
def __get_ika_log_version():
try:
commit_id = Popen(["git", "rev-parse", "--short", "HEAD"], stdout=PIPE).communicate()[0].decode('utf-8')
return re.sub(r'\s+', r'', commit_id)
    except Exception:
return 'unknown'
IKALOG_VERSION = __get_ika_log_version()
|
mzsm/IkaLog
|
ikalog/version.py
|
Python
|
apache-2.0
| 1,006
|
import unittest
from hepixvmitrust import VMListControler
il_simple_json = """"hv:imagelist": {
"hv:uri": "https://example.org/example-image-list.image_list",
"hv:version": "1",
"dc:description": "a README example of an image list",
"dc:date:created": "2011-03-10T17:09:12Z",
"dc:source": "example.org",
"hv:endorser": {
"hv:x509": {
"hv:dn": "/C=DE/O=GermanGrid/OU=DESY/CN=Owen Synge",
"dc:creator": "Owen Synge",
"hv:email": "owen.synge@desy.de",
"hv:ca": "/C=DE/O=GermanGrid/CN=GridKa-CA"
}
},
"dc:date:expires": "2011-04-07T17:09:12Z",
"hv:images": [
{
"hv:image": {
"hv:uri": "http://example.org/example-image.img",
"sl:osversion": "SL 5.5",
"sl:comments": "Vanila install with contextulization scripts",
"hv:version": "1",
"dc:description": "This is an README example VM",
"sl:checksum:sha512": "8b4c269a60da1061b434b696c4a89293bea847b66bd8ba486a914d4209df651193ee8d454f8231840b7500fab6740620c7111d9a17d08b743133dc393ba2c0d4",
"hv:size": 2147483648,
"sl:arch": "x86_64",
"hv:hypervisor": "kvm",
"dc:identifier": "488dcdc4-9ab1-4fc8-a7ba-b7a5428ecb3d",
"sl:os": "Linux",
"dc:title": "README example VM"
}
}
],
"dc:identifier": "4e186b44-2c64-40ea-97d5-e9e5c0bce059",
"dc:title": "README example"
}
}"""
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.listcontroler = VMListControler()
def tearDown(self):
pass
def test_listcontroler_loads(self):
self.listcontroler.loads(il_simple_json)
def test_listcontroler_verify(self):
self.listcontroler.loads(il_simple_json)
self.listcontroler.verify()
|
hepix-virtualisation/hepixvmitrust
|
test/test_first.py
|
Python
|
apache-2.0
| 1,829
|
###
from naoqi import ALProxy
port = 9559
robotIP = '192.168.0.102'
anim=ALProxy("ALAnimatedSpeech",robotIP,port)
cong={"bodyLnguageMde":"contextual"}
tts = ALProxy("ALTextToSpeech", robotIP, 9559)
tts.setParameter("speed", 80)
anim.say("Hi hello! I'm a robot that moving my hands following the movements of your hands.I usually know how to imitate those in front of me, but today I got confused and I do not recognize your hands very well. Can you help me control my body? I'll give you now three tasks, and for each of them you'll have 15 seconds, will you be successful?", cong)
anim.say("This is it? Are you already going? Let's do a little dance before you go", cong)
anim.say("Oh, I'm so unfocused, I got confused again. You're very good at it, maybe we'll do it again? Take a minute to understand how I got confused this time and then I will ask you to do more tasks", cong)
anim.say("You are so good at understanding how I move, do you want a challenge? Now you can move me any way you want for 2 minutes, at the end of the two minutes you will not have any tasks. Can you understand how am I programmed to move? If you do not want to, you can stop here", cong)
anim.say("Are you so good at understanding how I move, want a challenge? Now you can move me the way you want for 2 minutes, at the end of the two minutes you will not have any fat. Can you understand how I programmed to move? You can stop at any point.", cong)
anim.say("Oh, where am I? I got confused again. You helped me a lot the last time, let's do it again?. \n Take a minute to understand how I got confused this time and then ask you to do more tasks", cong)
|
CuriosityLabTAU/physicial_curiosity_big_experiment
|
for_aminations.py
|
Python
|
apache-2.0
| 1,653
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def comment_rule(rule, comment):
if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
return rule
return '%s -m comment --comment "%s"' % (rule, comment)
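# Illustrative example (rule text assumed, not from the original module): with
# comment_iptables_rules enabled,
#   comment_rule('-A INPUT -j ACCEPT', 'Allow all')
# yields '-A INPUT -j ACCEPT -m comment --comment "Allow all"'.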
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
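# Illustrative example (chain name assumed): a wrapped chain is truncated to 11
# characters, e.g. get_chain_name('neutron-openvswi-sg-chain') == 'neutron-ope',
# so '<binary_name>-<chain>' never exceeds iptables' 28-character limit.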
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None, comment=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
self.comment = comment
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return comment_rule('-A %s %s' % (chain, self.rule), self.comment)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
comment=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag, comment))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name,
comment=comment))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name,
comment=comment))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def _get_chain_rules(self, chain, wrap):
chain = get_chain_name(chain, wrap)
return [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
def is_chain_empty(self, chain, wrap=True):
return not self._get_chain_rules(chain, wrap)
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = self._get_chain_rules(chain, wrap)
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
config.register_iptables_opts(cfg.CONF)
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
self.ipv4.update(
{'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'raw': ['PREROUTING',
'OUTPUT']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
# chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False,
comment=ic.SNAT_OUT)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
try:
requested_table = {4: self.ipv4, 6: self.ipv6}[ip_version][table]
except KeyError:
return True
return requested_table.is_chain_empty(chain, wrap)
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
# Traverse tables in sorted order for predictable dump output
for table_name in sorted(tables):
table = tables[table_name]
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
# length only <2 when fake iptables
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
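    # Illustrative example (lines assumed, not from the original module): for
    # iptables-save output
    #   ['# Generated', '*filter', ':INPUT ACCEPT [0:0]',
    #    '-A INPUT -j ACCEPT', 'COMMIT', '# Completed'],
    # _find_table(lines, 'filter') returns (0, 6): from the line before
    # '*filter' through the line after 'COMMIT'.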
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
# Chains are stored as sets to avoid duplicates.
# Sort the output chains here to make their order predictable.
unwrapped_chains = sorted(table.unwrapped_chains)
chains = sorted(table.chains)
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
        # flush lists, just in case we didn't find something
        remove_chains.clear()
        # clear in place (removing items while iterating would skip elements)
        del remove_rules[:]
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
if self.use_ipv6:
cmd_tables += [('ip6tables', key)
for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
|
uni2u/neutron
|
neutron/agent/linux/iptables_manager.py
|
Python
|
apache-2.0
| 26,354
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DVID specific resource objects that are passed to intern.service.Service methods.
Author:
Luis Rodriguez
"""
from intern.resource.dvid.resource import DVIDResource
from intern.resource.dvid.resource import DataInstanceResource, RepositoryResource
|
jhuapl-boss/intern
|
intern/resource/dvid/__init__.py
|
Python
|
apache-2.0
| 870
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import unittest
import uuid
import mox
import nose.plugins.skip
from oslo.config import cfg
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if FLAGS.fake_tests:
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return func(*args, **kw)
return _skipper
class TestingException(Exception):
pass
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
flags.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.injected = []
self._services = []
FLAGS.set_override('fatal_exception_format_errors', True)
def tearDown(self):
"""Runs after each test method to tear down test environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
FLAGS.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
        host = host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
                # If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'"""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
|
tomasdubec/openstack-cinder
|
cinder/test.py
|
Python
|
apache-2.0
| 9,604
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class LinearClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age': tf.SparseTensor(values=['1'], indices=[[0, 0]], shape=[1, 1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.sparse_column_with_hash_bucket('age', 2)
classifier = tf.contrib.learn.LinearClassifier(
_joint_weight=True,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = tf.contrib.layers.real_valued_column('', dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
def _optimizer():
return tf.train.FtrlOptimizer(learning_rate=0.1)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=_optimizer,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer='Ftrl',
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, targets))
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
}, tf.constant([[.7], [0]], dtype=tf.float32)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predictions_proba = classifier.predict_proba(input_fn=input_fn)
# Prediction probabilities mirror the target column, which proves that the
# classifier learns from float input.
self.assertAllClose(predictions_proba, [[.3, .7], [1., 0.]], atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
target = tf.constant([[1], [0], [0]])
return features, target
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=sparse_features,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(tf.constant([1]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=['english'], indices=[[0, 0]], shape=[1, 1]),
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(classifier.predict(input_fn=predict_input_fn,
as_iterable=True))
out1_proba = list(classifier.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
del classifier
classifier2 = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
out2_class = list(classifier2.predict(input_fn=predict_input_fn,
as_iterable=True))
out2_proba = list(classifier2.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The first row has more weight than the others. The model should fit (y=x)
# better than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.LinearClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_target_mean'], 0.9)
# If there were no weight column, the model would learn y=Not(x). Because of
# the weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in the eval data set are y=x. So if the weight column were
# ignored, then accuracy would be zero. Because of the weights, accuracy should
# be close to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean target should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_target_mean'], 0.9)
# The classifier has learned y=x. If the weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25. Because the
# weights are not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': tf.constant([[20], [20], [20]]),
'weights': tf.constant([[100], [1], [1]]),
}
target = tf.constant([[1], [0], [0]])
return features, target
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures,
# except that the two 1-dimensional dense features have been replaced by one
# 2-dimensional feature.
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]])
}, tf.constant([[0], [1]])
dense_feature = tf.contrib.layers.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[0], [0], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_language],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
'language': tf.SparseTensor(values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}, tf.constant([[1], [0]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[10.]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearRegressor(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=cont_features,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_iris_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The first row has more weight than the others. The model should fit (y=x)
# better than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
target = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predictions, target, atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
target = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predictions, target, atol=0.1)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
return tf.reduce_sum(tf.mul(predictions, targets))
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns)
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'x': tf.constant(x),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant(y)
x_column = tf.contrib.layers.real_valued_column('x', dimension=3)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertAllClose([w[0] for w in weights],
regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [5.0], [7.0]])
}, tf.constant([[1.55], [-1.25], [-3.0]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant([[1.4], [-0.8], [2.6]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
no_l1_reg_weights = regressor.weights_
# Regressor with L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = regressor.weights_
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But the weights returned by the regressor with L1 regularization have a
# smaller L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearRegressor requires at least one feature column.
'place_holder': tf.constant([[0.0]]*num_examples),
}, tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = tf.contrib.layers.real_valued_column('place_holder')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[place_holder],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[x] for x in
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half/10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half/10)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[1 if x%10 == 0 else 0] for x in range(half)] +
[[-1 if x%10 == 0 else 0] for x in range(half)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
target = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, target
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
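# Illustrative sketch (not part of linear_test.py): a condensed example of the pattern
# the tests above repeat with the TF 0.x tf.contrib.learn API: an input_fn returning a
# (features dict, labels) pair, feature columns describing those features, then
# fit/evaluate on a LinearClassifier. The feature names and values are hypothetical.
import tensorflow as tf

def _toy_input_fn():
    features = {
        'age': tf.constant([[25.0], [40.0]]),
        'language': tf.SparseTensor(values=['english', 'spanish'],
                                    indices=[[0, 0], [1, 0]],
                                    shape=[2, 1])
    }
    labels = tf.constant([[1], [0]])
    return features, labels

age = tf.contrib.layers.real_valued_column('age')
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=_toy_input_fn, steps=100)
print(classifier.evaluate(input_fn=_toy_input_fn, steps=1)['loss'])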
|
juharris/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
Python
|
apache-2.0
| 57,856
|
# Copyright (C) 2018 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add os_hidden column to images table"""
from alembic import op
from sqlalchemy import Boolean, Column, sql
# revision identifiers, used by Alembic.
revision = 'rocky_expand01'
down_revision = 'queens_expand01'
branch_labels = None
depends_on = None
def upgrade():
h_col = Column('os_hidden', Boolean, default=False, nullable=False,
server_default=sql.expression.false())
op.add_column('images', h_col)
op.create_index('os_hidden_image_idx', 'images', ['os_hidden'])
|
openstack/glance
|
glance/db/sqlalchemy/alembic_migrations/versions/rocky_expand01_add_os_hidden.py
|
Python
|
apache-2.0
| 1,135
|
"""Converts cplate betas to a BED file."""
import argparse
import os
import re
import pandas as pd
COLUMNS = ['chrom', 'start', 'end', 'name', 'score', 'strand']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--summaries', dest='summaries',
help='cplate summaries file')
parser.add_argument('--gene_number', dest='gene_number', default=-1,
type=int, help='row number for gene')
parser.add_argument('--genes', dest='genes', help='gene definition file')
parser.add_argument('--output', dest='output', default='',
help='output BED file')
return parser.parse_args()
def main():
args = parse_args()
summaries = pd.read_table(args.summaries, delimiter=' ')
# Infer gene number if needed.
gene_number = args.gene_number
if gene_number < 0:
gene_number = (
int(re.search(r'gene(\d+)', args.summaries).group(1)) - 1)
# Infer output path if needed.
output = args.output
if output == '':
output = os.path.splitext(args.summaries)[0] + '_beta.bed'
genes = pd.read_csv(args.genes)
gene = genes.iloc[gene_number]
intervals = []
for i, row in summaries.iterrows():
start = i
end = i + 1
interval = {'start': start,
'end': end,
'name': '.',
'score': row['b'],
'strand': '.',
}
intervals.append(interval)
intervals = pd.DataFrame(intervals)
intervals['chrom'] = gene['Chrom']
intervals['start'] = intervals['start'] + gene['Start']
intervals['end'] = intervals['end'] + gene['Start']
intervals = intervals[COLUMNS]
intervals.to_csv(output, sep='\t', header=False, index=False, quoting=False)
if __name__ == '__main__':
main()
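# Illustrative sketch (not part of betas_to_bed.py): the coordinate shift applied above,
# shown on hypothetical values. A summary row at offset i for a gene starting at
# position 1000 on chrII becomes the BED interval [1000 + i, 1000 + i + 1) with the
# beta value as its score.
import pandas as pd

gene = {'Chrom': 'chrII', 'Start': 1000}        # hypothetical gene record
summaries = pd.DataFrame({'b': [0.7, 1.3]})     # hypothetical cplate betas
rows = [{'chrom': gene['Chrom'],
         'start': i + gene['Start'],
         'end': i + 1 + gene['Start'],
         'name': '.',
         'score': row['b'],
         'strand': '.'}
        for i, row in summaries.iterrows()]
print(pd.DataFrame(rows)[['chrom', 'start', 'end', 'name', 'score', 'strand']])
# Expected intervals: chrII 1000-1001 with score 0.7, chrII 1001-1002 with score 1.3.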
|
awblocker/cplate
|
scripts/betas_to_bed.py
|
Python
|
apache-2.0
| 1,878
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Checkpoint functions for BigMLer
"""
import os
import bigml.api
from bigml.util import console_log
from bigmler.utils import log_message
def is_source_created(path, suffix=""):
"""Checks existence and reads the source id from the source file in the
path directory
"""
source_id = None
try:
with open("%s%ssource%s" % (path, os.sep, suffix)) as source_file:
source_id = source_file.readline().strip()
try:
source_id = bigml.api.get_source_id(source_id)
return True, source_id
except ValueError:
return False, None
except IOError:
return False, None
def is_dataset_created(path, suffix=""):
"""Checks existence and reads the dataset id from the dataset file in
the path directory
"""
dataset_id = None
try:
with open("%s%sdataset%s" % (path, os.sep, suffix)) as dataset_file:
dataset_id = dataset_file.readline().strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset_id)
return True, dataset_id
except ValueError:
return False, None
except IOError:
return False, None
def are_datasets_created(path, number_of_datasets, suffix='parts'):
"""Checks existence and reads the dataset ids from the datasets file in
the path directory
"""
dataset_ids = []
try:
with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file:
for line in datasets_file:
dataset = line.strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset)
dataset_ids.append(dataset_id)
except ValueError:
return False, dataset_ids
return len(dataset_ids) == number_of_datasets, dataset_ids
except IOError:
return False, dataset_ids
def are_models_created(path, number_of_models):
"""Checks existence and reads the model ids from the models file in the
path directory
"""
model_ids = []
try:
with open("%s%smodels" % (path, os.sep)) as models_file:
for line in models_file:
model = line.strip()
try:
model_id = bigml.api.get_model_id(model)
model_ids.append(model_id)
except ValueError:
return False, model_ids
return len(model_ids) == number_of_models, model_ids
except IOError:
return False, model_ids
def are_predictions_created(predictions_file, number_of_tests):
"""Checks existence and reads the predictions from the predictions file in
the path directory
"""
predictions = file_number_of_lines(predictions_file)
if predictions != number_of_tests:
os.remove(predictions_file)
return False, None
return True, None
def is_evaluation_created(path):
"""Checks existence and reads the evaluation id from the evaluation file
in the path directory
"""
evaluation_id = None
try:
with open("%s%sevaluation" % (path, os.sep)) as evaluation_file:
evaluation_id = evaluation_file.readline().strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation_id)
return True, evaluation_id
except ValueError:
return False, None
except IOError:
return False, None
def are_evaluations_created(path, number_of_evaluations):
"""Checks existence and reads the evaluation ids from the evaluations file
in the path directory and checks the corresponding evaluations
"""
evaluation_ids = []
try:
with open("%s%sevaluations" % (path, os.sep)) as evaluations_file:
for line in evaluations_file:
evaluation = line.strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation)
evaluation_ids.append(evaluation_id)
except ValueError:
return False, evaluation_ids
return len(evaluation_ids) == number_of_evaluations, evaluation_ids
except IOError:
return False, evaluation_ids
def are_ensembles_created(path, number_of_ensembles):
"""Checks and reads the ensembles ids from the ensembles file in the
path directory
"""
ensemble_ids = []
try:
with open("%s%sensembles" % (path, os.sep)) as ensembles_file:
for line in ensembles_file:
ensemble = line.strip()
try:
ensemble_id = bigml.api.get_ensemble_id(ensemble)
ensemble_ids.append(ensemble_id)
except ValueError:
return False, ensemble_ids
return len(ensemble_ids) == number_of_ensembles, ensemble_ids
except IOError:
return False, ensemble_ids
def checkpoint(function, *args, **kwargs):
"""Redirects to each checkpoint function
"""
common_parms = ['debug', 'message', 'log_file', 'console']
debug = kwargs.get('debug', False)
message = kwargs.get('message', None)
log_file = kwargs.get('log_file', None)
console = kwargs.get('console', False)
f_kwargs = {key: value for key, value in list(kwargs.items())
if key not in common_parms}
result = function(*args, **f_kwargs)
if debug:
console_log("Checkpoint: checking %s with args:\n%s\n\nResult:\n%s\n" %
(function.__name__, "\n".join([repr(arg) for arg in args]),
repr(result)))
# resume is the first element in the result tuple
if not result[0] and message is not None:
log_message(message, log_file=log_file, console=console)
return result
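# Illustrative usage sketch (not part of the original module): how a caller might wrap
# one of the is_*_created checks with checkpoint() to get debug logging and a resume
# message. The path, message and log file name below are hypothetical.
if __name__ == '__main__':
    resume, source_id = checkpoint(is_source_created, "./my_dir",
                                   debug=False,
                                   message="Source not found, creating a new one.",
                                   log_file="./my_dir/bigmler_sessions",
                                   console=True)
    if resume:
        print("Resuming with source %s" % source_id)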
def file_number_of_lines(file_name):
"""Counts the number of lines in a file
"""
try:
item = (0, None)
with open(file_name) as file_handler:
for item in enumerate(file_handler):
pass
return item[0] + 1
except IOError:
return 0
def is_batch_prediction_created(path):
"""Checks existence and reads the batch prediction id from the
batch_prediction file in the path directory
"""
batch_prediction_id = None
try:
with open("%s%sbatch_prediction"
% (path, os.sep)) as batch_prediction_file:
batch_prediction_id = batch_prediction_file.readline().strip()
try:
batch_prediction_id = bigml.api.get_batch_prediction_id(
batch_prediction_id)
return True, batch_prediction_id
except ValueError:
return False, None
except IOError:
return False, None
def is_batch_centroid_created(path):
"""Checks existence and reads the batch centroid id from the
batch_centroid file in the path directory
"""
batch_centroid_id = None
try:
with open("%s%sbatch_centroid"
                  % (path, os.sep)) as batch_centroid_file:
            batch_centroid_id = batch_centroid_file.readline().strip()
try:
batch_centroid_id = bigml.api.get_batch_centroid_id(
batch_centroid_id)
return True, batch_centroid_id
except ValueError:
return False, None
except IOError:
return False, None
def are_associations_created(path, number_of_associations):
"""Checks existence and reads the association ids from the associations
file in the path directory
"""
association_ids = []
try:
with open("%s%sassociations" % (path, os.sep)) as associations_file:
for line in associations_file:
association = line.strip()
try:
association_id = bigml.api.get_association_id(association)
association_ids.append(association_id)
except ValueError:
return False, association_ids
return len(association_ids) == number_of_associations, association_ids
except IOError:
return False, association_ids
def are_clusters_created(path, number_of_clusters):
"""Checks existence and reads the cluster ids from the clusters file in the
path directory
"""
cluster_ids = []
try:
with open("%s%sclusters" % (path, os.sep)) as clusters_file:
for line in clusters_file:
cluster = line.strip()
try:
cluster_id = bigml.api.get_cluster_id(cluster)
cluster_ids.append(cluster_id)
except ValueError:
return False, cluster_ids
return len(cluster_ids) == number_of_clusters, cluster_ids
except IOError:
return False, cluster_ids
def is_dataset_exported(filename):
"""Checks the existence of the CSV exported dataset file
"""
try:
with open(filename):
return True
except IOError:
return False
def is_batch_anomaly_score_created(path):
"""Checks existence and reads the batch anomaly score id from the
batch_anomaly_score file in the path directory
"""
batch_anomaly_score_id = None
try:
with open("%s%sbatch_anomaly_score"
                  % (path, os.sep)) as batch_anomaly_score_file:
            batch_anomaly_score_id = \
                batch_anomaly_score_file.readline().strip()
try:
batch_anomaly_score_id = bigml.api.get_batch_anomaly_score_id(
batch_anomaly_score_id)
return True, batch_anomaly_score_id
except ValueError:
return False, None
except IOError:
return False, None
def are_anomalies_created(path, number_of_anomalies):
"""Checks existence and reads the anomaly detector ids from the
anomalies file in the path directory
"""
anomaly_ids = []
try:
with open("%s%sanomalies" % (path, os.sep)) as anomalies_file:
for line in anomalies_file:
anomaly = line.strip()
try:
anomaly_id = bigml.api.get_anomaly_id(anomaly)
anomaly_ids.append(anomaly_id)
except ValueError:
return False, anomaly_ids
return len(anomaly_ids) == number_of_anomalies, anomaly_ids
except IOError:
return False, anomaly_ids
def is_project_created(path):
"""Checks existence and reads project id from the
project file in the path directory
"""
project_id = None
try:
with open("%s%sproject"
% (path, os.sep)) as project_file:
project_id = project_file.readline().strip()
try:
project_id = bigml.api.get_project_id(
project_id)
return True, project_id
except ValueError:
return False, None
except IOError:
return False, None
def are_samples_created(path, number_of_samples):
"""Checks existence and reads the samples ids from the samples file in the
path directory
"""
sample_ids = []
try:
with open("%s%ssamples" % (path, os.sep)) as samples_file:
for line in samples_file:
sample = line.strip()
try:
sample_id = bigml.api.get_sample_id(sample)
sample_ids.append(sample_id)
except ValueError:
return False, sample_ids
return len(sample_ids) == number_of_samples, sample_ids
except IOError:
return False, sample_ids
def are_logistic_regressions_created(path, number_of_logistic_regressions):
"""Checks existence and reads the logistic regression ids
from the logistic regressions file in the
path directory
"""
logistic_ids = []
try:
with open("%s%slogistic_regressions" % (path, os.sep)) as \
logistics_file:
for line in logistics_file:
logistic = line.strip()
try:
                    logistic_id = bigml.api.get_logistic_regression_id(
                        logistic)
logistic_ids.append(logistic_id)
except ValueError:
return False, logistic_ids
return len(logistic_ids) == number_of_logistic_regressions, \
logistic_ids
except IOError:
return False, logistic_ids
def are_linear_regressions_created(path, number_of_linear_regressions):
"""Checks existence and reads the linear regression ids
from the linear regressions file in the
path directory
"""
linear_ids = []
try:
with open("%s%slinear_regressions" % (path, os.sep)) as \
linear_file:
for line in linear_file:
linear = line.strip()
try:
                    linear_id = bigml.api.get_linear_regression_id(
                        linear)
linear_ids.append(linear_id)
except ValueError:
return False, linear_ids
return len(linear_ids) == number_of_linear_regressions, \
linear_ids
except IOError:
return False, linear_ids
def are_scripts_created(path, number_of_scripts):
"""Checks existence and reads the scripts ids from the scripts file in the
path directory
"""
script_ids = []
try:
with open("%s%sscripts" % (path, os.sep)) as scripts_file:
for line in scripts_file:
script = line.strip()
try:
script_id = bigml.api.get_script_id(script)
script_ids.append(script_id)
except ValueError:
return False, script_ids
            return len(script_ids) == number_of_scripts, script_ids
except IOError:
return False, script_ids
def is_execution_created(path):
"""Checks existence and reads the execution id from the execution file in
the path directory
"""
execution_id = None
try:
with open("%s%sexecution" % (path, os.sep)) as execution_file:
execution_id = execution_file.readline().strip()
try:
execution_id = bigml.api.get_execution_id(execution_id)
return True, execution_id
except ValueError:
return False, None
except IOError:
return False, None
def is_library_created(path):
"""Checks existence and reads the library id from the library file in
the path directory
"""
library_id = None
try:
with open("%s%slibrary" % (path, os.sep)) as library_file:
library_id = library_file.readline().strip()
try:
library_id = bigml.api.get_library_id(library_id)
return True, library_id
except ValueError:
return False, None
except IOError:
return False, None
def are_topic_models_created(path, number_of_topic_models):
"""Checks existence and reads the topic model ids from the
topic models file in the
path directory
"""
topic_model_ids = []
try:
with open("%s%stopic_models" % (path, os.sep)) as topic_models_file:
for line in topic_models_file:
topic_model = line.strip()
try:
topic_model_id = bigml.api.get_topic_model_id(topic_model)
topic_model_ids.append(topic_model_id)
except ValueError:
return False, topic_model_ids
return len(topic_model_ids) == number_of_topic_models, topic_model_ids
except IOError:
return False, topic_model_ids
def is_batch_topic_distribution_created(path):
"""Checks existence and reads the batch topic distribution id from the
batch_topic_distribution file in the path directory
"""
batch_topic_distribution_id = None
try:
with open("%s%sbatch_topic_distribution"
                  % (path, os.sep)) as batch_topic_distribution_file:
            batch_topic_distribution_id = \
                batch_topic_distribution_file.readline().strip()
try:
batch_topic_distribution_id = \
                bigml.api.get_batch_topic_distribution_id(
batch_topic_distribution_id)
return True, batch_topic_distribution_id
except ValueError:
return False, None
except IOError:
return False, None
def are_time_series_created(path, number_of_time_series):
"""Checks existence and reads the time-series ids from the
time-series file in the
path directory
"""
time_series_ids = []
try:
with open("%s%stime_series" % (path, os.sep)) as time_series_file:
for line in time_series_file:
time_series = line.strip()
try:
time_series_id = bigml.api.get_time_series_id(time_series)
time_series_ids.append(time_series_id)
except ValueError:
return False, time_series_ids
return len(time_series_ids) == number_of_time_series, time_series_ids
except IOError:
return False, time_series_ids
def are_deepnets_created(path, number_of_deepnets):
"""Checks existence and reads the deepnet ids
from the deepnets file in the
path directory
"""
deepnet_ids = []
try:
with open("%s%sdeepnets" % (path, os.sep)) as \
deepnets_file:
for line in deepnets_file:
deepnet = line.strip()
try:
                    deepnet_id = bigml.api.get_deepnet_id(deepnet)
deepnet_ids.append(deepnet_id)
except ValueError:
return False, deepnet_ids
return len(deepnet_ids) == number_of_deepnets, \
deepnet_ids
except IOError:
return False, deepnet_ids
def is_external_connector_created(path):
"""Checks existence and reads external connector id from the
external_connector file in the path directory
"""
external_connector_id = None
try:
with open("%s%sexternal_connector"
% (path, os.sep)) as connector_file:
external_connector_id = connector_file.readline().strip()
try:
external_connector_id = bigml.api.get_external_connector_id(
external_connector_id)
return True, external_connector_id
except ValueError:
return False, None
except IOError:
return False, None
def is_batch_projection_created(path):
"""Checks existence and reads the batch projection id from the
batch_projection file in the path directory
"""
batch_projection_id = None
try:
with open("%s%sbatch_projection"
                  % (path, os.sep)) as batch_projection_file:
            batch_projection_id = batch_projection_file.readline().strip()
try:
batch_projection_id = bigml.api.get_batch_projection_id(
batch_projection_id)
return True, batch_projection_id
except ValueError:
return False, None
except IOError:
return False, None
def is_forecast_created(path):
"""Checks existence and reads the forecast id from the forecast file
in the path directory
"""
forecast_id = None
try:
with open("%s%sforecast" % (path, os.sep)) as forecast_file:
forecast_id = forecast_file.readline().strip()
try:
forecast_id = bigml.api.get_forecast_id(forecast_id)
return True, forecast_id
except ValueError:
return False, None
except IOError:
return False, None
def are_fusions_created(path, number_of_fusions):
"""Checks existence and reads the fusion ids
from the fusions file in the
path directory
"""
fusion_ids = []
try:
with open("%s%sfusions" % (path, os.sep)) as \
fusions_file:
for line in fusions_file:
fusion = line.strip()
try:
                    fusion_id = bigml.api.get_fusion_id(fusion)
fusion_ids.append(fusion_id)
except ValueError:
return False, fusion_ids
return len(fusion_ids) == number_of_fusions, \
fusion_ids
except IOError:
return False, fusion_ids
def are_pcas_created(path, number_of_pcas):
"""Checks existence and reads the PCA ids
from the PCAs file in the
path directory
"""
pca_ids = []
try:
with open("%s%spcas" % (path, os.sep)) as \
pcas_file:
for line in pcas_file:
pca = line.strip()
try:
                    pca_id = bigml.api.get_pca_id(pca)
pca_ids.append(pca_id)
except ValueError:
return False, pca_ids
return len(pca_ids) == number_of_pcas, \
pca_ids
except IOError:
return False, pca_ids
|
jaor/bigmler
|
bigmler/checkpoint.py
|
Python
|
apache-2.0
| 22,228
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.common import constants as lib_constants
from wsme import types as wtypes
from octavia.api.common import types
from octavia.api.v2.types import health_monitor
from octavia.api.v2.types import member
from octavia.common import constants
class SessionPersistenceResponse(types.BaseType):
"""Defines which attributes are to be shown on any response."""
type = wtypes.wsattr(wtypes.text)
cookie_name = wtypes.wsattr(wtypes.text)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType())
persistence_granularity = wtypes.wsattr(types.IPAddressType())
class SessionPersistencePOST(types.BaseType):
"""Defines mandatory and optional attributes of a POST request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES),
mandatory=True)
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class SessionPersistencePUT(types.BaseType):
"""Defines attributes that are acceptable of a PUT request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES))
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class BasePoolType(types.BaseType):
_type_to_model_map = {'admin_state_up': 'enabled',
'healthmonitor': 'health_monitor',
'healthmonitor_id': 'health_monitor.id',
'tls_container_ref': 'tls_certificate_id',
'ca_tls_container_ref': 'ca_tls_certificate_id',
'crl_container_ref': 'crl_container_id'}
_child_map = {'health_monitor': {'id': 'healthmonitor_id'}}
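# Note (editorial assumption about the shared types.BaseType machinery, not
# stated in this module): _type_to_model_map drives attribute renaming between
# the API type and the data model, so for example the API field
# 'admin_state_up' corresponds to the data model field 'enabled', and
# 'healthmonitor_id' resolves to the nested attribute 'health_monitor.id'.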
class PoolResponse(BasePoolType):
"""Defines which attributes are to be shown on any response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
description = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
admin_state_up = wtypes.wsattr(bool)
protocol = wtypes.wsattr(wtypes.text)
lb_algorithm = wtypes.wsattr(wtypes.text)
session_persistence = wtypes.wsattr(SessionPersistenceResponse)
project_id = wtypes.wsattr(wtypes.StringType())
loadbalancers = wtypes.wsattr([types.IdOnlyType])
listeners = wtypes.wsattr([types.IdOnlyType])
created_at = wtypes.wsattr(wtypes.datetime.datetime)
updated_at = wtypes.wsattr(wtypes.datetime.datetime)
healthmonitor_id = wtypes.wsattr(wtypes.UuidType())
members = wtypes.wsattr([types.IdOnlyType])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
tls_container_ref = wtypes.wsattr(wtypes.StringType())
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType())
crl_container_ref = wtypes.wsattr(wtypes.StringType())
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType())
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType()))
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolResponse, cls).from_data_model(
data_model, children=children)
if data_model.session_persistence:
pool.session_persistence = (
SessionPersistenceResponse.from_data_model(
data_model.session_persistence))
if cls._full_response():
del pool.loadbalancers
member_model = member.MemberFullResponse
if pool.healthmonitor:
pool.healthmonitor = (
health_monitor.HealthMonitorFullResponse
.from_data_model(data_model.health_monitor))
else:
if data_model.load_balancer:
pool.loadbalancers = [
types.IdOnlyType.from_data_model(data_model.load_balancer)]
else:
pool.loadbalancers = []
member_model = types.IdOnlyType
if data_model.health_monitor:
pool.healthmonitor_id = data_model.health_monitor.id
pool.listeners = [
types.IdOnlyType.from_data_model(i) for i in data_model.listeners]
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
pool.tls_versions = data_model.tls_versions
pool.alpn_protocols = data_model.alpn_protocols
return pool
class PoolFullResponse(PoolResponse):
@classmethod
def _full_response(cls):
return True
members = wtypes.wsattr([member.MemberFullResponse])
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse)
class PoolRootResponse(types.BaseType):
pool = wtypes.wsattr(PoolResponse)
class PoolsRootResponse(types.BaseType):
pools = wtypes.wsattr([PoolResponse])
pools_links = wtypes.wsattr([types.PageType])
class PoolPOST(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
listener_id = wtypes.wsattr(wtypes.UuidType())
loadbalancer_id = wtypes.wsattr(wtypes.UuidType())
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS),
mandatory=True)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS),
mandatory=True)
session_persistence = wtypes.wsattr(SessionPersistencePOST)
# TODO(johnsom) Remove after deprecation (R series)
project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(
wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType()))
class PoolRootPOST(types.BaseType):
pool = wtypes.wsattr(PoolPOST)
class PoolPUT(BasePoolType):
"""Defines attributes that are acceptable of a PUT request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePUT)
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType()))
class PoolRootPut(types.BaseType):
pool = wtypes.wsattr(PoolPUT)
class PoolSingleCreate(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS))
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePOST)
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType()))
class PoolStatusResponse(BasePoolType):
"""Defines which attributes are to be shown on status response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
health_monitor = wtypes.wsattr(
health_monitor.HealthMonitorStatusResponse)
members = wtypes.wsattr([member.MemberStatusResponse])
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolStatusResponse, cls).from_data_model(
data_model, children=children)
member_model = member.MemberStatusResponse
if data_model.health_monitor:
pool.health_monitor = (
health_monitor.HealthMonitorStatusResponse.from_data_model(
data_model.health_monitor))
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
return pool
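# Hedged example (editorial, not part of the original module): the kind of
# JSON body the PoolRootPOST / PoolPOST types above would accept; the values
# are illustrative and assume 'HTTP', 'ROUND_ROBIN' and 'HTTP_COOKIE' are
# among the supported constants referenced above.
#
#   {"pool": {"loadbalancer_id": "<loadbalancer uuid>",
#             "protocol": "HTTP",
#             "lb_algorithm": "ROUND_ROBIN",
#             "admin_state_up": true,
#             "session_persistence": {"type": "HTTP_COOKIE"}}}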
|
openstack/octavia
|
octavia/api/v2/types/pool.py
|
Python
|
apache-2.0
| 11,027
|
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
from subprocess import Popen, PIPE
from datetime import datetime
from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter, get_receptor_from_receptor_ligand_model, get_model_from_receptor_ligand_model
import os, sys
from os_utils import preparing_path
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line
def sorting_buried_area_ligand(sc, buried_areaRDD):
sqlCtx = SQLContext(sc)
buried_areaRDD = sc.parallelize(buried_areaRDD)
buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_lig_rec=float(p[1]), buried_lig_rec_perc=float(p[2]), buried_lig_lig=float(p[3]), buried_lig_lig_perc=float(p[4]) ) ) #receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]),
buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
buried_area_table.registerTempTable("buried_area_ligand")
buried_area_sorted_by_lig_rec_perc = sqlCtx.sql("SELECT * FROM buried_area_ligand ORDER BY buried_lig_rec DESC")
return buried_area_sorted_by_lig_rec_perc
def save_buried_area_ligand(path_file_buried_area, buried_area_sorted_by_res_buried_area_perc):
f_buried_area = open(path_file_buried_area,"w")
for area in buried_area_sorted_by_res_buried_area_perc:
#splited_line = area[0].split("_-_")
#aux_recep = splited_line[0]
#aux_lig = str(splited_line[1])
#preparing receptor
#receptor = str(str(aux_recep).replace("compl_", " ")).strip()
#preparing ligand
#splited_aux_lig = str(aux_lig).split(get_separator_filename_mode())
#ligand = splited_aux_lig[0]
#model = splited_aux_lig[1]
pose = area[0]
buried_lig_rec = "{:.4f}".format(area[1])
buried_lig_rec_perc = "{:.4f}".format(area[2])
buried_lig_lig = "{:.4f}".format(area[3])
buried_lig_lig_perc = "{:.4f}".format(area[4])
#line = receptor+"\t"+ligand+"\t"+model+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig)+"\t"+str(buried_lig_lig_perc)+"\n"
line = str(pose)+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig)+"\t"+str(buried_lig_lig_perc)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_buried_area_ligand_sort(path_file_buried_area, buried_area):
f_buried_area = open(path_file_buried_area,"w")
line = "# buried_area_lig[nm2]\tburied_area_lig[%]\tburied_area_lig-lig[%]\tpose"+"\n"
f_buried_area.write(line)
for area in buried_area:
#receptor = area[0]
#ligand = area[1]
#model = area[2]
pose = str(str(area[0]).replace("compl_", " ")).strip()
buried_lig_rec = "{:.4f}".format(area[1])
buried_lig_rec_perc = "{:.4f}".format(area[2])
buried_lig_lig = "{:.4f}".format(area[3])
buried_lig_lig_perc = "{:.4f}".format(area[4])
#line = receptor+"\t"+ligand+"\t"+str(model)+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig)+"\t"+str(buried_lig_lig_perc)+"\n"
line = str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def loading_lines_from_ligandArea_files(line):
line_splited = str(line).split()
#line_ret = ( str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]), float(line_splited[6]) )
line_ret = ( str(line_splited[0]), float(line_splited[1]), float(line_splited[2]), float(line_splited[3]), float(line_splited[4]) )
return line_ret
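# Hedged example (editorial) of one tab-separated .ligandArea line consumed
# above; the column order follows save_buried_area_ligand (pose,
# buried_lig_rec, buried_lig_rec_perc, buried_lig_lig, buried_lig_lig_perc)
# and the values and pose name are illustrative:
#
#   compl_receptor_-_ligand_1    2.5000    0.6250    0.4000    0.0910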
def get_files_ligandArea(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".ligandArea"):
f_path = os.path.join(root,file)
only_mol2_file.append(f_path)
return only_mol2_file
def get_residues_receptor_from_ndx_files(f_name_ndx):
list_res = []
f_ndx = open(f_name_ndx,"r")
for line in f_ndx:
if line.find("rec_") > -1:
line = line.translate(None, "]")
line = line.translate(None, "[")
line = line.strip()
res = line.split("_")
res = res[1]+"_"+res[2]
list_res.append(res)
f_ndx.close()
return list_res
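# Hedged note (editorial; the group naming is inferred from the parsing above,
# the groups themselves come from the external make_ndx shell scripts): an
# index line such as "[ rec_LYS_49 ]" is reduced to the residue key "LYS_49".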
def save_log(finish_time, start_time):
log_file_name = 'vs_buried_areas_ligand_receptor.log'
current_path = os.getcwd()
path_file = os.path.join(current_path, log_file_name)
log_file = open(path_file, 'w')
diff_time = finish_time - start_time
msg = 'Starting ' + str(start_time) +'\n'
log_file.write(msg)
msg = 'Finishing ' + str(finish_time) +'\n'
log_file.write(msg)
msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\n'
log_file.write(msg)
def main():
config = configparser.ConfigParser()
config.read('config.ini')
#Path for Gromacs project
gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
    #Path where the PDB ligand files are - they did NOT take part in the docking
pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')
#Path that contains all files for analysis
path_analysis = config.get('DEFAULT', 'path_analysis')
    #Path where all receptor PDB files are
path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
#Path for saving pdb files of models generated by VS
path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
# Create SPARK config
maxResultSize = str(config.get('SPARK', 'maxResultSize'))
conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
# Create context
sc = SparkContext(conf=conf)
#Adding Python Source file
#Path for drugdesign project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
sc.addPyFile(os.path.join(path_spark_drugdesign,"vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"gromacs_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"pdb_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"json_utils.py"))
#Adding bash scripts
sc.addFile(os.path.join(path_spark_drugdesign,"make_ndx_buried_area_ligand.sh"))
    #Parameters from the command line
#Indicates probe. Example: 0.14
probe = float(sys.argv[1])
#Indicates ndots. Example: 24
ndots = int(sys.argv[2])
#Broadcast
path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
gromacs_path = sc.broadcast(gromacs_path)
pdb_ligand_path = sc.broadcast(pdb_ligand_path)
probe = sc.broadcast(probe)
ndots = sc.broadcast(ndots)
start_time = datetime.now()
os.environ["GMX_MAXBACKUP"]="-1"
#Loading all PDB receptor files into memory
list_all_pdb_receptor_files_path = []
all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
for receptor in all_receptor_for_complex:
list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))
for pdb_receptor_files in list_all_pdb_receptor_files_path:
#Getting receptor name by fully path
base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0]))
#PDB file loaded into memory is sent by broadcast
pdb_file_receptor = pdb_receptor_files[1]
pdb_file_receptor = sc.broadcast(pdb_file_receptor)
#Loading PDB model files based on receptor into memory
base_file_name_receptor_for_filter = base_file_name_receptor+"_-_"
all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter)
all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect()
# ********** Starting function **********************************************************
def save_model_receptor(list_receptor_model_file):
receptor_file = pdb_file_receptor.value #Obtained from broadcast
model_file = list_receptor_model_file[0]
full_path_for_save_complex = list_receptor_model_file[1]
            #Open file for writing the complex
f_compl = open(full_path_for_save_complex, "w")
#Insert lines of receptor
for item in receptor_file:
f_compl.write(item)
#Insert lines of model and insert Z chain
for item in model_file:
item = replace_chain_atom_line(item,"d","z")
f_compl.write(item)
f_compl.close()
# ********** Finish function **********************************************************
# ********** Starting function **********************************************************
def compute_buried_area_ligand(pdb_complex):
chZ = "chZ"
buried_lig_rec_perc = -1.0
buried_lig_rec = -1.0
buried_lig_lig = -1.0
buried_lig_lig_perc = -1.0
base_name = get_name_model_pdb(pdb_complex)
ligand_name = get_ligand_from_receptor_ligand_model(base_name)
receptor_name = get_receptor_from_receptor_ligand_model(base_name)
pose = get_model_from_receptor_ligand_model(base_name)
pdb_before_vs = os.path.join(pdb_ligand_path.value,ligand_name+".pdb")
#ndx files
f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+".ndx")
#xvg files
xvg_temp_sasa_lig_pose = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig_pose"+".xvg")
xvg_temp_sasa_lig_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig_complex"+".xvg")
xvg_temp_sasa_lig_min = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig_min"+".xvg")
# Creates a selection with the residues that are closer than 6A to the ligand
script_make_ndx_buried_area_ligand = SparkFiles.get("make_ndx_buried_area_ligand.sh") #Getting bash script that was copied by addFile command
command = script_make_ndx_buried_area_ligand + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx + " "+ xvg_temp_sasa_lig_pose + " "+ str(probe.value) + " "+ str(ndots.value) + " "+ xvg_temp_sasa_lig_complex + " "+ pdb_before_vs + " "+ xvg_temp_sasa_lig_min
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
try:
# SASA of the isolated ligand in the pose conformation
sasa_lig_pose = get_value_from_xvg_sasa(xvg_temp_sasa_lig_pose)
# SASA of the complexed ligand in the pose conformation
sasa_lig_complex = get_value_from_xvg_sasa(xvg_temp_sasa_lig_complex)
# SASA of the isolated ligand in its energy-minimized conformation. Only for carbohydrates!
sasa_lig_min = get_value_from_xvg_sasa(xvg_temp_sasa_lig_min)
# Area of the ligand which is buried in the receptor
buried_lig_rec = sasa_lig_pose - sasa_lig_complex
buried_lig_rec_perc = buried_lig_rec / sasa_lig_pose
# Area of the ligand in the pose conformation which is buried in itself when compared to the energy-minimized conformation
buried_lig_lig = sasa_lig_min - sasa_lig_pose
buried_lig_lig_perc = buried_lig_lig / sasa_lig_min
returned_list = (base_name, buried_lig_rec, buried_lig_rec_perc, buried_lig_lig, buried_lig_lig_perc)
#Deleting files
os.remove(f_ndx)
os.remove(xvg_temp_sasa_lig_pose)
os.remove(xvg_temp_sasa_lig_complex)
os.remove(xvg_temp_sasa_lig_min)
return returned_list
except:
return (base_name, float(0.0), float(0.0), float(0.0), float(0.0))
# ********** Finish function **********************************************************
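        # Hedged worked example (editorial) for compute_buried_area_ligand
        # above, with illustrative numbers: if SASA(pose) = 4.0 nm2,
        # SASA(complex) = 1.5 nm2 and SASA(minimized ligand) = 4.4 nm2, then
        # buried_lig_rec = 4.0 - 1.5 = 2.5 nm2 (perc = 2.5 / 4.0 = 0.625) and
        # buried_lig_lig = 4.4 - 4.0 = 0.4 nm2 (perc = 0.4 / 4.4 ~= 0.091).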
# ********** Starting function **********************************************************
def build_list_model_for_complex(model):
full_path_model = model[0]
model_file = model[1]
path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast
#Building complex file based on model file name
base_name_model = get_name_model_pdb(full_path_model)
complex_name = "compl_"+base_name_model+".pdb"
full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name)
list_receptor_model_file = (model_file, full_path_for_save_complex)
save_model_receptor(list_receptor_model_file)
list_ret = compute_buried_area_ligand(full_path_for_save_complex)
os.remove(full_path_for_save_complex)
return list_ret
# ********** Finish function **********************************************************
all_model_filesRDD = sc.parallelize(all_model_filesRDD)
all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()
        #Saving the buried areas of the ligand poses for this receptor
full_area_file = os.path.join(path_analysis,base_file_name_receptor+".ligandArea")
save_buried_area_ligand(full_area_file, all_model_filesRDD)
#Loading all area file
all_area_file = os.path.join(path_analysis,"*.ligandArea")
buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_ligandArea_files).collect()
    #Sorting by buried_lig_rec column
buried_area_sorted_by_buried_lig_rec = sorting_buried_area_ligand(sc, buried_areaRDD)
buried_area_sorted_by_buried_lig_rec = buried_area_sorted_by_buried_lig_rec.map(lambda p: (p.pose, p.buried_lig_rec, p.buried_lig_rec_perc, p.buried_lig_lig, p.buried_lig_lig_perc) ).collect() #p.receptor, p.ligand, p.model
#Saving buried area ligand file
path_file_buried_area = os.path.join(path_analysis, "summary_buried_area_ligand.dat")
save_buried_area_ligand_sort(path_file_buried_area, buried_area_sorted_by_buried_lig_rec)
#Removing all area files
all_area_files = get_files_ligandArea(path_analysis)
for area_file in all_area_files:
os.remove(area_file)
finish_time = datetime.now()
save_log(finish_time, start_time)
if __name__ == '__main__':
    main()
|
rodrigofaccioli/drugdesign
|
virtualscreening/vina/spark/buried_area_ligand.py
|
Python
|
apache-2.0
| 13,488
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cassandra import InvalidRequest
from cassandra.cluster import Cluster
from cassandra.cluster import NoHostAvailable
from cassandra.cqlengine import columns, CQLEngineException
from cassandra.cqlengine import connection as conn
from cassandra.cqlengine.management import drop_keyspace, sync_table, drop_table, create_keyspace_simple
from cassandra.cqlengine.models import Model, QuerySetDescriptor
from cassandra.cqlengine.query import ContextQuery, BatchQuery, ModelQuerySet
from tests.integration.cqlengine import setup_connection, DEFAULT_KEYSPACE
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration.cqlengine.query import test_queryset
class TestModel(Model):
__keyspace__ = 'ks1'
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
count = columns.Integer()
text = columns.Text()
class AnotherTestModel(Model):
__keyspace__ = 'ks1'
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
count = columns.Integer()
text = columns.Text()
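# Hedged usage sketch (editorial, mirroring what the tests below exercise;
# the host address and connection name are illustrative):
#
#   conn.register_connection('cluster', ['127.0.0.1'])
#   with ContextQuery(TestModel, connection='cluster') as tm:
#       sync_table(tm)
#       tm.objects.create(partition=1, cluster=1)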
class ContextQueryConnectionTests(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(ContextQueryConnectionTests, cls).setUpClass()
create_keyspace_simple('ks1', 1)
conn.unregister_connection('default')
conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True)
conn.register_connection('cluster', ['127.0.0.1'])
with ContextQuery(TestModel, connection='cluster') as tm:
sync_table(tm)
@classmethod
def tearDownClass(cls):
super(ContextQueryConnectionTests, cls).tearDownClass()
with ContextQuery(TestModel, connection='cluster') as tm:
drop_table(tm)
drop_keyspace('ks1', connections=['cluster'])
# reset the default connection
conn.unregister_connection('fake_cluster')
conn.unregister_connection('cluster')
setup_connection(DEFAULT_KEYSPACE)
def setUp(self):
super(BaseCassEngTestCase, self).setUp()
def test_context_connection_priority(self):
"""
Tests to ensure the proper connection priority is honored.
        Explicit connection should have highest priority,
        followed by the context query connection.
Default connection should be honored last.
@since 3.7
@jira_ticket PYTHON-613
@expected_result priorities should be honored
@test_category object_mapper
"""
# model keyspace write/read
# Set the default connection on the Model
TestModel.__connection__ = 'cluster'
with ContextQuery(TestModel) as tm:
tm.objects.create(partition=1, cluster=1)
# ContextQuery connection should have priority over default one
with ContextQuery(TestModel, connection='fake_cluster') as tm:
with self.assertRaises(NoHostAvailable):
tm.objects.create(partition=1, cluster=1)
# Explicit connection should have priority over ContextQuery one
with ContextQuery(TestModel, connection='fake_cluster') as tm:
tm.objects.using(connection='cluster').create(partition=1, cluster=1)
# Reset the default conn of the model
TestModel.__connection__ = None
# No model connection and an invalid default connection
with ContextQuery(TestModel) as tm:
with self.assertRaises(NoHostAvailable):
tm.objects.create(partition=1, cluster=1)
def test_context_connection_with_keyspace(self):
"""
Tests to ensure keyspace param is honored
@since 3.7
@jira_ticket PYTHON-613
@expected_result Invalid request is thrown
@test_category object_mapper
"""
# ks2 doesn't exist
with ContextQuery(TestModel, connection='cluster', keyspace='ks2') as tm:
with self.assertRaises(InvalidRequest):
tm.objects.create(partition=1, cluster=1)
class ManagementConnectionTests(BaseCassEngTestCase):
keyspaces = ['ks1', 'ks2']
conns = ['cluster']
@classmethod
def setUpClass(cls):
super(ManagementConnectionTests, cls).setUpClass()
conn.unregister_connection('default')
conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True)
conn.register_connection('cluster', ['127.0.0.1'])
@classmethod
def tearDownClass(cls):
super(ManagementConnectionTests, cls).tearDownClass()
# reset the default connection
conn.unregister_connection('fake_cluster')
conn.unregister_connection('cluster')
setup_connection(DEFAULT_KEYSPACE)
def setUp(self):
super(BaseCassEngTestCase, self).setUp()
def test_create_drop_keyspace(self):
"""
Tests drop and create keyspace with connections explicitly set
@since 3.7
@jira_ticket PYTHON-613
@expected_result keyspaces should be created and dropped
@test_category object_mapper
"""
# No connection (default is fake)
with self.assertRaises(NoHostAvailable):
create_keyspace_simple(self.keyspaces[0], 1)
# Explicit connections
for ks in self.keyspaces:
create_keyspace_simple(ks, 1, connections=self.conns)
for ks in self.keyspaces:
drop_keyspace(ks, connections=self.conns)
def test_create_drop_table(self):
"""
Tests drop and create Table with connections explicitly set
@since 3.7
@jira_ticket PYTHON-613
@expected_result Tables should be created and dropped
@test_category object_mapper
"""
for ks in self.keyspaces:
create_keyspace_simple(ks, 1, connections=self.conns)
# No connection (default is fake)
with self.assertRaises(NoHostAvailable):
sync_table(TestModel)
# Explicit connections
sync_table(TestModel, connections=self.conns)
# Explicit drop
drop_table(TestModel, connections=self.conns)
# Model connection
TestModel.__connection__ = 'cluster'
sync_table(TestModel)
TestModel.__connection__ = None
# No connection (default is fake)
with self.assertRaises(NoHostAvailable):
drop_table(TestModel)
# Model connection
TestModel.__connection__ = 'cluster'
drop_table(TestModel)
TestModel.__connection__ = None
# Model connection
for ks in self.keyspaces:
drop_keyspace(ks, connections=self.conns)
def test_connection_creation_from_session(self):
"""
Test to ensure that you can register a connection from a session
@since 3.8
@jira_ticket PYTHON-649
@expected_result queries should execute appropriately
@test_category object_mapper
"""
session = Cluster(['127.0.0.1']).connect()
connection_name = 'from_session'
conn.register_connection(connection_name, session=session)
self.addCleanup(conn.unregister_connection, connection_name)
def test_connection_param_validation(self):
"""
Test to validate that invalid parameter combinations for registering connections via session are not tolerated
@since 3.8
@jira_ticket PYTHON-649
@expected_result queries should execute appropriately
@test_category object_mapper
"""
session = Cluster(['127.0.0.1']).connect()
with self.assertRaises(CQLEngineException):
conn.register_connection("bad_coonection1", session=session, consistency="not_null")
with self.assertRaises(CQLEngineException):
conn.register_connection("bad_coonection2", session=session, lazy_connect="not_null")
with self.assertRaises(CQLEngineException):
conn.register_connection("bad_coonection3", session=session, retry_connect="not_null")
with self.assertRaises(CQLEngineException):
conn.register_connection("bad_coonection4", session=session, cluster_options="not_null")
with self.assertRaises(CQLEngineException):
conn.register_connection("bad_coonection5", hosts="not_null", session=session)
class BatchQueryConnectionTests(BaseCassEngTestCase):
conns = ['cluster']
@classmethod
def setUpClass(cls):
super(BatchQueryConnectionTests, cls).setUpClass()
create_keyspace_simple('ks1', 1)
sync_table(TestModel)
sync_table(AnotherTestModel)
conn.unregister_connection('default')
conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True)
conn.register_connection('cluster', ['127.0.0.1'])
@classmethod
def tearDownClass(cls):
super(BatchQueryConnectionTests, cls).tearDownClass()
# reset the default connection
conn.unregister_connection('fake_cluster')
conn.unregister_connection('cluster')
setup_connection(DEFAULT_KEYSPACE)
drop_keyspace('ks1')
def setUp(self):
super(BaseCassEngTestCase, self).setUp()
def test_basic_batch_query(self):
"""
Test Batch queries with connections explicitly set
@since 3.7
@jira_ticket PYTHON-613
@expected_result queries should execute appropriately
@test_category object_mapper
"""
# No connection with a QuerySet (default is a fake one)
with self.assertRaises(NoHostAvailable):
with BatchQuery() as b:
TestModel.objects.batch(b).create(partition=1, cluster=1)
# Explicit connection with a QuerySet
with BatchQuery(connection='cluster') as b:
TestModel.objects.batch(b).create(partition=1, cluster=1)
        # Get an object from the DB
with ContextQuery(TestModel, connection='cluster') as tm:
obj = tm.objects.get(partition=1, cluster=1)
obj.__connection__ = None
# No connection with a model (default is a fake one)
with self.assertRaises(NoHostAvailable):
with BatchQuery() as b:
obj.count = 2
obj.batch(b).save()
# Explicit connection with a model
with BatchQuery(connection='cluster') as b:
obj.count = 2
obj.batch(b).save()
def test_batch_query_different_connection(self):
"""
Test BatchQuery with Models that have a different connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result queries should execute appropriately
@test_category object_mapper
"""
# Testing on a model class
TestModel.__connection__ = 'cluster'
AnotherTestModel.__connection__ = 'cluster2'
with self.assertRaises(CQLEngineException):
with BatchQuery() as b:
TestModel.objects.batch(b).create(partition=1, cluster=1)
AnotherTestModel.objects.batch(b).create(partition=1, cluster=1)
TestModel.__connection__ = None
AnotherTestModel.__connection__ = None
with BatchQuery(connection='cluster') as b:
TestModel.objects.batch(b).create(partition=1, cluster=1)
AnotherTestModel.objects.batch(b).create(partition=1, cluster=1)
# Testing on a model instance
with ContextQuery(TestModel, AnotherTestModel, connection='cluster') as (tm, atm):
obj1 = tm.objects.get(partition=1, cluster=1)
obj2 = atm.objects.get(partition=1, cluster=1)
obj1.__connection__ = 'cluster'
obj2.__connection__ = 'cluster2'
obj1.count = 4
obj2.count = 4
with self.assertRaises(CQLEngineException):
with BatchQuery() as b:
obj1.batch(b).save()
obj2.batch(b).save()
def test_batch_query_connection_override(self):
"""
Test that we cannot override a BatchQuery connection per model
@since 3.7
@jira_ticket PYTHON-613
@expected_result Proper exceptions should be raised
@test_category object_mapper
"""
with self.assertRaises(CQLEngineException):
with BatchQuery(connection='cluster') as b:
TestModel.batch(b).using(connection='test').save()
with self.assertRaises(CQLEngineException):
with BatchQuery(connection='cluster') as b:
TestModel.using(connection='test').batch(b).save()
with ContextQuery(TestModel, AnotherTestModel, connection='cluster') as (tm, atm):
obj1 = tm.objects.get(partition=1, cluster=1)
obj1.__connection__ = None
with self.assertRaises(CQLEngineException):
with BatchQuery(connection='cluster') as b:
obj1.using(connection='test').batch(b).save()
with self.assertRaises(CQLEngineException):
with BatchQuery(connection='cluster') as b:
obj1.batch(b).using(connection='test').save()
class UsingDescriptorTests(BaseCassEngTestCase):
conns = ['cluster']
keyspaces = ['ks1', 'ks2']
@classmethod
def setUpClass(cls):
super(UsingDescriptorTests, cls).setUpClass()
conn.unregister_connection('default')
conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True)
conn.register_connection('cluster', ['127.0.0.1'])
@classmethod
def tearDownClass(cls):
super(UsingDescriptorTests, cls).tearDownClass()
# reset the default connection
conn.unregister_connection('fake_cluster')
conn.unregister_connection('cluster')
setup_connection(DEFAULT_KEYSPACE)
for ks in cls.keyspaces:
drop_keyspace(ks)
def setUp(self):
super(BaseCassEngTestCase, self).setUp()
def _reset_data(self):
for ks in self.keyspaces:
drop_keyspace(ks, connections=self.conns)
for ks in self.keyspaces:
create_keyspace_simple(ks, 1, connections=self.conns)
sync_table(TestModel, keyspaces=self.keyspaces, connections=self.conns)
def test_keyspace(self):
"""
Test keyspace segregation when same connection is used
@since 3.7
@jira_ticket PYTHON-613
        @expected_result Keyspace segregation is honored
@test_category object_mapper
"""
self._reset_data()
with ContextQuery(TestModel, connection='cluster') as tm:
# keyspace Model class
tm.objects.using(keyspace='ks2').create(partition=1, cluster=1)
tm.objects.using(keyspace='ks2').create(partition=2, cluster=2)
with self.assertRaises(TestModel.DoesNotExist):
tm.objects.get(partition=1, cluster=1) # default keyspace ks1
obj1 = tm.objects.using(keyspace='ks2').get(partition=1, cluster=1)
obj1.count = 2
obj1.save()
with self.assertRaises(NoHostAvailable):
TestModel.objects.using(keyspace='ks2').get(partition=1, cluster=1)
obj2 = TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=1, cluster=1)
self.assertEqual(obj2.count, 2)
# Update test
TestModel.objects(partition=2, cluster=2).using(connection='cluster', keyspace='ks2').update(count=5)
obj3 = TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=2, cluster=2)
self.assertEqual(obj3.count, 5)
TestModel.objects(partition=2, cluster=2).using(connection='cluster', keyspace='ks2').delete()
with self.assertRaises(TestModel.DoesNotExist):
TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=2, cluster=2)
def test_connection(self):
"""
Test basic connection functionality
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
self._reset_data()
# Model class
with self.assertRaises(NoHostAvailable):
TestModel.objects.create(partition=1, cluster=1)
TestModel.objects.using(connection='cluster').create(partition=1, cluster=1)
TestModel.objects(partition=1, cluster=1).using(connection='cluster').update(count=2)
obj1 = TestModel.objects.using(connection='cluster').get(partition=1, cluster=1)
self.assertEqual(obj1.count, 2)
obj1.using(connection='cluster').update(count=5)
obj1 = TestModel.objects.using(connection='cluster').get(partition=1, cluster=1)
self.assertEqual(obj1.count, 5)
obj1.using(connection='cluster').delete()
with self.assertRaises(TestModel.DoesNotExist):
TestModel.objects.using(connection='cluster').get(partition=1, cluster=1)
class ModelQuerySetNew(ModelQuerySet):
def __init__(self, *args, **kwargs):
super(ModelQuerySetNew, self).__init__(*args, **kwargs)
self._connection = "cluster"
class BaseConnectionTestNoDefault(object):
conns = ['cluster']
@classmethod
def setUpClass(cls):
conn.register_connection('cluster', ['127.0.0.1'])
test_queryset.TestModel.__queryset__ = ModelQuerySetNew
test_queryset.IndexedTestModel.__queryset__ = ModelQuerySetNew
test_queryset.IndexedCollectionsTestModel.__queryset__ = ModelQuerySetNew
test_queryset.TestMultiClusteringModel.__queryset__ = ModelQuerySetNew
super(BaseConnectionTestNoDefault, cls).setUpClass()
conn.unregister_connection('default')
@classmethod
def tearDownClass(cls):
conn.unregister_connection('cluster')
setup_connection(DEFAULT_KEYSPACE)
super(BaseConnectionTestNoDefault, cls).tearDownClass()
# reset the default connection
def setUp(self):
super(BaseCassEngTestCase, self).setUp()
class TestQuerySetOperationConnection(BaseConnectionTestNoDefault, test_queryset.TestQuerySetOperation):
"""
Execute test_queryset.TestQuerySetOperation using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetDistinctNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetDistinct):
"""
Execute test_queryset.TestQuerySetDistinct using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetOrderingNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetOrdering):
"""
Execute test_queryset.TestQuerySetOrdering using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetCountSelectionAndIterationNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetCountSelectionAndIteration):
"""
    Execute test_queryset.TestQuerySetCountSelectionAndIteration using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetSlicingNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetSlicing):
"""
    Execute test_queryset.TestQuerySetSlicing using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetValidationNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetValidation):
"""
    Execute test_queryset.TestQuerySetValidation using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestQuerySetDeleteNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetDelete):
"""
Execute test_queryset.TestQuerySetDelete using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestValuesListNoDefault(BaseConnectionTestNoDefault, test_queryset.TestValuesList):
"""
Execute test_queryset.TestValuesList using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
class TestObjectsPropertyNoDefault(BaseConnectionTestNoDefault, test_queryset.TestObjectsProperty):
"""
Execute test_queryset.TestObjectsProperty using non default connection
@since 3.7
@jira_ticket PYTHON-613
@expected_result proper connection should be used
@test_category object_mapper
"""
pass
|
vipjml/python-driver
|
tests/integration/cqlengine/test_connections.py
|
Python
|
apache-2.0
| 21,818
|
from doajtest.helpers import DoajTestCase
from portality import models
from portality import lcc
from portality.forms.application_forms import JournalFormFactory
from portality.forms.application_processors import ManEdJournalReview
from doajtest.fixtures import JournalFixtureFactory
from copy import deepcopy
JOURNAL_SOURCE = JournalFixtureFactory.make_journal_source()
JOURNAL_FORM = JournalFixtureFactory.make_journal_form()
#####################################################################
# Mocks required to make some of the lookups work
#####################################################################
@classmethod
def editor_group_pull(cls, field, value):
eg = models.EditorGroup()
eg.set_editor("eddie")
eg.set_associates(["associate", "assan"])
eg.set_name("Test Editor Group")
return eg
def mock_lookup_code(code):
if code == "H": return "Social Sciences"
if code == "HB1-3840": return "Economic theory. Demography"
return None
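# Editorial note: TestManEdJournalReview below monkeypatches
# models.EditorGroup.pull_by_key and lcc.lookup_code with the stand-ins above
# in setUp() and restores the originals in tearDown(), so the form processor
# can resolve editor groups and LCC codes without real backing data.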
class TestManEdJournalReview(DoajTestCase):
def setUp(self):
super(TestManEdJournalReview, self).setUp()
self.editor_group_pull = models.EditorGroup.pull_by_key
models.EditorGroup.pull_by_key = editor_group_pull
self.old_lookup_code = lcc.lookup_code
lcc.lookup_code = mock_lookup_code
def tearDown(self):
super(TestManEdJournalReview, self).tearDown()
models.EditorGroup.pull_by_key = self.editor_group_pull
lcc.lookup_code = self.old_lookup_code
def test_01_maned_review_success(self):
"""Give the Managing Editor's journal form a full workout"""
journal = models.Journal(**JOURNAL_SOURCE)
owner = models.Account()
owner.set_id("owner")
owner.save(blocking=True)
journal.set_owner(owner.id)
JOURNAL_FORM["owner"] = owner.id
# we start by constructing it from source
formulaic_context = JournalFormFactory.context("admin")
fc = formulaic_context.processor(source=journal)
# fc = formcontext.JournalFormFactory.get_form_context(role="admin", source=models.Journal(**JOURNAL_SOURCE))
assert isinstance(fc, ManEdJournalReview)
assert fc.form is not None
assert fc.source is not None
assert fc.form_data is None
# no need to check form rendering - there are no disabled fields
# now construct it from form data (with a known source)
fc = formulaic_context.processor(
formdata=JOURNAL_FORM,
source=journal
)
assert isinstance(fc, ManEdJournalReview)
assert fc.form is not None
assert fc.source is not None
assert fc.form_data is not None
# test each of the workflow components individually ...
# pre-validate and ensure that the disabled fields get re-set
fc.pre_validate()
# no disabled fields, so just test the function runs
# run the validation itself
assert fc.validate(), fc.form.errors
# run the crosswalk (no need to look in detail, xwalks are tested elsewhere)
fc.form2target()
assert fc.target is not None
# patch the target with data from the source
fc.patch_target()
assert fc.target.created_date == "2000-01-01T00:00:00Z"
assert fc.target.id == "abcdefghijk_journal"
assert fc.target.current_application == "qwertyuiop"
related = fc.target.related_applications
assert len(related) == 2
assert related[0].get("application_id") == "asdfghjkl"
assert related[0].get("date_accepted") == "2018-01-01T00:00:00Z"
assert related[1].get("application_id") == "zxcvbnm"
assert related[1].get("date_accepted") is None
# everything else is overridden by the form, so no need to check it has patched
# now do finalise (which will also re-run all of the steps above)
fc.finalise()
assert True # gives us a place to drop a break point later if we need it
def test_02_maned_review_optional_validation(self):
"""Test optional validation in the Managing Editor's journal form"""
# construct it from form data (with a known source)
formulaic_context = JournalFormFactory.context("admin")
fc = formulaic_context.processor(
formdata=JOURNAL_FORM,
source=models.Journal(**JOURNAL_SOURCE)
)
# do not re-test the form context
# run the validation, but make it fail by omitting a required field
fc.form.title.data = ''
assert not fc.validate()
# tick the optional validation box and try again
fc.form.make_all_fields_optional.data = True
assert fc.validate(), fc.form.errors
# run the crosswalk, don't test it at all in this test
fc.form2target()
# patch the target with data from the source
fc.patch_target()
# right, so let's see if we managed to get a title-less journal from this
assert fc.target.bibjson().title is None, fc.target.bibjson().title
def test_04_maned_review_doaj_seal(self):
"""Test the seal checkbox on the maned review form"""
# construct it from form data (with a known source)
formulaic_context = JournalFormFactory.context("admin")
fc = formulaic_context.processor(
formdata=JOURNAL_FORM,
source=models.Journal(**JOURNAL_SOURCE)
)
# set the seal to False using the form
fc.form.doaj_seal.data = False
# run the crosswalk, don't test it at all in this test
fc.form2target()
# patch the target with data from the source
fc.patch_target()
# ensure the model has seal set to False
assert fc.target.has_seal() is False
# Set the seal to True in the object and check the form reflects this
fc.source.set_seal(True)
fc.source2form()
assert fc.form.doaj_seal.data is True
def test_05_maned_review_continuations(self):
# construct it from form data (with a known source)
formulaic_context = JournalFormFactory.context("admin")
fc = formulaic_context.processor(
formdata=JOURNAL_FORM,
source=models.Journal(**JOURNAL_SOURCE)
)
# check the form has the continuations data
assert fc.form.continues.data == ["1111-1111"]
assert fc.form.continued_by.data == ["2222-2222"]
assert fc.form.discontinued_date.data == "2001-01-01"
# run the crosswalk, don't test it at all in this test
fc.form2target()
# patch the target with data from the source
fc.patch_target()
# ensure the model has the continuations data
assert fc.target.bibjson().replaces == ["1111-1111"]
assert fc.target.bibjson().is_replaced_by == ["2222-2222"]
assert fc.target.bibjson().discontinued_date == "2001-01-01"
def test_06_maned_review_no_continuation(self):
source = deepcopy(JOURNAL_SOURCE)
source["bibjson"]["replaces"] = []
source["bibjson"]["is_replaced_by"] = []
source["bibjson"]["discontinued_date"] = ""
j = models.Journal(**source)
bj = j.bibjson() # just checking this works, as it uses an inner DataObj
form = deepcopy(JOURNAL_FORM)
form["continues"] = ""
form["continued_by"] = ""
form["discontinued_date"] = ""
# construct it from form data (with a known source)
formulaic_context = JournalFormFactory.context("admin")
fc = formulaic_context.processor(
formdata=form,
source=j
)
# check the form has the continuations data
assert fc.form.continues.data == []
assert fc.form.continued_by.data == []
assert fc.form.discontinued_date.data == ""
# run the crosswalk, don't test it at all in this test
fc.form2target()
# patch the target with data from the source
fc.patch_target()
# ensure the model has the continuations data
assert fc.target.bibjson().replaces == []
assert fc.target.bibjson().is_replaced_by == []
assert fc.target.bibjson().discontinued_date is None
|
DOAJ/doaj
|
doajtest/unit/application_processors/test_maned_journal_review.py
|
Python
|
apache-2.0
| 8,279
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('iscsi_targets', meta, autoload=True)
table.drop()
|
dims/cinder
|
cinder/db/sqlalchemy/migrate_repo/versions/063_drop_iscsi_targets_table.py
|
Python
|
apache-2.0
| 769
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""risr_proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# pylint: disable=import-error
from django.contrib import admin
from django.urls import path, re_path
from dashboard import views
urlpatterns = [
path('admin/', admin.site.urls),
re_path(r'^api/dashboard/$', views.dashboard_list),
]
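# Illustrative only (not wired into this project): following the URLconf notes
# in the module docstring, another app's routes could be mounted with include(),
# e.g.
#   from django.urls import include
#   urlpatterns.append(path('blog/', include('blog.urls')))
# where 'blog' is a hypothetical app name.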
|
googleinterns/risr
|
risr-app/risr_proj/risr_proj/urls.py
|
Python
|
apache-2.0
| 1,476
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import netaddr
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import utils
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
net_name = forms.CharField(max_length=255,
label=_("Network Name"),
required=False)
if api.neutron.is_port_profiles_supported():
widget = None
else:
widget = forms.HiddenInput()
net_profile_id = forms.ChoiceField(label=_("Network Profile"),
required=False,
widget=widget)
admin_state = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"),
required=False,
help_text=_("The state to start"
" the network in."))
shared = forms.BooleanField(label=_("Shared"), initial=False,
required=False)
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tab': 'create_network__'
'createsubnetinfo'
'action,'
'create_network__'
'createsubnetdetail'
'action,',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
def __init__(self, request, *args, **kwargs):
super(CreateNetworkInfoAction, self).__init__(request,
*args, **kwargs)
if api.neutron.is_port_profiles_supported():
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
profiles = []
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
# TODO(absubram): Add ability to view network profile information
# in the network detail if a profile is used.
class Meta(object):
name = _("Network")
help_text = _("Create a new network. "
"In addition, a subnet associated with the network "
"can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
action_class = CreateNetworkInfoAction
contributes = ("net_name", "admin_state", "net_profile_id", "with_subnet",
"shared")
class CreateSubnetInfoAction(workflows.Action):
subnet_name = forms.CharField(max_length=255,
widget=forms.TextInput(attrs={
}),
label=_("Subnet Name"),
required=False)
address_source = forms.ChoiceField(
required=False,
label=_('Network Address Source'),
choices=[('manual', _('Enter Network Address manually')),
('subnetpool', _('Allocate Network Address from a pool'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source',
}))
subnetpool = forms.ChoiceField(
label=_("Address pool"),
widget=forms.SelectWidget(attrs={
'class': 'switched switchable',
'data-slug': 'subnetpool',
'data-switch-on': 'source',
'data-source-subnetpool': _('Address pool')},
data_attrs=('name', 'prefixes',
'ip_version',
'min_prefixlen',
'max_prefixlen',
'default_prefixlen'),
transform=lambda x: "%s (%s)" % (x.name, ", ".join(x.prefixes))
if 'prefixes' in x else "%s" % (x.name)),
required=False)
prefixlen = forms.ChoiceField(widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'subnetpool',
}),
label=_('Network Mask'),
required=False)
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-manual': _("Network Address"),
}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24, 2001:DB8::/48)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'ipversion',
}),
label=_("IP Version"),
required=False)
gateway_ip = forms.IPField(
label=_("Gateway IP"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source gateway_ip',
'data-source-manual': _("Gateway IP")
}),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
"The default value is the first IP of the "
"network address "
"(e.g. 192.168.0.1 for 192.168.0.0/24, "
"2001:DB8::1 for 2001:DB8::/48). "
"If you use the default, leave blank. "
"If you do not want to use a gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'gateway_ip',
'data-hide-on-checked': 'true'
}),
initial=False,
required=False)
msg = _('Specify "Network Address", "Address pool" or '
'clear "Create Subnet" checkbox.')
class Meta(object):
name = _("Subnet")
help_text = _('Create a subnet associated with the network. '
'Advanced configuration is available by clicking on the '
'"Subnet Details" tab.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if 'with_subnet' in context:
self.fields['with_subnet'] = forms.BooleanField(
initial=context['with_subnet'],
required=False,
widget=forms.HiddenInput()
)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ip_version'].widget = forms.HiddenInput()
self.fields['ip_version'].initial = 4
try:
if api.neutron.is_extension_supported(request,
'subnet_allocation'):
self.fields['subnetpool'].choices = \
self.get_subnetpool_choices(request)
else:
self.hide_subnetpool_choices()
except Exception:
self.hide_subnetpool_choices()
msg = _('Unable to initialize subnetpools')
exceptions.handle(request, msg)
if len(self.fields['subnetpool'].choices) > 1:
# Pre-populate prefixlen choices to satisfy Django
# ChoiceField Validation. This is overridden w/data from
# subnetpool on select.
self.fields['prefixlen'].choices = \
zip(list(range(0, 128 + 1)),
list(range(0, 128 + 1)))
# Populate data-fields for switching the prefixlen field
# when user selects a subnetpool other than
# "Provider default pool"
for (id, name) in self.fields['subnetpool'].choices:
if not len(id):
continue
key = 'data-subnetpool-' + id
self.fields['prefixlen'].widget.attrs[key] = \
_('Network Mask')
else:
self.hide_subnetpool_choices()
def get_subnetpool_choices(self, request):
subnetpool_choices = [('', _('Select a pool'))]
default_ipv6_subnet_pool_label = \
getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}).get(
'default_ipv6_subnet_pool_label', None)
default_ipv4_subnet_pool_label = \
getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}).get(
'default_ipv4_subnet_pool_label', None)
if default_ipv6_subnet_pool_label:
subnetpool_dict = {'ip_version': 6,
'name': default_ipv6_subnet_pool_label}
subnetpool = api.neutron.SubnetPool(subnetpool_dict)
subnetpool_choices.append(('', subnetpool))
if default_ipv4_subnet_pool_label:
subnetpool_dict = {'ip_version': 4,
'name': default_ipv4_subnet_pool_label}
subnetpool = api.neutron.SubnetPool(subnetpool_dict)
subnetpool_choices.append(('', subnetpool))
for subnetpool in api.neutron.subnetpool_list(request):
subnetpool_choices.append((subnetpool.id, subnetpool))
return subnetpool_choices
def hide_subnetpool_choices(self):
self.fields['address_source'].widget = forms.HiddenInput()
self.fields['subnetpool'].choices = []
self.fields['subnetpool'].widget = forms.HiddenInput()
self.fields['prefixlen'].widget = forms.HiddenInput()
def _check_subnet_data(self, cleaned_data, is_create=True):
cidr = cleaned_data.get('cidr')
ip_version = int(cleaned_data.get('ip_version'))
gateway_ip = cleaned_data.get('gateway_ip')
no_gateway = cleaned_data.get('no_gateway')
address_source = cleaned_data.get('address_source')
# When creating network from a pool it is allowed to supply empty
# subnetpool_id signaling that Neutron should choose the default
# pool configured by the operator. This is also part of the IPv6
# Prefix Delegation Workflow.
if not cidr and address_source != 'subnetpool':
raise forms.ValidationError(self.msg)
if cidr:
subnet = netaddr.IPNetwork(cidr)
if subnet.version != ip_version:
msg = _('Network Address and IP version are inconsistent.')
raise forms.ValidationError(msg)
if (ip_version == 4 and subnet.prefixlen == 32) or \
(ip_version == 6 and subnet.prefixlen == 128):
msg = _("The subnet in the Network Address is "
"too small (/%s).") % subnet.prefixlen
raise forms.ValidationError(msg)
if not no_gateway and gateway_ip:
            if netaddr.IPAddress(gateway_ip).version != ip_version:
msg = _('Gateway IP and IP version are inconsistent.')
raise forms.ValidationError(msg)
if not is_create and not no_gateway and not gateway_ip:
msg = _('Specify IP address of gateway or '
'check "Disable Gateway".')
raise forms.ValidationError(msg)
def clean(self):
cleaned_data = super(CreateSubnetInfoAction, self).clean()
with_subnet = cleaned_data.get('with_subnet')
if not with_subnet:
return cleaned_data
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway", "subnetpool",
"prefixlen", "address_source")
class CreateSubnetDetailAction(workflows.Action):
enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
initial=True, required=False)
ipv6_modes = forms.ChoiceField(
label=_("IPv6 Address Configuration Mode"),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'ipversion',
'data-ipversion-6': _("IPv6 Address Configuration Mode"),
}),
initial=utils.IPV6_DEFAULT_MODE,
required=False,
help_text=_("Specifies how IPv6 addresses and additional information "
"are configured. We can specify SLAAC/DHCPv6 stateful/"
"DHCPv6 stateless provided by OpenStack, "
"or specify no option. "
"'No options specified' means addresses are configured "
"manually or configured by a non-OpenStack system."))
allocation_pools = forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Allocation Pools"),
help_text=_("IP address allocation pools. Each entry is: "
"start_ip_address,end_ip_address "
"(e.g., 192.168.1.100,192.168.1.120) "
"and one entry per line."),
required=False)
dns_nameservers = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("DNS Name Servers"),
help_text=_("IP address list of DNS name servers for this subnet. "
"One entry per line."),
required=False)
host_routes = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("Host Routes"),
help_text=_("Additional routes announced to the hosts. "
"Each entry is: destination_cidr,nexthop "
"(e.g., 192.168.200.0/24,10.56.1.254) "
"and one entry per line."),
required=False)
class Meta(object):
name = _("Subnet Details")
help_text = _('Specify additional attributes for the subnet.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetDetailAction, self).__init__(request, context,
*args, **kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ipv6_modes'].widget = forms.HiddenInput()
def populate_ipv6_modes_choices(self, request, context):
return [(value, _("%s (Default)") % label)
if value == utils.IPV6_DEFAULT_MODE
else (value, label)
for value, label in utils.IPV6_MODE_CHOICES]
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(ip)s)')
% {'field_name': field_name, 'ip': ip})
raise forms.ValidationError(msg)
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(network)s)')
% {'field_name': field_name, 'network': network})
raise forms.ValidationError(msg)
def _check_allocation_pools(self, allocation_pools):
for p in allocation_pools.split('\n'):
p = p.strip()
if not p:
continue
pool = p.split(',')
if len(pool) != 2:
msg = _('Start and end addresses must be specified '
'(value=%s)') % p
raise forms.ValidationError(msg)
start, end = [self._convert_ip_address(ip, "allocation_pools")
for ip in pool]
if start > end:
msg = _('Start address is larger than end address '
'(value=%s)') % p
raise forms.ValidationError(msg)
def _check_dns_nameservers(self, dns_nameservers):
for ns in dns_nameservers.split('\n'):
ns = ns.strip()
if not ns:
continue
self._convert_ip_address(ns, "dns_nameservers")
def _check_host_routes(self, host_routes):
for r in host_routes.split('\n'):
r = r.strip()
if not r:
continue
route = r.split(',')
if len(route) != 2:
msg = _('Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)') % r
raise forms.ValidationError(msg)
self._convert_ip_network(route[0], "host_routes")
self._convert_ip_address(route[1], "host_routes")
def clean(self):
cleaned_data = super(CreateSubnetDetailAction, self).clean()
self._check_allocation_pools(cleaned_data.get('allocation_pools'))
self._check_host_routes(cleaned_data.get('host_routes'))
self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
return cleaned_data
class CreateSubnetDetail(workflows.Step):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "ipv6_modes", "allocation_pools",
"dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
slug = "create_network"
name = _("Create Network")
finalize_button_name = _("Create")
success_message = _('Created network "%s".')
failure_message = _('Unable to create network "%s".')
default_steps = (CreateNetworkInfo,
CreateSubnetInfo,
CreateSubnetDetail)
wizard = True
def get_success_url(self):
return reverse("horizon:project:networks:index")
def get_failure_url(self):
return reverse("horizon:project:networks:index")
def format_status_message(self, message):
name = self.context.get('net_name') or self.context.get('net_id', '')
return message % name
def _create_network(self, request, data):
try:
params = {'name': data['net_name'],
'admin_state_up': (data['admin_state'] == 'True'),
'shared': data['shared']}
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
network = api.neutron.network_create(request, **params)
self.context['net_id'] = network.id
msg = (_('Network "%s" was successfully created.') %
network.name_or_id)
LOG.debug(msg)
return network
except Exception as e:
msg = (_('Failed to create network "%(network)s": %(reason)s') %
{"network": data['net_name'], "reason": e})
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
return False
def _setup_subnet_parameters(self, params, data, is_create=True):
"""Setup subnet parameters
        This method sets up the subnet parameters that are available
        in both create and update.
"""
is_update = not is_create
params['enable_dhcp'] = data['enable_dhcp']
if int(data['ip_version']) == 6:
ipv6_modes = utils.get_ipv6_modes_attrs_from_menu(
data['ipv6_modes'])
if ipv6_modes[0] and is_create:
params['ipv6_ra_mode'] = ipv6_modes[0]
if ipv6_modes[1] and is_create:
params['ipv6_address_mode'] = ipv6_modes[1]
if data['allocation_pools']:
pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
for pool in data['allocation_pools'].split('\n')
if pool.strip()]
params['allocation_pools'] = pools
if data['host_routes'] or is_update:
routes = [dict(zip(['destination', 'nexthop'],
route.strip().split(',')))
for route in data['host_routes'].split('\n')
if route.strip()]
params['host_routes'] = routes
if data['dns_nameservers'] or is_update:
nameservers = [ns.strip()
for ns in data['dns_nameservers'].split('\n')
if ns.strip()]
params['dns_nameservers'] = nameservers
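    # Illustrative sketch (the form values below are hypothetical, not part of
    # the original module): given textarea-style form data, the method above
    # fills `params` roughly as follows.
    #
    #   data['allocation_pools'] = "192.168.1.100,192.168.1.120"
    #   data['host_routes'] = "192.168.200.0/24,10.56.1.254"
    #   data['dns_nameservers'] = "8.8.8.8"
    #
    #   params['allocation_pools'] == [{'start': '192.168.1.100',
    #                                   'end': '192.168.1.120'}]
    #   params['host_routes'] == [{'destination': '192.168.200.0/24',
    #                              'nexthop': '10.56.1.254'}]
    #   params['dns_nameservers'] == ['8.8.8.8']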
def _create_subnet(self, request, data, network=None, tenant_id=None,
no_redirect=False):
if network:
network_id = network.id
network_name = network.name
else:
network_id = self.context.get('network_id')
network_name = self.context.get('network_name')
try:
params = {'network_id': network_id,
'name': data['subnet_name']}
if 'cidr' in data and data['cidr']:
params['cidr'] = data['cidr']
if 'ip_version' in data and data['ip_version']:
params['ip_version'] = int(data['ip_version'])
if tenant_id:
params['tenant_id'] = tenant_id
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
if 'subnetpool' in data and len(data['subnetpool']):
params['subnetpool_id'] = data['subnetpool']
if 'prefixlen' in data and len(data['prefixlen']):
params['prefixlen'] = data['prefixlen']
self._setup_subnet_parameters(params, data)
subnet = api.neutron.subnet_create(request, **params)
self.context['subnet_id'] = subnet.id
msg = _('Subnet "%s" was successfully created.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
' %(reason)s')
if no_redirect:
redirect = None
else:
redirect = self.get_failure_url()
exceptions.handle(request,
msg % {"sub": data['cidr'], "net": network_name,
"reason": e},
redirect=redirect)
return False
def _delete_network(self, request, network):
"""Delete the created network when subnet creation failed."""
try:
api.neutron.network_delete(request, network.id)
msg = _('Delete the created network "%s" '
'due to subnet creation failure.') % network.name
LOG.debug(msg)
redirect = self.get_failure_url()
messages.info(request, msg)
raise exceptions.Http302(redirect)
except Exception:
msg = _('Failed to delete network "%s"') % network.name
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
def handle(self, request, data):
network = self._create_network(request, data)
if not network:
return False
# If we do not need to create a subnet, return here.
if not data['with_subnet']:
return True
subnet = self._create_subnet(request, data, network, no_redirect=True)
if subnet:
return True
else:
self._delete_network(request, network)
return False
|
Tesora/tesora-horizon
|
openstack_dashboard/dashboards/project/networks/workflows.py
|
Python
|
apache-2.0
| 26,337
|
from .selector_spec import ( # noqa: F401
SelectionUnion,
SelectionSpec,
SelectionIntersection,
SelectionDifference,
SelectionCriteria,
)
from .selector import ( # noqa: F401
ResourceTypeSelector,
NodeSelector,
)
from .cli import ( # noqa: F401
parse_difference,
parse_from_selectors_definition,
)
from .queue import GraphQueue # noqa: F401
from .graph import Graph, UniqueId # noqa: F401
|
analyst-collective/dbt
|
core/dbt/graph/__init__.py
|
Python
|
apache-2.0
| 430
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.streaming_aead.streaming_aead."""
import io
import os
import tempfile
from absl.testing import absltest
import tink
from tink import streaming_aead
def setUpModule():
streaming_aead.register()
def get_primitive() -> streaming_aead.StreamingAead:
key_template = streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB
keyset_handle = tink.new_keyset_handle(key_template)
primitive = keyset_handle.primitive(streaming_aead.StreamingAead)
return primitive
class StreamingAeadTest(absltest.TestCase):
"""End-to-end test of Streaming AEAD Encrypting/Decrypting Streams."""
def test_encrypt_decrypt(self):
primitive = get_primitive()
long_plaintext = b' '.join(b'%d' % i for i in range(100 * 1000))
aad = b'associated_data'
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, 'encrypted_file')
dest = open(filename, 'wb')
with primitive.new_encrypting_stream(dest, aad) as es:
n = es.write(long_plaintext)
self.assertTrue(dest.closed)
self.assertLen(long_plaintext, n)
src = open(filename, 'rb')
with primitive.new_decrypting_stream(src, aad) as ds:
output = ds.read()
self.assertTrue(src.closed)
self.assertEqual(output, long_plaintext)
def test_encrypt_decrypt_raw(self):
primitive = get_primitive()
long_plaintext = b' '.join(b'%d' % i for i in range(100 * 1000))
aad = b'associated_data'
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, 'encrypted_file_raw')
dest = open(filename, 'wb', buffering=0) # returns a raw file.
with primitive.new_encrypting_stream(dest, aad) as es:
n = es.write(long_plaintext)
self.assertTrue(dest.closed)
self.assertLen(long_plaintext, n)
src = open(filename, 'rb', buffering=0) # returns a raw file.
with primitive.new_decrypting_stream(src, aad) as ds:
output = ds.read()
self.assertTrue(src.closed)
self.assertEqual(output, long_plaintext)
def test_encrypt_decrypt_textiowrapper(self):
primitive = get_primitive()
text_lines = [
'ᚻᛖ ᚳᚹᚫᚦ ᚦᚫᛏ ᚻᛖ ᛒᚢᛞᛖ ᚩᚾ ᚦᚫᛗ ᛚᚪᚾᛞᛖ ᚾᚩᚱᚦᚹᛖᚪᚱᛞᚢᛗ ᚹᛁᚦ ᚦᚪ ᚹᛖᛥᚫ\n',
'⡌⠁⠧⠑ ⠼⠁⠒ ⡍⠜⠇⠑⠹⠰⠎ ⡣⠕⠌\n',
'2H₂ + O₂ ⇌ 2H₂O\n',
'smile 😀\n']
aad = b'associated_data'
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, 'encrypted_textfile')
dest = open(filename, 'wb')
with io.TextIOWrapper(
primitive.new_encrypting_stream(dest, aad), encoding='utf8') as es:
es.writelines(text_lines)
self.assertTrue(dest.closed)
src = open(filename, 'rb')
with io.TextIOWrapper(
primitive.new_decrypting_stream(src, aad), encoding='utf8') as es:
for i, text_line in enumerate(es):
self.assertEqual(text_line, text_lines[i])
self.assertTrue(src.closed)
def test_encrypt_fails_on_nonwritable_stream(self):
primitive = get_primitive()
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, 'file')
with open(filename, 'wb') as f:
f.write(b'data')
with open(filename, 'rb') as dest: # dest is not writable
with self.assertRaises(ValueError):
primitive.new_encrypting_stream(dest, b'aad')
def test_decrypt_fails_on_nonreadable_stream(self):
primitive = get_primitive()
with tempfile.TemporaryDirectory() as tmpdirname:
# src not readable
with open(os.path.join(tmpdirname, 'file2'), 'wb') as src:
with self.assertRaises(ValueError):
primitive.new_decrypting_stream(src, b'aad')
if __name__ == '__main__':
absltest.main()
|
google/tink
|
python/tink/streaming_aead/_streaming_aead_test.py
|
Python
|
apache-2.0
| 4,470
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Nelder-Mead derivative-free minimization algorithm.
The Nelder-Mead method is one of the most popular derivative-free minimization
methods. For an optimization problem in `n` dimensions it maintains a set of
`n+1` candidate solutions that span a non-degenerate simplex. It successively
modifies the simplex based on a set of moves (reflection, expansion, shrinkage
and contraction) using the function values at each of the vertices.
"""
import collections
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
# Tolerance to check for floating point zeros.
_EPSILON = 1e-10
NelderMeadOptimizerResults = collections.namedtuple(
'NelderMeadOptimizerResults', [
'converged', # Scalar boolean tensor indicating whether the minimum
# was found within tolerance.
'num_objective_evaluations', # The total number of objective
# evaluations performed.
'position', # A tensor containing the last argument value found
# during the search. If the search converged, then
# this value is the argmin of the objective function.
'objective_value', # A tensor containing the value of the objective
# function at the `position`. If the search
# converged, then this is the (local) minimum of
# the objective function.
'final_simplex', # The last simplex constructed before stopping.
'final_objective_values', # The objective function evaluated at the
# vertices of the final simplex.
'initial_simplex', # The initial simplex.
'initial_objective_values', # The values of the objective function
# at the vertices of the initial simplex.
'num_iterations' # The number of iterations of the algorithm performed.
])
def minimize(objective_function,
initial_simplex=None,
initial_vertex=None,
step_sizes=None,
objective_at_initial_simplex=None,
objective_at_initial_vertex=None,
batch_evaluate_objective=False,
func_tolerance=1e-8,
position_tolerance=1e-8,
parallel_iterations=1,
max_iterations=None,
reflection=None,
expansion=None,
contraction=None,
shrinkage=None,
name=None):
"""Minimum of the objective function using the Nelder Mead simplex algorithm.
Performs an unconstrained minimization of a (possibly non-smooth) function
using the Nelder Mead simplex method. Nelder Mead method does not support
  univariate functions. Hence the dimension of the domain must be 2 or greater.
For details of the algorithm, see
[Press, Teukolsky, Vetterling and Flannery(2007)][1].
Points in the domain of the objective function may be represented as a
`Tensor` of general shape but with rank at least 1. The algorithm proceeds
by modifying a full rank simplex in the domain. The initial simplex may
either be specified by the user or can be constructed using a single vertex
supplied by the user. In the latter case, if `v0` is the supplied vertex,
the simplex is the convex hull of the set:
```None
S = {v0} + {v0 + step_i * e_i}
```
Here `e_i` is a vector which is `1` along the `i`-th axis and zero elsewhere
and `step_i` is a characteristic length scale along the `i`-th axis. If the
step size is not supplied by the user, a unit step size is used in every axis.
Alternately, a single step size may be specified which is used for every
axis. The most flexible option is to supply a bespoke step size for every
axis.
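  For example, with `v0 = [0., 0.]` and unit step sizes, the initial simplex
  has the vertices `[0., 0.]`, `[1., 0.]` and `[0., 1.]`.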
### Usage:
  The following example demonstrates the usage of the Nelder Mead minimization
on a two dimensional problem with the minimum located at a non-differentiable
point.
```python
# The objective function
def sqrt_quadratic(x):
return tf.sqrt(tf.reduce_sum(x ** 2, axis=-1))
start = tf.constant([6.0, -21.0]) # Starting point for the search.
optim_results = tfp.optimizer.nelder_mead_minimize(
sqrt_quadratic, initial_vertex=start, func_tolerance=1e-8,
batch_evaluate_objective=True)
# Check that the search converged
assert(optim_results.converged)
# Check that the argmin is close to the actual value.
np.testing.assert_allclose(optim_results.position, np.array([0.0, 0.0]),
atol=1e-7)
# Print out the total number of function evaluations it took.
print("Function evaluations: %d" % optim_results.num_objective_evaluations)
```
### References:
[1]: William Press, Saul Teukolsky, William Vetterling and Brian Flannery.
Numerical Recipes in C++, third edition. pp. 502-507. (2007).
http://numerical.recipes/cpppages/chap0sel.pdf
[2]: Jeffrey Lagarias, James Reeds, Margaret Wright and Paul Wright.
Convergence properties of the Nelder-Mead simplex method in low dimensions,
Siam J. Optim., Vol 9, No. 1, pp. 112-147. (1998).
http://www.math.kent.edu/~reichel/courses/Opt/reading.material.2/nelder.mead.pdf
[3]: Fuchang Gao and Lixing Han. Implementing the Nelder-Mead simplex
algorithm with adaptive parameters. Computational Optimization and
Applications, Vol 51, Issue 1, pp 259-277. (2012).
https://pdfs.semanticscholar.org/15b4/c4aa7437df4d032c6ee6ce98d6030dd627be.pdf
Args:
objective_function: A Python callable that accepts a point as a
real `Tensor` and returns a `Tensor` of real dtype containing
the value of the function at that point. The function
to be minimized. If `batch_evaluate_objective` is `True`, the callable
may be evaluated on a `Tensor` of shape `[n+1] + s ` where `n` is
the dimension of the problem and `s` is the shape of a single point
in the domain (so `n` is the size of a `Tensor` representing a
single point).
In this case, the expected return value is a `Tensor` of shape `[n+1]`.
Note that this method does not support univariate functions so the problem
dimension `n` must be strictly greater than 1.
initial_simplex: (Optional) `Tensor` of real dtype. The initial simplex to
start the search. If supplied, should be a `Tensor` of shape `[n+1] + s`
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain. Each row (i.e. the `Tensor` with a given
value of the first index) is interpreted as a vertex of a simplex and
hence the rows must be affinely independent. If not supplied, an axes
aligned simplex is constructed using the `initial_vertex` and
      `step_sizes`. Exactly one of `initial_simplex` and `initial_vertex`
      must be supplied.
initial_vertex: (Optional) `Tensor` of real dtype and any shape that can
be consumed by the `objective_function`. A single point in the domain that
will be used to construct an axes aligned initial simplex.
step_sizes: (Optional) `Tensor` of real dtype and shape broadcasting
compatible with `initial_vertex`. Supplies the simplex scale along each
      axis. Only used if `initial_simplex` is not supplied. See description
above for details on how step sizes and initial vertex are used to
construct the initial simplex.
    objective_at_initial_simplex: (Optional) Rank `1` `Tensor` of real dtype.
      The value of the objective function at the
initial simplex. May be supplied only if `initial_simplex` is
supplied. If not supplied, it will be computed.
objective_at_initial_vertex: (Optional) Scalar `Tensor` of real dtype. The
value of the objective function at the initial vertex. May be supplied
only if the `initial_vertex` is also supplied.
batch_evaluate_objective: (Optional) Python `bool`. If True, the objective
function will be evaluated on all the vertices of the simplex packed
into a single tensor. If False, the objective will be mapped across each
vertex separately. Evaluating the objective function in a batch allows
use of vectorization and should be preferred if the objective function
allows it.
func_tolerance: (Optional) Scalar `Tensor` of real dtype. The algorithm
stops if the absolute difference between the largest and the smallest
function value on the vertices of the simplex is below this number.
position_tolerance: (Optional) Scalar `Tensor` of real dtype. The
algorithm stops if the largest absolute difference between the
coordinates of the vertices is below this threshold.
parallel_iterations: (Optional) Positive integer. The number of iterations
allowed to run in parallel.
max_iterations: (Optional) Scalar positive `Tensor` of dtype `int32`.
The maximum number of iterations allowed. If `None` then no limit is
applied.
reflection: (Optional) Positive Scalar `Tensor` of same dtype as
`initial_vertex`. This parameter controls the scaling of the reflected
vertex. See, [Press et al(2007)][1] for details. If not specified,
uses the dimension dependent prescription of [Gao and Han(2012)][3].
expansion: (Optional) Positive Scalar `Tensor` of same dtype as
`initial_vertex`. Should be greater than `1` and `reflection`. This
parameter controls the expanded scaling of a reflected vertex.
See, [Press et al(2007)][1] for details. If not specified, uses the
dimension dependent prescription of [Gao and Han(2012)][3].
contraction: (Optional) Positive scalar `Tensor` of same dtype as
`initial_vertex`. Must be between `0` and `1`. This parameter controls
the contraction of the reflected vertex when the objective function at
the reflected point fails to show sufficient decrease.
See, [Press et al(2007)][1] for more details. If not specified, uses
      the dimension dependent prescription of [Gao and Han(2012)][3].
shrinkage: (Optional) Positive scalar `Tensor` of same dtype as
`initial_vertex`. Must be between `0` and `1`. This parameter is the scale
by which the simplex is shrunk around the best point when the other
steps fail to produce improvements.
See, [Press et al(2007)][1] for more details. If not specified, uses
      the dimension dependent prescription of [Gao and Han(2012)][3].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'minimize' is used.
Returns:
optimizer_results: A namedtuple containing the following items:
converged: Scalar boolean tensor indicating whether the minimum was
found within tolerance.
num_objective_evaluations: The total number of objective
evaluations performed.
position: A `Tensor` containing the last argument value found
during the search. If the search converged, then
this value is the argmin of the objective function.
objective_value: A tensor containing the value of the objective
function at the `position`. If the search
converged, then this is the (local) minimum of
the objective function.
final_simplex: The last simplex constructed before stopping.
final_objective_values: The objective function evaluated at the
vertices of the final simplex.
initial_simplex: The starting simplex.
initial_objective_values: The objective function evaluated at the
vertices of the initial simplex.
num_iterations: The number of iterations of the main algorithm body.
Raises:
ValueError: If any of the following conditions hold
1. If none or more than one of `initial_simplex` and `initial_vertex` are
supplied.
2. If `initial_simplex` and `step_sizes` are both specified.
"""
with tf.name_scope(name or 'minimize'):
(
dim,
_,
simplex,
objective_at_simplex,
num_evaluations
) = _prepare_args(objective_function,
initial_simplex,
initial_vertex,
step_sizes,
objective_at_initial_simplex,
objective_at_initial_vertex,
batch_evaluate_objective)
domain_dtype = simplex.dtype
(
reflection,
expansion,
contraction,
shrinkage
) = _resolve_parameters(dim,
reflection,
expansion,
contraction,
shrinkage,
domain_dtype)
closure_kwargs = dict(
objective_function=objective_function,
dim=dim,
func_tolerance=func_tolerance,
position_tolerance=position_tolerance,
batch_evaluate_objective=batch_evaluate_objective,
reflection=reflection,
expansion=expansion,
contraction=contraction,
shrinkage=shrinkage)
def _loop_body(_, iterations, simplex, objective_at_simplex,
num_evaluations):
(
converged,
next_simplex,
next_objective,
evaluations
) = nelder_mead_one_step(simplex, objective_at_simplex, **closure_kwargs)
return (converged, iterations + 1, next_simplex, next_objective,
num_evaluations + evaluations)
initial_args = (False, 0, simplex, objective_at_simplex,
num_evaluations)
# Loop until either we have converged or if the max iterations are supplied
# then until we have converged or exhausted the available iteration budget.
def _is_converged(converged, num_iterations, *ignored_args): # pylint:disable=unused-argument
# It is important to ensure that not_converged is a tensor. If
# converged is not a tensor but a Python bool, then the overloaded
# op '~' acts as bitwise complement so ~True = -2 and ~False = -1.
# In that case, the loop will never terminate.
not_converged = tf.logical_not(converged)
return (not_converged if max_iterations is None
else (not_converged & (num_iterations < max_iterations)))
(converged, num_iterations, final_simplex, final_objective_values,
final_evaluations) = tf.while_loop(
cond=_is_converged,
body=_loop_body,
loop_vars=initial_args,
parallel_iterations=parallel_iterations)
order = tf.argsort(
final_objective_values, direction='ASCENDING', stable=True)
best_index = order[0]
# The explicit cast to Tensor below is done to avoid returning a mixture
# of Python types and Tensors which cause problems with session.run.
# In the eager mode, converged may remain a Python bool. Trying to evaluate
# the whole tuple in one evaluate call will raise an exception because
# of the presence of non-tensors. This is very annoying so we explicitly
# cast those arguments to Tensors.
return NelderMeadOptimizerResults(
converged=tf.convert_to_tensor(converged),
num_objective_evaluations=final_evaluations,
position=final_simplex[best_index],
objective_value=final_objective_values[best_index],
final_simplex=final_simplex,
final_objective_values=final_objective_values,
num_iterations=tf.convert_to_tensor(num_iterations),
initial_simplex=simplex,
initial_objective_values=objective_at_simplex)
def nelder_mead_one_step(current_simplex,
current_objective_values,
objective_function=None,
dim=None,
func_tolerance=None,
position_tolerance=None,
batch_evaluate_objective=False,
reflection=None,
expansion=None,
contraction=None,
shrinkage=None,
name=None):
"""A single iteration of the Nelder Mead algorithm."""
with tf.name_scope(name or 'nelder_mead_one_step'):
domain_dtype = dtype_util.base_dtype(current_simplex.dtype)
order = tf.argsort(
current_objective_values, direction='ASCENDING', stable=True)
(
best_index,
worst_index,
second_worst_index
) = order[0], order[-1], order[-2]
worst_vertex = current_simplex[worst_index]
(
best_objective_value,
worst_objective_value,
second_worst_objective_value
) = (
current_objective_values[best_index],
current_objective_values[worst_index],
current_objective_values[second_worst_index]
)
# Compute the centroid of the face opposite the worst vertex.
face_centroid = tf.reduce_sum(
current_simplex, axis=0) - worst_vertex
face_centroid /= tf.cast(dim, domain_dtype)
# Reflect the worst vertex through the opposite face.
reflected = face_centroid + reflection * (face_centroid - worst_vertex)
objective_at_reflected = objective_function(reflected)
num_evaluations = 1
has_converged = _check_convergence(current_simplex,
current_simplex[best_index],
best_objective_value,
worst_objective_value,
func_tolerance,
position_tolerance)
def _converged_fn():
return (True, current_simplex, current_objective_values, np.int32(0))
case0 = has_converged, _converged_fn
accept_reflected = (
(objective_at_reflected < second_worst_objective_value) &
(objective_at_reflected >= best_objective_value))
accept_reflected_fn = _accept_reflected_fn(current_simplex,
current_objective_values,
worst_index,
reflected,
objective_at_reflected)
case1 = accept_reflected, accept_reflected_fn
do_expansion = objective_at_reflected < best_objective_value
expansion_fn = _expansion_fn(objective_function,
current_simplex,
current_objective_values,
worst_index,
reflected,
objective_at_reflected,
face_centroid,
expansion)
case2 = do_expansion, expansion_fn
do_outside_contraction = (
(objective_at_reflected < worst_objective_value) &
(objective_at_reflected >= second_worst_objective_value)
)
outside_contraction_fn = _outside_contraction_fn(
objective_function,
current_simplex,
current_objective_values,
face_centroid,
best_index,
worst_index,
reflected,
objective_at_reflected,
contraction,
shrinkage,
batch_evaluate_objective)
case3 = do_outside_contraction, outside_contraction_fn
default_fn = _inside_contraction_fn(objective_function,
current_simplex,
current_objective_values,
face_centroid,
best_index,
worst_index,
worst_objective_value,
contraction,
shrinkage,
batch_evaluate_objective)
(
converged,
next_simplex,
next_objective_at_simplex,
case_evals) = ps.case([case0, case1, case2, case3],
default=default_fn, exclusive=False)
tensorshape_util.set_shape(next_simplex, current_simplex.shape)
tensorshape_util.set_shape(next_objective_at_simplex,
current_objective_values.shape)
return (
converged,
next_simplex,
next_objective_at_simplex,
num_evaluations + case_evals
)
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
"""Creates the condition function pair for a reflection to be accepted."""
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
next_objective_values = _replace_at_index(objective_values, worst_index,
objective_at_reflected)
return False, next_simplex, next_objective_values, np.int32(0)
return _replace_worst_with_reflected
def _expansion_fn(objective_function,
simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected,
face_centroid,
expansion):
"""Creates the condition function pair for an expansion."""
def _expand_and_maybe_replace():
"""Performs the expansion step."""
expanded = face_centroid + expansion * (reflected - face_centroid)
expanded_objective_value = objective_function(expanded)
expanded_is_better = (expanded_objective_value <
objective_at_reflected)
accept_expanded_fn = lambda: (expanded, expanded_objective_value)
accept_reflected_fn = lambda: (reflected, objective_at_reflected)
next_pt, next_objective_value = ps.cond(
expanded_is_better, accept_expanded_fn, accept_reflected_fn)
next_simplex = _replace_at_index(simplex, worst_index, next_pt)
next_objective_at_simplex = _replace_at_index(objective_values,
worst_index,
next_objective_value)
return False, next_simplex, next_objective_at_simplex, np.int32(1)
return _expand_and_maybe_replace
def _outside_contraction_fn(objective_function,
simplex,
objective_values,
face_centroid,
best_index,
worst_index,
reflected,
objective_at_reflected,
contraction,
shrinkage,
batch_evaluate_objective):
"""Creates the condition function pair for an outside contraction."""
def _contraction():
"""Performs a contraction."""
contracted = face_centroid + contraction * (reflected - face_centroid)
objective_at_contracted = objective_function(contracted)
is_contracted_acceptable = objective_at_contracted <= objective_at_reflected
def _accept_contraction():
next_simplex = _replace_at_index(simplex, worst_index, contracted)
objective_at_next_simplex = _replace_at_index(
objective_values,
worst_index,
objective_at_contracted)
return (False,
next_simplex,
objective_at_next_simplex,
np.int32(1))
def _reject_contraction():
return _shrink_towards_best(objective_function,
simplex,
best_index,
shrinkage,
batch_evaluate_objective)
return ps.cond(is_contracted_acceptable,
_accept_contraction,
_reject_contraction)
return _contraction
def _inside_contraction_fn(objective_function,
simplex,
objective_values,
face_centroid,
best_index,
worst_index,
worst_objective_value,
contraction,
shrinkage,
batch_evaluate_objective):
"""Creates the condition function pair for an inside contraction."""
def _contraction():
"""Performs a contraction."""
contracted = face_centroid - contraction * (face_centroid -
simplex[worst_index])
objective_at_contracted = objective_function(contracted)
is_contracted_acceptable = objective_at_contracted <= worst_objective_value
def _accept_contraction():
next_simplex = _replace_at_index(simplex, worst_index, contracted)
objective_at_next_simplex = _replace_at_index(
objective_values,
worst_index,
objective_at_contracted)
return (
False,
next_simplex,
objective_at_next_simplex,
np.int32(1)
)
def _reject_contraction():
return _shrink_towards_best(objective_function, simplex, best_index,
shrinkage, batch_evaluate_objective)
return ps.cond(is_contracted_acceptable,
_accept_contraction,
_reject_contraction)
return _contraction
def _shrink_towards_best(objective_function,
simplex,
best_index,
shrinkage,
batch_evaluate_objective):
"""Shrinks the simplex around the best vertex."""
# If the contraction step fails to improve the average objective enough,
# the simplex is shrunk towards the best vertex.
best_vertex = simplex[best_index]
shrunk_simplex = best_vertex + shrinkage * (simplex - best_vertex)
objective_at_shrunk_simplex, evals = _evaluate_objective_multiple(
objective_function,
shrunk_simplex,
batch_evaluate_objective)
return (False,
shrunk_simplex,
objective_at_shrunk_simplex,
evals)
def _replace_at_index(x, index, replacement):
"""Replaces an element at supplied index."""
return tf.tensor_scatter_nd_update(x, [[index]], [replacement])
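# For example, _replace_at_index(tf.constant([3., 1., 4.]), 1, 5.) yields
# [3., 5., 4.]; for the rank-2 simplex tensors used above, `replacement` is a
# whole vertex.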
def _check_convergence(simplex,
best_vertex,
best_objective,
worst_objective,
func_tolerance,
position_tolerance):
"""Returns True if the simplex has converged.
If the simplex size is smaller than the `position_tolerance` or the variation
of the function value over the vertices of the simplex is smaller than the
`func_tolerance` return True else False.
Args:
simplex: `Tensor` of real dtype. The simplex to test for convergence. For
more details, see the docstring for `initial_simplex` argument
of `minimize`.
best_vertex: `Tensor` of real dtype and rank one less than `simplex`. The
vertex with the best (i.e. smallest) objective value.
best_objective: Scalar `Tensor` of real dtype. The best (i.e. smallest)
value of the objective function at a vertex.
worst_objective: Scalar `Tensor` of same dtype as `best_objective`. The
worst (i.e. largest) value of the objective function at a vertex.
func_tolerance: Scalar positive `Tensor`. The tolerance for the variation
of the objective function value over the simplex. If the variation over
the simplex vertices is below this threshold, convergence is True.
position_tolerance: Scalar positive `Tensor`. The algorithm stops if the
lengths (under the supremum norm) of edges connecting to the best vertex
are below this threshold.
Returns:
has_converged: A scalar boolean `Tensor` indicating whether the algorithm
is deemed to have converged.
"""
objective_convergence = tf.abs(worst_objective -
best_objective) < func_tolerance
simplex_degeneracy = tf.reduce_max(
tf.abs(simplex - best_vertex)) < position_tolerance
return objective_convergence | simplex_degeneracy
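# In symbols, the check above reports convergence iff
#   |f_worst - f_best| < func_tolerance, or
#   max over all vertices and coordinates of |x - x_best| < position_tolerance.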
def _prepare_args(objective_function,
initial_simplex,
initial_vertex,
step_sizes,
objective_at_initial_simplex,
objective_at_initial_vertex,
batch_evaluate_objective):
"""Computes the initial simplex and the objective values at the simplex.
Args:
objective_function: A Python callable that accepts a point as a
real `Tensor` and returns a `Tensor` of real dtype containing
the value of the function at that point. The function
to be evaluated at the simplex. If `batch_evaluate_objective` is `True`,
the callable may be evaluated on a `Tensor` of shape `[n+1] + s `
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain (so `n` is the size of a `Tensor`
representing a single point).
In this case, the expected return value is a `Tensor` of shape `[n+1]`.
initial_simplex: None or `Tensor` of real dtype. The initial simplex to
start the search. If supplied, should be a `Tensor` of shape `[n+1] + s`
where `n` is the dimension of the problem and `s` is the shape of a
single point in the domain. Each row (i.e. the `Tensor` with a given
value of the first index) is interpreted as a vertex of a simplex and
hence the rows must be affinely independent. If not supplied, an axes
aligned simplex is constructed using the `initial_vertex` and
      `step_sizes`. Exactly one of `initial_simplex` and `initial_vertex`
      must be supplied.
initial_vertex: None or `Tensor` of real dtype and any shape that can
be consumed by the `objective_function`. A single point in the domain that
will be used to construct an axes aligned initial simplex.
step_sizes: None or `Tensor` of real dtype and shape broadcasting
compatible with `initial_vertex`. Supplies the simplex scale along each
      axis. Only used if `initial_simplex` is not supplied. See the docstring
of `minimize` for more details.
objective_at_initial_simplex: None or rank `1` `Tensor` of real dtype.
The value of the objective function at the initial simplex.
May be supplied only if `initial_simplex` is
supplied. If not supplied, it will be computed.
objective_at_initial_vertex: None or scalar `Tensor` of real dtype. The
value of the objective function at the initial vertex. May be supplied
only if the `initial_vertex` is also supplied.
batch_evaluate_objective: Python `bool`. If True, the objective function
will be evaluated on all the vertices of the simplex packed into a
single tensor. If False, the objective will be mapped across each
vertex separately.
Returns:
prepared_args: A tuple containing the following elements:
dimension: Scalar `Tensor` of `int32` dtype. The dimension of the problem
as inferred from the supplied arguments.
num_vertices: Scalar `Tensor` of `int32` dtype. The number of vertices
in the simplex.
simplex: A `Tensor` of same dtype as `initial_simplex`
(or `initial_vertex`). The first component of the shape of the
`Tensor` is `num_vertices` and each element represents a vertex of
the simplex.
objective_at_simplex: A `Tensor` of same dtype as the dtype of the
return value of objective_function. The shape is a vector of size
`num_vertices`. The objective function evaluated at the simplex.
num_evaluations: An `int32` scalar `Tensor`. The number of points on
which the objective function was evaluated.
Raises:
ValueError: If any of the following conditions hold
1. If none or more than one of `initial_simplex` and `initial_vertex` are
supplied.
2. If `initial_simplex` and `step_sizes` are both specified.
"""
if objective_at_initial_simplex is not None and initial_simplex is None:
raise ValueError('`objective_at_initial_simplex` specified but the'
'`initial_simplex` was not.')
if objective_at_initial_vertex is not None and initial_vertex is None:
raise ValueError('`objective_at_initial_vertex` specified but the'
'`initial_vertex` was not.')
# The full simplex was specified.
if initial_simplex is not None:
if initial_vertex is not None:
raise ValueError('Both `initial_simplex` and `initial_vertex` specified.'
' Only one of the two should be specified.')
if step_sizes is not None:
raise ValueError('`step_sizes` must not be specified when an'
' `initial_simplex` has been specified.')
return _prepare_args_with_initial_simplex(objective_function,
initial_simplex,
objective_at_initial_simplex,
batch_evaluate_objective)
if initial_vertex is None:
raise ValueError('One of `initial_simplex` or `initial_vertex`'
' must be supplied')
if step_sizes is None:
step_sizes = _default_step_sizes(initial_vertex)
return _prepare_args_with_initial_vertex(objective_function,
initial_vertex,
step_sizes,
objective_at_initial_vertex,
batch_evaluate_objective)
def _default_step_sizes(reference_vertex):
"""Chooses default step sizes according to [Gao and Han(2010)][3]."""
# Step size to choose when the coordinate is zero.
small_sizes = dtype_util.as_numpy_dtype(reference_vertex.dtype)(0.00025)
# Step size to choose when the coordinate is non-zero.
large_sizes = reference_vertex * 0.05
return tf.where(
tf.abs(reference_vertex) < _EPSILON, small_sizes, large_sizes)
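# For example (illustrative values): a reference vertex of [0., 2.] yields
# step sizes [0.00025, 0.1], i.e. 0.00025 where a coordinate is numerically
# zero and 5% of the coordinate otherwise.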
def _prepare_args_with_initial_simplex(objective_function,
initial_simplex,
objective_at_initial_simplex,
batch_evaluate_objective):
"""Evaluates the objective function at the specified initial simplex."""
initial_simplex = tf.convert_to_tensor(initial_simplex)
# If d is the dimension of the problem, the number of vertices in the
# simplex should be d+1. From this, we can infer the number of dimensions
# as n - 1 where n is the number of vertices specified.
num_vertices = tf.shape(initial_simplex)[0]
dim = num_vertices - 1
num_evaluations = np.int32(0)
if objective_at_initial_simplex is None:
objective_at_initial_simplex, n_evals = _evaluate_objective_multiple(
objective_function, initial_simplex, batch_evaluate_objective)
num_evaluations += n_evals
objective_at_initial_simplex = tf.convert_to_tensor(
objective_at_initial_simplex)
return (dim,
num_vertices,
initial_simplex,
objective_at_initial_simplex,
num_evaluations)
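# Editor's note (illustrative): an `initial_simplex` of shape [3, 2]
# (three vertices in R^2) yields num_vertices=3 and dim=2; the objective is
# evaluated three times unless `objective_at_initial_simplex` was supplied,
# in which case num_evaluations stays 0.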
def _prepare_args_with_initial_vertex(objective_function,
initial_vertex,
step_sizes,
objective_at_initial_vertex,
batch_evaluate_objective):
"""Constructs a standard axes aligned simplex."""
dim = ps.size(initial_vertex)
  # tf.eye complains when num_rows is an np.array(..., np.int32); it only
  # accepts numpy scalars. TODO(b/162529062): Remove the following line.
dim = dim if tf.is_tensor(dim) else int(dim)
num_vertices = dim + 1
unit_vectors_along_axes = tf.reshape(
tf.eye(dim, dim, dtype=dtype_util.base_dtype(initial_vertex.dtype)),
ps.concat([[dim], ps.shape(initial_vertex)], axis=0))
# If step_sizes does not broadcast to initial_vertex, the multiplication
# in the second term will fail.
simplex_face = initial_vertex + step_sizes * unit_vectors_along_axes
simplex = tf.concat([tf.expand_dims(initial_vertex, axis=0),
simplex_face], axis=0)
# Evaluate the objective function at the simplex vertices.
if objective_at_initial_vertex is None:
objective_at_simplex, num_evaluations = _evaluate_objective_multiple(
objective_function, simplex, batch_evaluate_objective)
else:
objective_at_simplex_face, num_evaluations = _evaluate_objective_multiple(
objective_function, simplex_face, batch_evaluate_objective)
objective_at_simplex = tf.concat(
[
tf.expand_dims(objective_at_initial_vertex, axis=0),
objective_at_simplex_face
], axis=0)
return (dim,
num_vertices,
simplex,
objective_at_simplex,
num_evaluations)
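# Editor's note (illustrative): with initial_vertex=[1., 2.] and
# step_sizes=0.1, the constructed simplex is
#   [[1.0, 2.0],
#    [1.1, 2.0],
#    [1.0, 2.1]]
# i.e. the supplied vertex plus one axis-aligned perturbation per dimension.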
def _resolve_parameters(dim,
reflection,
expansion,
contraction,
shrinkage,
dtype):
"""Applies the [Gao and Han][3] presciption to the unspecified parameters."""
dim = tf.cast(dim, dtype=dtype)
reflection = 1. if reflection is None else reflection
expansion = (1. + 2. / dim) if expansion is None else expansion
contraction = (0.75 - 1. / (2 * dim)) if contraction is None else contraction
shrinkage = (1. - 1. / dim) if shrinkage is None else shrinkage
return reflection, expansion, contraction, shrinkage
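# Editor's note (illustrative): with dim=10 the [Gao and Han][3] defaults
# resolve to reflection=1.0, expansion=1.2, contraction=0.7, shrinkage=0.9;
# any parameter passed explicitly is left untouched.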
def _evaluate_objective_multiple(objective_function, arg_batch,
batch_evaluate_objective):
"""Evaluates the objective function on a batch of points.
  If `batch_evaluate_objective` is True, returns `objective_function(arg_batch)`;
  otherwise it maps the `objective_function` across `arg_batch`.
Args:
    objective_function: A Python callable that accepts a single `Tensor` of
      rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real
      dtype containing the value of the function at that point. If
      `batch_evaluate_objective` is True, the callable instead receives a
      `Tensor` of shape `[batch_size] + s`, where `batch_size` is the size of
      the batch of args; in this case the expected return value is a `Tensor`
      of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
      num_evaluations: An `int32` scalar `Tensor` containing the number of
        points on which the objective function was evaluated (i.e. `batch_size`).
"""
n_points = tf.shape(arg_batch)[0]
if batch_evaluate_objective:
return objective_function(arg_batch), n_points
return tf.map_fn(objective_function, arg_batch), n_points
|
tensorflow/probability
|
tensorflow_probability/python/optimizer/nelder_mead.py
|
Python
|
apache-2.0
| 40,382
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement resource abstraction for Course-level items."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import cgi
import yaml
import models
import courses
import messages
from common import resource
from common import safe_dom
from common import schema_fields
from common import tags
from common import utils as common_utils
from models import services
from tools import verify
DRAFT_TEXT = messages.DRAFT_TEXT
PUBLISHED_TEXT = messages.PUBLISHED_TEXT
# Allowed graders. Keys of this dict represent internal keys for the grader
# type, and the value represents the corresponding string that will appear in
# the dashboard UI.
AUTO_GRADER_NAME = 'Automatic grading'
HUMAN_GRADER_NAME = 'Peer review'
ALLOWED_GRADERS_NAMES = {
courses.AUTO_GRADER: AUTO_GRADER_NAME,
courses.HUMAN_GRADER: HUMAN_GRADER_NAME,
}
# When expanding GCB tags within questions, these tags may not be used
# (so as to forestall infinite recursion)
TAGS_EXCLUDED_FROM_QUESTIONS = set(
['question', 'question-group', 'gcb-questionnaire', 'text-file-upload-tag'])
class SaQuestionConstants(object):
DEFAULT_WIDTH_COLUMNS = 100
DEFAULT_HEIGHT_ROWS = 1
class ResourceQuestionBase(resource.AbstractResourceHandler):
TYPE_MC_QUESTION = 'question_mc'
TYPE_SA_QUESTION = 'question_sa'
@classmethod
def get_question_key_type(cls, qu):
"""Utility to convert between question type codes."""
if qu.type == models.QuestionDTO.MULTIPLE_CHOICE:
return cls.TYPE_MC_QUESTION
elif qu.type == models.QuestionDTO.SHORT_ANSWER:
return cls.TYPE_SA_QUESTION
else:
raise ValueError('Unknown question type: %s' % qu.type)
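    # Editor's note (illustrative): a question DTO whose `type` is
    # models.QuestionDTO.MULTIPLE_CHOICE maps to 'question_mc', and
    # models.QuestionDTO.SHORT_ANSWER maps to 'question_sa'; any other
    # value raises ValueError.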
@classmethod
def get_resource(cls, course, key):
return models.QuestionDAO.load(key)
@classmethod
def get_resource_title(cls, rsrc):
return rsrc.description
@classmethod
def get_data_dict(cls, course, key):
return cls.get_resource(course, key).dict
@classmethod
def get_view_url(cls, rsrc):
return None
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_question&key=%s' % key
@classmethod
def _add_html_field_to(
cls, registry, name, label, class_name, supportCustomTags,
description=None, optional=True):
registry.add_property(schema_fields.SchemaField(
name, label, 'html', optional=optional,
extra_schema_dict_values={
'supportCustomTags': supportCustomTags,
'excludedCustomTags': TAGS_EXCLUDED_FROM_QUESTIONS,
'rteButtonSet': 'reduced',
'className': class_name},
description=description))
class ResourceSAQuestion(ResourceQuestionBase):
TYPE = ResourceQuestionBase.TYPE_SA_QUESTION
GRADER_TYPES = [
('case_insensitive', 'Case insensitive string match'),
('regex', 'Regular expression'),
('numeric', 'Numeric')]
@classmethod
def get_schema(cls, course, key, forbidCustomTags=False):
"""Get the InputEx schema for the short answer question editor."""
supportCustomTags = (
not forbidCustomTags and tags.CAN_USE_DYNAMIC_TAGS.value)
sa_question = schema_fields.FieldRegistry(
'Short Answer Question',
description='short answer question',
extra_schema_dict_values={'className': 'sa-container'})
sa_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
cls._add_html_field_to(
sa_question, 'question', 'Question', 'sa-question',
supportCustomTags, optional=False)
cls._add_html_field_to(
sa_question, 'hint', 'Hint', 'sa-hint', supportCustomTags,
description=messages.SHORT_ANSWER_HINT_DESCRIPTION)
cls._add_html_field_to(
sa_question, 'defaultFeedback', 'Feedback', 'sa-feedback',
supportCustomTags,
description=messages.INCORRECT_ANSWER_FEEDBACK)
sa_question.add_property(schema_fields.SchemaField(
'rows', 'Rows', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'sa-rows',
'value': SaQuestionConstants.DEFAULT_HEIGHT_ROWS
},
description=messages.INPUT_FIELD_HEIGHT_DESCRIPTION))
sa_question.add_property(schema_fields.SchemaField(
'columns', 'Columns', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'sa-columns',
'value': SaQuestionConstants.DEFAULT_WIDTH_COLUMNS
},
description=messages.INPUT_FIELD_WIDTH_DESCRIPTION))
grader_type = schema_fields.FieldRegistry(
'Answer',
extra_schema_dict_values={'className': 'sa-grader'})
grader_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string',
description=messages.SHORT_ANSWER_SCORE_DESCRIPTION,
extra_schema_dict_values={
'className': 'sa-grader-score',
'value': '1.0',
}, i18n=False))
grader_type.add_property(schema_fields.SchemaField(
'matcher', 'Type', 'string',
description=messages.SHORT_ANSWER_TYPE_DESCRIPTION,
extra_schema_dict_values={'className': 'sa-grader-score'},
i18n=False, optional=True, select_data=cls.GRADER_TYPES))
grader_type.add_property(schema_fields.SchemaField(
'response', 'Answer', 'string',
description=messages.SHORT_ANSWER_ANSWER_DESCRIPTION,
extra_schema_dict_values={
'className': 'inputEx-Field sa-grader-text'},
optional=False))
cls._add_html_field_to(
grader_type, 'feedback', 'Feedback', 'sa-grader-feedback',
supportCustomTags,
description=messages.SHORT_ANSWER_FEEDBACK_DESCRIPTION)
graders_array = schema_fields.FieldArray(
'graders', '', item_type=grader_type,
extra_schema_dict_values={
'className': 'sa-grader-container',
'listAddLabel': 'Add an answer',
'listRemoveLabel': 'Delete this answer'},
optional=True)
sa_question.add_property(graders_array)
sa_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=False,
extra_schema_dict_values={
'className': 'inputEx-Field sa-description'},
description=messages.QUESTION_DESCRIPTION))
return sa_question
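# Editor's note (illustrative only; the values below are made up): an editor
# built from the ResourceSAQuestion schema above would handle a payload
# shaped roughly like
#   {'version': '1.5', 'question': '<p>2 + 2 = ?</p>', 'hint': '',
#    'defaultFeedback': '', 'rows': '1', 'columns': '100',
#    'graders': [{'score': '1.0', 'matcher': 'numeric', 'response': '4',
#                 'feedback': ''}],
#    'description': 'Arithmetic check'}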
class ResourceMCQuestion(ResourceQuestionBase):
TYPE = ResourceQuestionBase.TYPE_MC_QUESTION
@classmethod
def get_schema(cls, course, key, forbidCustomTags=False):
"""Get the InputEx schema for the multiple choice question editor."""
supportCustomTags = (
not forbidCustomTags and tags.CAN_USE_DYNAMIC_TAGS.value)
mc_question = schema_fields.FieldRegistry(
'Multiple Choice Question',
description='multiple choice question',
extra_schema_dict_values={'className': 'mc-container'})
mc_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
cls._add_html_field_to(
mc_question, 'question', 'Question', 'mc-question',
supportCustomTags, optional=False)
cls._add_html_field_to(
mc_question, 'defaultFeedback', 'Feedback', 'mc-question',
supportCustomTags,
description=messages.MULTIPLE_CHOICE_FEEDBACK_DESCRIPTION)
mc_question.add_property(schema_fields.SchemaField(
'permute_choices', 'Randomize Choices', 'boolean',
description=messages.MULTIPLE_CHOICE_RANDOMIZE_CHOICES_DESCRIPTION,
extra_schema_dict_values={'className': 'mc-bool-option'},
optional=True))
mc_question.add_property(schema_fields.SchemaField(
'all_or_nothing_grading', 'All or Nothing', 'boolean',
optional=True, description='Disallow partial credit. Assign a '
'score of 0.0 to a question unless its raw score is 1.0.',
extra_schema_dict_values={'className': 'mc-bool-option'}))
mc_question.add_property(schema_fields.SchemaField(
'show_answer_when_incorrect', 'Display Correct', 'boolean',
optional=True, description='Display the correct choice if '
'answer is incorrect.',
extra_schema_dict_values={'className': 'mc-bool-option'}))
mc_question.add_property(schema_fields.SchemaField(
'multiple_selections', 'Selection', 'boolean',
optional=True,
select_data=[
(False, 'Allow only one selection'),
(True, 'Allow multiple selections')],
extra_schema_dict_values={
'_type': 'radio',
'className': 'mc-selection'}))
choice_type = schema_fields.FieldRegistry(
'Choice',
extra_schema_dict_values={'className': 'mc-choice'})
choice_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'mc-choice-score', 'value': '0'}))
cls._add_html_field_to(
choice_type, 'text', 'Text', 'mc-choice-text', supportCustomTags)
cls._add_html_field_to(
choice_type, 'feedback', 'Feedback', 'mc-choice-feedback',
supportCustomTags,
description=messages.MULTIPLE_CHOICE_CHOICE_FEEDBACK_DESCRIPTION)
choices_array = schema_fields.FieldArray(
'choices', None, item_type=choice_type,
extra_schema_dict_values={
'className': 'mc-choice-container',
'listAddLabel': 'Add a choice',
'listRemoveLabel': 'Delete choice'})
mc_question.add_property(choices_array)
mc_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=False,
extra_schema_dict_values={
'className': 'inputEx-Field mc-description'},
description=messages.QUESTION_DESCRIPTION))
return mc_question
class ResourceQuestionGroup(resource.AbstractResourceHandler):
TYPE = 'question_group'
@classmethod
def get_resource(cls, course, key):
return models.QuestionGroupDAO.load(key)
@classmethod
def get_resource_title(cls, rsrc):
return rsrc.description
@classmethod
def get_schema(cls, course, key):
"""Return the InputEx schema for the question group editor."""
question_group = schema_fields.FieldRegistry(
'Question Group', description='question_group')
question_group.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
question_group.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True))
question_group.add_property(schema_fields.SchemaField(
'introduction', 'Introduction', 'html', optional=True))
item_type = schema_fields.FieldRegistry(
'Item',
extra_schema_dict_values={'className': 'question-group-item'})
item_type.add_property(schema_fields.SchemaField(
'weight', 'Weight', 'number', optional=True, i18n=False,
extra_schema_dict_values={'className': 'question-group-weight'}))
question_select_data = [(q.id, q.description) for q in sorted(
models.QuestionDAO.get_all(), key=lambda x: x.description)]
item_type.add_property(schema_fields.SchemaField(
'question', 'Question', 'string', optional=True, i18n=False,
select_data=question_select_data,
extra_schema_dict_values={'className': 'question-group-question'}))
item_array_classes = 'question-group-items'
if not question_select_data:
item_array_classes += ' empty-question-list'
item_array = schema_fields.FieldArray(
'items', None, item_type=item_type,
extra_schema_dict_values={
'className': item_array_classes,
'sortable': 'true',
'listAddLabel': 'Add a question',
'listRemoveLabel': 'Remove'})
question_group.add_property(item_array)
return question_group
@classmethod
def get_data_dict(cls, course, key):
return models.QuestionGroupDAO.load(int(key)).dict
@classmethod
def get_view_url(cls, rsrc):
return None
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_question_group&key=%s' % key
class ResourceCourseSettings(resource.AbstractResourceHandler):
TYPE = 'course_settings'
@classmethod
def get_resource(cls, course, key):
entire_schema = course.create_settings_schema()
return entire_schema.clone_only_items_named([key])
@classmethod
def get_resource_title(cls, rsrc):
return ' '.join([sr.title for sr in rsrc.sub_registries.itervalues()])
@classmethod
def get_schema(cls, course, key):
return cls.get_resource(course, key)
@classmethod
def get_data_dict(cls, course, key):
schema = cls.get_schema(course, key)
json_entity = {}
schema.convert_entity_to_json_entity(
course.get_environ(course.app_context), json_entity)
return json_entity[key]
@classmethod
def get_view_url(cls, rsrc):
return None
@classmethod
def get_edit_url(cls, key):
action = 'settings_{}'.format(key)
return 'dashboard?action={}'.format(action)
def workflow_key(key):
return 'workflow:%s' % key
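# Editor's note (illustrative): workflow_key('grader') returns
# 'workflow:grader'; the helper simply namespaces workflow-related field
# names used in the assessment schema below.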
class LabelGroupsHelper(object):
"""Various methods that make it easier to attach labels to objects."""
LABELS_FIELD_NAME = 'labels'
TRACKS_FIELD_NAME = 'tracks'
LOCALES_FIELD_NAME = 'locales'
FIELDS = [
{
'name': LABELS_FIELD_NAME,
'label': 'Labels',
'description':
'The {content} is tagged with these labels for your reference.',
'type_id': models.LabelDTO.LABEL_TYPE_GENERAL,
},
{
'name': TRACKS_FIELD_NAME,
'label': 'Tracks',
'description':
'The {content} is part of these tracks. If none are selected, '
'it will be part of all tracks.',
'type_id': models.LabelDTO.LABEL_TYPE_COURSE_TRACK,
'topic_id': 'labels:%s' % TRACKS_FIELD_NAME,
},
{
'name': LOCALES_FIELD_NAME,
'label': 'Languages',
'description':
'The {content} is available in these languages, in addition to '
'the base language.',
'type_id': models.LabelDTO.LABEL_TYPE_LOCALE,
},
]
@classmethod
def add_labels_schema_fields(cls, schema, type_name, exclude_types=None):
"""Creates multiple form fields for choosing labels"""
if exclude_types is None:
exclude_types = []
for field in cls.FIELDS:
if field['name'] not in exclude_types:
description = field['description'].format(content=type_name)
topic_id = field.get('topic_id')
if topic_id:
description = services.help_urls.make_learn_more_message(
description, topic_id)
schema.add_property(schema_fields.FieldArray(
field['name'], field['label'], description=str(description),
extra_schema_dict_values={
'_type': 'checkbox-list',
'noItemsHideField': True,
},
item_type=schema_fields.SchemaField(None, None, 'string',
extra_schema_dict_values={'_type': 'boolean'},
i18n=False),
optional=True,
select_data=cls._labels_to_choices(field['type_id'])))
@classmethod
def _labels_to_choices(cls, label_type):
"""Produces select_data for a label type"""
return [(label.id, label.title) for label in sorted(
models.LabelDAO.get_all_of_type(label_type),
key=lambda label: label.title)]
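    # Editor's note (illustrative): if the datastore held general labels
    # titled 'Beginner' (id 3) and 'Advanced' (id 1), _labels_to_choices
    # would return [(1, 'Advanced'), (3, 'Beginner')] -- sorted by title,
    # not by id.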
@classmethod
def remove_label_field_data(cls, data):
"""Deletes label field data from a payload"""
for field in cls.FIELDS:
del data[field['name']]
@classmethod
def field_data_to_labels(cls, data):
"""Collects chosen labels from all fields into a single set"""
labels = set()
for field in cls.FIELDS:
if field['name'] in data:
labels |= set(data[field['name']])
return labels
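    # Editor's note (illustrative, hypothetical payload): given
    #   {'labels': ['1', '2'], 'tracks': ['7'], 'locales': []}
    # field_data_to_labels returns the set {'1', '2', '7'} -- the union of
    # whichever of the three label fields are present in the data.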
@classmethod
def _filter_labels(cls, labels, label_type):
"""Filters chosen labels by a given type"""
return [label.id for label in sorted(
models.LabelDAO.get_all_of_type(label_type),
key=lambda label: label.title)
if str(label.id) in labels]
@classmethod
def labels_to_field_data(cls, labels, exclude_types=None):
"""Filters chosen labels by type into data for multiple fields"""
if exclude_types is None:
exclude_types = []
return {
field['name']: cls._filter_labels(labels, field['type_id'])
for field in cls.FIELDS
if not field['name'] in exclude_types}
class UnitTools(object):
def __init__(self, course):
self._course = course
def apply_updates(self, unit, updated_unit_dict, errors):
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
self._apply_updates_to_assessment(unit, updated_unit_dict, errors)
elif unit.type == verify.UNIT_TYPE_LINK:
self._apply_updates_to_link(unit, updated_unit_dict, errors)
elif unit.type == verify.UNIT_TYPE_UNIT:
self._apply_updates_to_unit(unit, updated_unit_dict, errors)
else:
raise ValueError('Unknown unit type %s' % unit.type)
def _apply_updates_common(self, unit, updated_unit_dict, errors):
"""Apply changes common to all unit types."""
if 'title' in updated_unit_dict:
unit.title = updated_unit_dict['title']
if 'description' in updated_unit_dict:
unit.description = updated_unit_dict['description']
labels = LabelGroupsHelper.field_data_to_labels(updated_unit_dict)
if labels and self._course.get_parent_unit(unit.unit_id):
track_label_ids = models.LabelDAO.get_set_of_ids_of_type(
models.LabelDTO.LABEL_TYPE_COURSE_TRACK)
if track_label_ids.intersection(labels):
errors.append('Cannot set track labels on entities which '
'are used within other units.')
unit.labels = common_utils.list_to_text(labels)
def _apply_updates_to_assessment(self, unit, updated_unit_dict, errors):
"""Store the updated assessment."""
entity_dict = {}
ResourceAssessment.get_schema(
self._course, unit.unit_id).convert_json_to_entity(
updated_unit_dict, entity_dict)
self._apply_updates_common(unit, entity_dict, errors)
if 'weight' in entity_dict:
try:
unit.weight = float(entity_dict['weight'])
if unit.weight < 0:
errors.append('The weight must be a non-negative integer.')
except ValueError:
errors.append('The weight must be an integer.')
if 'content' in entity_dict:
content = entity_dict['content']
if content:
self._course.set_assessment_content(
unit, content, errors=errors)
if 'html_content' in entity_dict:
unit.html_content = entity_dict['html_content']
if 'html_check_answers' in entity_dict:
unit.html_check_answers = entity_dict['html_check_answers']
if 'workflow' in entity_dict:
workflow_dict = entity_dict['workflow']
def convert_date(key):
due_date = workflow_dict.get(key)
if due_date:
workflow_dict[key] = due_date.strftime(
courses.ISO_8601_DATE_FORMAT)
convert_date(courses.SUBMISSION_DUE_DATE_KEY)
convert_date(courses.REVIEW_DUE_DATE_KEY)
if len(courses.ALLOWED_MATCHERS_NAMES) == 1:
workflow_dict[courses.MATCHER_KEY] = (
courses.ALLOWED_MATCHERS_NAMES.keys()[0])
unit.workflow_yaml = yaml.safe_dump(workflow_dict)
unit.workflow.validate(errors=errors)
# Only save the review form if the assessment needs human grading.
if not errors:
if self._course.needs_human_grader(unit):
if 'review_form' in entity_dict:
review_form = entity_dict['review_form']
if review_form:
self._course.set_review_form(
unit, review_form, errors=errors)
if 'html_review_form' in entity_dict:
unit.html_review_form = entity_dict['html_review_form']
elif entity_dict.get('review_form'):
errors.append(
'Review forms for auto-graded assessments should be empty.')
def _apply_updates_to_link(self, unit, updated_unit_dict, errors):
self._apply_updates_common(unit, updated_unit_dict, errors)
if 'url' in updated_unit_dict:
unit.href = updated_unit_dict['url']
def _is_assessment_unused(self, unit, assessment, errors):
parent_unit = self._course.get_parent_unit(assessment.unit_id)
if parent_unit and parent_unit.unit_id != unit.unit_id:
errors.append(
'Assessment "%s" is already associated to unit "%s"' % (
assessment.title, parent_unit.title))
return False
return True
def _is_assessment_version_ok(self, assessment, errors):
# Here, we want to establish that the display model for the
# assessment is compatible with the assessment being used in
# the context of a Unit. Model version 1.4 is not, because
        # the way it sets up submission is to build an entirely new form
# from JavaScript (independent of the form used to display the
# assessment), and the way it learns the ID of the assessment
# is by looking in the URL (as opposed to taking a parameter).
# This is incompatible with the URLs for unit display, so we
# just disallow older assessments here.
model_version = self._course.get_assessment_model_version(assessment)
if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
errors.append(
'The version of assessment "%s" ' % assessment.title +
'is not compatible with use as a pre/post unit element')
return False
return True
def _is_assessment_on_track(self, assessment, errors):
if self._course.get_unit_track_labels(assessment):
errors.append(
'Assessment "%s" has track labels, ' % assessment.title +
'so it cannot be used as a pre/post unit element')
return True
return False
def _apply_updates_to_unit(self, unit, updated_unit_dict, errors):
self._apply_updates_common(unit, updated_unit_dict, errors)
if 'unit_header' in updated_unit_dict:
unit.unit_header = updated_unit_dict['unit_header']
if 'unit_footer' in updated_unit_dict:
unit.unit_footer = updated_unit_dict['unit_footer']
if 'manual_progress' in updated_unit_dict:
unit.manual_progress = updated_unit_dict['manual_progress']
if 'pre_assessment' in updated_unit_dict:
unit.pre_assessment = None
pre_assessment_id = updated_unit_dict['pre_assessment']
if pre_assessment_id >= 0:
assessment = self._course.find_unit_by_id(pre_assessment_id)
if (self._is_assessment_unused(unit, assessment, errors) and
self._is_assessment_version_ok(assessment, errors) and
not self._is_assessment_on_track(assessment, errors)):
unit.pre_assessment = pre_assessment_id
if 'post_assessment' in updated_unit_dict:
unit.post_assessment = None
post_assessment_id = updated_unit_dict['post_assessment']
if (post_assessment_id >= 0 and
pre_assessment_id == post_assessment_id):
errors.append(
'The same assessment cannot be used as both the pre '
'and post assessment of a unit.')
elif post_assessment_id >= 0:
assessment = self._course.find_unit_by_id(post_assessment_id)
if (assessment and
self._is_assessment_unused(unit, assessment, errors) and
self._is_assessment_version_ok(assessment, errors) and
not self._is_assessment_on_track(assessment, errors)):
unit.post_assessment = post_assessment_id
if 'show_contents_on_one_page' in updated_unit_dict:
unit.show_contents_on_one_page = (
updated_unit_dict['show_contents_on_one_page'])
def unit_to_dict(self, unit, keys=None):
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
return self._assessment_to_dict(unit, keys=keys)
elif unit.type == verify.UNIT_TYPE_LINK:
return self._link_to_dict(unit)
elif unit.type == verify.UNIT_TYPE_UNIT:
return self._unit_to_dict(unit)
else:
raise ValueError('Unknown unit type %s' % unit.type)
def _unit_to_dict_common(self, unit):
data = {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'description': unit.description or '',
}
exclude_types = []
if self._course.get_parent_unit(unit.unit_id):
exclude_types.append(LabelGroupsHelper.TRACKS_FIELD_NAME)
data.update(LabelGroupsHelper.labels_to_field_data(
common_utils.text_to_list(unit.labels),
exclude_types=exclude_types))
return data
def _get_assessment_path(self, unit):
return self._course.app_context.fs.impl.physical_to_logical(
self._course.get_assessment_filename(unit.unit_id))
def _get_review_form_path(self, unit):
return self._course.app_context.fs.impl.physical_to_logical(
self._course.get_review_filename(unit.unit_id))
def _assessment_to_dict(self, unit, keys=None):
"""Assemble a dict with the unit data fields."""
assert unit.type == 'A'
content = None
if keys is not None and 'content' in keys:
path = self._get_assessment_path(unit)
fs = self._course.app_context.fs
if fs.isfile(path):
content = fs.get(path)
else:
content = ''
review_form = None
if keys is not None and 'review_form' in keys:
review_form_path = self._get_review_form_path(unit)
if review_form_path and fs.isfile(review_form_path):
review_form = fs.get(review_form_path)
else:
review_form = ''
workflow = unit.workflow
if workflow.get_submission_due_date():
submission_due_date = workflow.get_submission_due_date()
else:
submission_due_date = None
if workflow.get_review_due_date():
review_due_date = workflow.get_review_due_date()
else:
review_due_date = None
unit_common = self._unit_to_dict_common(unit)
unit_common.update({
'weight': str(unit.weight if hasattr(unit, 'weight') else 0),
'content': content,
'html_content': (
'' if unit.is_old_style_assessment(self._course)
else unit.html_content),
'html_check_answers': (
False if unit.is_old_style_assessment(self._course)
else unit.html_check_answers),
workflow_key(courses.SINGLE_SUBMISSION_KEY): (
workflow.is_single_submission()),
workflow_key(courses.SUBMISSION_DUE_DATE_KEY): (
submission_due_date),
workflow_key(courses.SHOW_FEEDBACK_KEY): (
workflow.show_feedback()),
workflow_key(courses.GRADER_KEY): workflow.get_grader(),
})
return {
'assessment': unit_common,
'review_opts': {
workflow_key(courses.MATCHER_KEY): workflow.get_matcher(),
workflow_key(courses.REVIEW_DUE_DATE_KEY): review_due_date,
workflow_key(courses.REVIEW_MIN_COUNT_KEY): (
workflow.get_review_min_count()),
workflow_key(courses.REVIEW_WINDOW_MINS_KEY): (
workflow.get_review_window_mins()),
'review_form': review_form,
'html_review_form': (
unit.html_review_form or ''
if hasattr(unit, 'html_review_form') else ''),
}
}
def _link_to_dict(self, unit):
assert unit.type == 'O'
ret = self._unit_to_dict_common(unit)
ret['url'] = unit.href
return ret
def _unit_to_dict(self, unit):
assert unit.type == 'U'
ret = self._unit_to_dict_common(unit)
ret['unit_header'] = unit.unit_header or ''
ret['unit_footer'] = unit.unit_footer or ''
ret['pre_assessment'] = unit.pre_assessment or -1
ret['post_assessment'] = unit.post_assessment or -1
ret['show_contents_on_one_page'] = (
unit.show_contents_on_one_page or False)
ret['manual_progress'] = unit.manual_progress or False
return ret
class ResourceUnitBase(resource.AbstractResourceHandler):
ASSESSMENT_TYPE = 'assessment'
UNIT_TYPE = 'unit'
LINK_TYPE = 'link'
# These default values can be overridden by class-scoped constants in
# specific derived classes.
TITLE_DESCRIPTION = messages.UNIT_TITLE_DESCRIPTION
DESCRIPTION_DESCRIPTION = messages.UNIT_DESCRIPTION_DESCRIPTION
@classmethod
def key_for_unit(cls, unit, course=None):
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
unit_type = cls.ASSESSMENT_TYPE
elif unit.type == verify.UNIT_TYPE_LINK:
unit_type = cls.LINK_TYPE
elif unit.type == verify.UNIT_TYPE_UNIT:
unit_type = cls.UNIT_TYPE
else:
raise ValueError('Unknown unit type: %s' % unit.type)
return resource.Key(unit_type, unit.unit_id, course=course)
@classmethod
def get_resource(cls, course, key):
return course.find_unit_by_id(key)
@classmethod
def get_resource_title(cls, rsrc):
return rsrc.title
@classmethod
def get_data_dict(cls, course, key):
unit = course.find_unit_by_id(key)
return UnitTools(course).unit_to_dict(unit)
@classmethod
def _generate_common_schema(
cls, title, hidden_header=False, exclude_fields=None):
group_class_name = 'inputEx-Group new-form-layout'
if hidden_header:
group_class_name += ' hidden-header'
ret = schema_fields.FieldRegistry(title, extra_schema_dict_values={
'className': group_class_name})
ret.add_property(schema_fields.SchemaField(
'key', 'ID', 'string', editable=False,
extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
hidden=True))
ret.add_property(schema_fields.SchemaField(
'type', 'Type', 'string', editable=False, hidden=True))
ret.add_property(schema_fields.SchemaField(
'title', 'Title', 'string',
description=cls.TITLE_DESCRIPTION, optional=False))
ret.add_property(schema_fields.SchemaField(
'description', 'Description', 'string',
description=cls.DESCRIPTION_DESCRIPTION, optional=True))
return ret
class ResourceUnit(ResourceUnitBase):
TYPE = ResourceUnitBase.UNIT_TYPE
@classmethod
def get_schema(cls, course, key):
schema = cls._generate_common_schema('Unit')
LabelGroupsHelper.add_labels_schema_fields(schema, 'unit')
schema.add_property(schema_fields.SchemaField(
'pre_assessment', 'Pre-Assessment', 'integer', optional=True,
description=messages.UNIT_PRE_ASSESSMENT_DESCRIPTION))
schema.add_property(schema_fields.SchemaField(
'post_assessment', 'Post-Assessment', 'integer', optional=True,
description=messages.UNIT_POST_ASSESSMENT_DESCRIPTION))
schema.add_property(schema_fields.SchemaField(
'show_contents_on_one_page', 'Show on One Page', 'boolean',
description=messages.UNIT_SHOW_ON_ONE_PAGE_DESCRIPTION,
optional=True))
schema.add_property(schema_fields.SchemaField(
'manual_progress', 'Allow Progress Override', 'boolean',
description=services.help_urls.make_learn_more_message(
messages.UNIT_ALLOW_PROGRESS_OVERRIDE_DESCRIPTION,
'course:%s:manual_progress' % ResourceUnitBase.UNIT_TYPE),
optional=True))
schema.add_property(schema_fields.SchemaField(
'unit_header', 'Header', 'html', optional=True,
description=messages.UNIT_HEADER_DESCRIPTION,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.DESCRIPTIVE_SCOPE,
'className': 'inputEx-Field html-content cb-editor-small'}))
schema.add_property(schema_fields.SchemaField(
'unit_footer', 'Footer', 'html', optional=True,
description=messages.UNIT_FOOTER_DESCRIPTION,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.DESCRIPTIVE_SCOPE,
'className': 'inputEx-Field html-content cb-editor-small'}))
return schema
@classmethod
def get_view_url(cls, rsrc):
return 'unit?unit=%s' % rsrc.unit_id
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_unit&key=%s' % key
class ResourceAssessment(ResourceUnitBase):
TYPE = ResourceUnitBase.ASSESSMENT_TYPE
TITLE_DESCRIPTION = messages.ASSESSMENT_TITLE_DESCRIPTION
DESCRIPTION_DESCRIPTION = messages.ASSESSMENT_DESCRIPTION_DESCRIPTION
AVAILABILITY_DESCRIPTION = messages.ASSESSMENT_AVAILABILITY_DESCRIPTION
SYLLABUS_VISIBILITY_DESCRIPTION = (
messages.ASSESSMENT_SYLLABUS_VISIBILITY_DESCRIPTION)
@classmethod
def get_schema(cls, course, key):
reg = schema_fields.FieldRegistry('Assessment',
extra_schema_dict_values={
'className': 'inputEx-Group new-form-layout'})
# Course level settings.
course_opts = cls._generate_common_schema(
'Assessment Config', hidden_header=True)
unit = cls.get_resource(course, key)
exclude_types = []
if course.get_parent_unit(unit.unit_id):
exclude_types.append(LabelGroupsHelper.TRACKS_FIELD_NAME)
LabelGroupsHelper.add_labels_schema_fields(
course_opts, 'assessment', exclude_types=exclude_types)
course_opts.add_property(schema_fields.SchemaField(
'weight', 'Points', 'number',
description=messages.ASSESSMENT_POINTS_DESCRIPTION,
i18n=False, optional=False))
course_opts.add_property(schema_fields.SchemaField(
'content', 'Assessment Content (JavaScript)', 'text', optional=True,
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_CONTENT_JAVASCRIPT_DESCRIPTION,
'course:%s:content' % ResourceUnitBase.ASSESSMENT_TYPE),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
course_opts.add_property(schema_fields.SchemaField(
'html_content', 'Assessment Content', 'html', optional=True,
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_CONTENT_DESCRIPTION,
'course:%s:html_content' % ResourceUnitBase.ASSESSMENT_TYPE),
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
'className': 'inputEx-Field html-content'}))
course_opts.add_property(schema_fields.SchemaField(
'html_check_answers', "Show Correct Answer", 'boolean',
description=messages.ASSESSMENT_SHOW_CORRECT_ANSWER_DESCRIPTION,
extra_schema_dict_values={
'className': ('inputEx-Field inputEx-CheckBox'
' assessment-editor-check-answers')},
optional=True))
course_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.SINGLE_SUBMISSION_KEY), 'Single Submission',
'boolean', optional=True,
description=messages.ASSESSMENT_SINGLE_SUBMISSION_DESCRIPTION))
course_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.SUBMISSION_DUE_DATE_KEY),
'Due Date', 'datetime', optional=True,
description=str(messages.ASSESSMENT_DUE_DATE_FORMAT_DESCRIPTION),
extra_schema_dict_values={'_type': 'datetime'}))
course_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.SHOW_FEEDBACK_KEY), 'Show Feedback',
'boolean', optional=True,
description=messages.ASSESSMENT_SHOW_FEEDBACK_DESCRIPTION))
course_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.GRADER_KEY), 'Grading Method', 'string',
select_data=ALLOWED_GRADERS_NAMES.items(), optional=True,
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_GRADING_METHOD_DESCRIPTION,
'course:%s:%s' % (
ResourceUnitBase.ASSESSMENT_TYPE,
workflow_key(courses.GRADER_KEY)))))
reg.add_sub_registry('assessment', 'Assessment Config',
registry=course_opts)
review_opts = reg.add_sub_registry('review_opts', 'Peer review',
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_DETAILS_DESCRIPTION,
'course:%s:review_opts' % ResourceUnitBase.ASSESSMENT_TYPE),
extra_schema_dict_values={'id': 'peer-review-group'})
if len(courses.ALLOWED_MATCHERS_NAMES) > 1:
review_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.MATCHER_KEY), 'Review Matcher', 'string',
optional=True,
select_data=courses.ALLOWED_MATCHERS_NAMES.items()))
review_opts.add_property(schema_fields.SchemaField(
'review_form', 'Reviewer Feedback Form (JavaScript)', 'text',
optional=True,
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_REVIEWER_FEEDBACK_FORM_DESCRIPTION,
'course:%s:review_form' % ResourceUnitBase.ASSESSMENT_TYPE),
extra_schema_dict_values={
'className': 'inputEx-Field review-form'}))
review_opts.add_property(schema_fields.SchemaField(
'html_review_form', 'Reviewer Feedback Form', 'html',
optional=True,
description=(
messages.ASSESSMENT_REVIEWER_FEEDBACK_FORM_HTML_DESCRIPTION),
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
'className': 'inputEx-Field html-review-form'}))
review_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.REVIEW_DUE_DATE_KEY),
'Review Due Date', 'datetime', optional=True,
description=messages.ASSESSMENT_REVIEW_DUE_DATE_FORMAT_DESCRIPTION,
extra_schema_dict_values={'_type': 'datetime'}))
review_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.REVIEW_MIN_COUNT_KEY),
'Review Min Count', 'integer', optional=True,
description=messages.ASSESSMENT_REVIEW_MIN_COUNT_DESCRIPTION))
review_opts.add_property(schema_fields.SchemaField(
workflow_key(courses.REVIEW_WINDOW_MINS_KEY),
'Review Window Timeout', 'integer', optional=True,
description=services.help_urls.make_learn_more_message(
messages.ASSESSMENT_REVIEW_TIMEOUT_IN_MINUTES,
workflow_key(courses.REVIEW_WINDOW_MINS_KEY))))
return reg
@classmethod
def get_view_url(cls, rsrc):
return 'assessment?name=%s' % rsrc.unit_id
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_assessment&key=%s' % key
class ResourceLink(ResourceUnitBase):
TYPE = ResourceUnitBase.LINK_TYPE
TITLE_DESCRIPTION = messages.LINK_TITLE_DESCRIPTION
DESCRIPTION_DESCRIPTION = messages.LINK_DESCRIPTION_DESCRIPTION
AVAILABILITY_DESCRIPTION = messages.LINK_AVAILABILITY_DESCRIPTION
SYLLABUS_VISIBILITY_DESCRIPTION = (
messages.LINK_SYLLABUS_VISIBILITY_DESCRIPTION)
@classmethod
def get_schema(cls, course, key):
schema = cls._generate_common_schema('Link')
LabelGroupsHelper.add_labels_schema_fields(schema, 'link')
schema.add_property(schema_fields.SchemaField(
'url', 'URL', 'string', description=messages.LINK_URL_DESCRIPTION,
extra_schema_dict_values={'_type': 'url', 'showMsg': True}))
return schema
@classmethod
def get_view_url(cls, rsrc):
return rsrc.href
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_link&key=%s' % key
class ResourceLesson(resource.AbstractResourceHandler):
TYPE = 'lesson'
@classmethod
def get_key(cls, lesson):
return resource.Key(cls.TYPE, lesson.lesson_id)
@classmethod
def get_resource(cls, course, key):
lesson = course.find_lesson_by_id(None, key)
unit = course.get_unit_for_lesson(lesson)
return (unit, lesson)
@classmethod
def get_resource_title(cls, rsrc):
return rsrc[1].title
@classmethod
def get_schema(cls, course, key):
units = course.get_units()
# Note GcbRte relies on the structure of this schema. Do not change
# without checking the dependency.
unit_list = []
for unit in units:
if unit.type == 'U':
unit_list.append(
(unit.unit_id,
cgi.escape(display_unit_title(unit, course.app_context))))
lesson_data = cls.get_data_dict(course, key)
has_video_id = bool(lesson_data.get('video'))
lesson = schema_fields.FieldRegistry(
'Lesson', description='Lesson', extra_schema_dict_values={
'className': 'inputEx-Group new-form-layout'})
lesson.add_property(schema_fields.SchemaField(
'key', 'ID', 'string', editable=False,
extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
hidden=True))
lesson.add_property(schema_fields.SchemaField(
'title', 'Title', 'string', extra_schema_dict_values={
'className': 'inputEx-Field content-holder'},
description=messages.LESSON_TITLE_DESCRIPTION))
lesson.add_property(schema_fields.SchemaField(
'unit_id', 'Unit', 'string', i18n=False,
description=messages.LESSON_PARENT_UNIT_DESCRIPTION,
select_data=unit_list,
extra_schema_dict_values={'required': False}))
lesson.add_property(schema_fields.SchemaField(
'video', 'Video ID', 'string', hidden=not has_video_id,
optional=True, description=messages.LESSON_VIDEO_ID_DESCRIPTION))
lesson.add_property(schema_fields.SchemaField(
'scored', 'Question Scoring', 'string', optional=True, i18n=False,
description=messages.LESSON_SCORED_DESCRIPTION,
select_data=[
('scored', 'Questions are scored'),
('not_scored', 'Questions only give feedback')]))
lesson.add_property(schema_fields.SchemaField(
'objectives', 'Lesson Body', 'html', optional=True,
extra_schema_dict_values={
'className': 'content-holder',
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value}))
lesson.add_property(schema_fields.SchemaField(
'notes', 'Text Version URL', 'string', optional=True,
description=messages.LESSON_TEXT_VERSION_URL_DESCRIPTION,
extra_schema_dict_values={'_type': 'url', 'showMsg': True}))
lesson.add_property(schema_fields.SchemaField(
'auto_index', 'Auto-Number', 'boolean',
description=messages.LESSON_AUTO_NUMBER_DESCRIPTION, optional=True))
lesson.add_property(schema_fields.SchemaField(
'activity_title', 'Activity Title', 'string', optional=True,
description=messages.LESSON_ACTIVITY_TITLE_DESCRIPTION))
lesson.add_property(schema_fields.SchemaField(
'activity_listed', 'Activity Listed', 'boolean', optional=True,
description=messages.LESSON_ACTIVITY_LISTED_DESCRIPTION))
lesson.add_property(schema_fields.SchemaField(
'activity', 'Activity', 'text', optional=True,
description=services.help_urls.make_learn_more_message(
messages.LESSON_ACTIVITY_DESCRIPTION,
'course:lesson:activity'),
extra_schema_dict_values={
'className': 'inputEx-Field activityHolder'}))
lesson.add_property(schema_fields.SchemaField(
'manual_progress', 'Allow Progress Override', 'boolean',
description=services.help_urls.make_learn_more_message(
messages.LESSON_ALLOW_PROGRESS_OVERRIDE_DESCRIPTION,
'course:lesson:manual_progress'),
optional=True))
return lesson
@classmethod
def get_data_dict(cls, course, key):
lesson = course.find_lesson_by_id(None, key)
fs = course.app_context.fs
path = fs.impl.physical_to_logical(course.get_activity_filename(
lesson.unit_id, lesson.lesson_id))
if lesson.has_activity and fs.isfile(path):
activity = fs.get(path)
else:
activity = ''
lesson_dict = {
'key': lesson.lesson_id,
'title': lesson.title,
'unit_id': lesson.unit_id,
'scored': 'scored' if lesson.scored else 'not_scored',
'objectives': lesson.objectives,
'video': lesson.video,
'notes': lesson.notes,
'auto_index': lesson.auto_index,
'activity_title': lesson.activity_title,
'activity_listed': lesson.activity_listed,
'activity': activity,
'manual_progress': lesson.manual_progress or False,
}
return lesson_dict
@classmethod
def get_view_url(cls, rsrc):
return 'unit?unit=%s&lesson=%s' % (rsrc[0].unit_id, rsrc[1].lesson_id)
@classmethod
def get_edit_url(cls, key):
return 'dashboard?action=edit_lesson&key=%s' % key
def get_unit_title_template(app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
if course_properties['course'].get('display_unit_title_without_index'):
return '%(title)s'
else:
# I18N: Message displayed as title for unit within a course.
        # Note the items %(index)s and %(title)s: the %(index)s will be
        # replaced with a number indicating the unit's sequence number
        # within the course, and the %(title)s with the unit's title.
return app_context.gettext('Unit %(index)s - %(title)s')
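# Editor's note (illustrative): with the default course settings the template
# is 'Unit %(index)s - %(title)s', so
#   template % {'index': 2, 'title': 'Loops'}  ==  'Unit 2 - Loops'
# whereas display_unit_title_without_index reduces it to just the title.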
def display_unit_title(unit, app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
template = get_unit_title_template(app_context)
return template % {'index': unit.index, 'title': unit.title}
def display_short_unit_title(unit, app_context):
"""Prepare a short unit title."""
course_properties = app_context.get_environ()
if course_properties['course'].get('display_unit_title_without_index'):
return unit.title
if unit.type != 'U':
return unit.title
# I18N: Message displayed as title for unit within a course. The
# "%s" will be replaced with the index number of the unit within
# the course. E.g., "Unit 1", "Unit 2" and so on.
unit_title = app_context.gettext('Unit %s')
return unit_title % unit.index
def display_lesson_title(unit, lesson, app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
content = safe_dom.NodeList()
span = safe_dom.Element('span')
content.append(span)
if lesson.auto_index:
prefix = ''
if course_properties['course'].get('display_unit_title_without_index'):
prefix = '%s ' % lesson.index
else:
prefix = '%s.%s ' % (unit.index, lesson.index)
span.add_text(prefix)
_class = ''
else:
_class = 'no-index'
span.add_text(lesson.title)
span.set_attribute('className', _class)
return content
|
ram8647/gcb-mobilecsp
|
models/resources_display.py
|
Python
|
apache-2.0
| 51,422
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
|
edgedb/edgedb
|
edb/pgsql/__init__.py
|
Python
|
apache-2.0
| 715
|
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Unified Storage (HUS-HNAS) platform.
"""
import os
from StringIO import StringIO
import tempfile
import mock
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hds import iscsi
LOG = logging.getLogger(__name__)
HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<chap_enabled>True</chap_enabled>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<svc_0>
<volume_type>default</volume_type>
<iscsi_ip>172.17.39.132</iscsi_ip>
<hdp>fs2</hdp>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
<iscsi_ip>172.17.39.133</iscsi_ip>
<hdp>fs2</hdp>
</svc_1>
</config>
"""
HNAS_WRONG_CONF1 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<volume_type>default</volume_type>
<hdp>172.17.39.132:/cinder</hdp>
</svc_0>
</config>
"""
HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<svc_0>
<volume_type>default</volume_type>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
</svc_1>
</config>
"""
# The following information is passed on to the tests when creating a volume.
_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
'volume_type': None, 'provider_location': None, 'id': 'abcdefg'}
class SimulatedHnasBackend(object):
"""Simulation Back end. Talks to HNAS."""
# these attributes are shared across object instances
start_lun = 0
init_index = 0
target_index = 0
hlun = 0
def __init__(self):
self.type = 'HNAS'
self.out = ''
self.volumes = []
# iSCSI connections
self.connections = []
def deleteVolume(self, name):
LOG.info("delVolume: name %s" % name)
volume = self.getVolume(name)
if volume:
LOG.info("deleteVolume: deleted name %s provider %s"
% (volume['name'], volume['provider_location']))
self.volumes.remove(volume)
return True
else:
return False
def deleteVolumebyProvider(self, provider):
LOG.info("delVolumeP: provider %s" % provider)
volume = self.getVolumebyProvider(provider)
if volume:
LOG.info("deleteVolumeP: deleted name %s provider %s"
% (volume['name'], volume['provider_location']))
self.volumes.remove(volume)
return True
else:
return False
def getVolumes(self):
return self.volumes
def getVolume(self, name):
LOG.info("getVolume: find by name %s" % name)
if self.volumes:
for volume in self.volumes:
if str(volume['name']) == name:
LOG.info("getVolume: found name %s provider %s"
% (volume['name'], volume['provider_location']))
return volume
else:
LOG.info("getVolume: no volumes")
LOG.info("getVolume: not found")
return None
def getVolumebyProvider(self, provider):
LOG.info("getVolumeP: find by provider %s" % provider)
if self.volumes:
for volume in self.volumes:
if str(volume['provider_location']) == provider:
LOG.info("getVolumeP: found name %s provider %s"
% (volume['name'], volume['provider_location']))
return volume
else:
LOG.info("getVolumeP: no volumes")
LOG.info("getVolumeP: not found")
return None
def createVolume(self, name, provider, sizeMiB, comment):
LOG.info("createVolume: name %s provider %s comment %s"
% (name, provider, comment))
new_vol = {'additionalStates': [],
'adminSpace': {'freeMiB': 0,
'rawReservedMiB': 384,
'reservedMiB': 128,
'usedMiB': 128},
'baseId': 115,
'copyType': 1,
'creationTime8601': '2012-10-22T16:37:57-07:00',
'creationTimeSec': 1350949077,
'failedStates': [],
'id': 115,
'provider_location': provider,
'name': name,
'comment': comment,
'provisioningType': 1,
'readOnly': False,
'sizeMiB': sizeMiB,
'state': 1,
'userSpace': {'freeMiB': 0,
'rawReservedMiB': 41984,
'reservedMiB': 31488,
'usedMiB': 31488},
'usrSpcAllocLimitPct': 0,
'usrSpcAllocWarningPct': 0,
'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243',
'wwn': '50002AC00073383D'}
self.volumes.append(new_vol)
def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
vol_id = name
_out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" %
(self.start_lun, size))
self.createVolume(name, vol_id, size, "create-lu")
self.start_lun += 1
return _out
def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
_out = ""
id = "myID"
LOG.info("Delete_Lu: check lun %s id %s" % (lun, id))
        if not self.deleteVolumebyProvider(id + '.' + str(lun)):
            LOG.warn("Delete_Lu: failed to delete lun %s id %s" % (lun, id))
return _out
def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
_out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" %
(self.start_lun, size))
id = name
LOG.info("HNAS Create_Dup: %d" % self.start_lun)
self.createVolume(name, id + '.' + str(self.start_lun), size,
"create-dup")
self.start_lun += 1
return _out
def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp,
port, iqn, initiator):
ctl = ""
conn = (self.hlun, lun, initiator, self.init_index, iqn,
self.target_index, ctl, port)
_out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \
and Target: %s @ index %d is successfully paired @ CTL: %s, \
Port: %s" % conn)
self.init_index += 1
self.target_index += 1
self.hlun += 1
LOG.debug("Created connection %d" % self.init_index)
self.connections.append(conn)
return _out
def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator):
self.connections.pop()
_out = ("H-LUN: successfully deleted from target")
return _out
def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name):
_out = ("LUN: %s successfully extended to %s MB" % (lu, size))
id = name
self.out = _out
LOG.info("extend_vol: lu: %s %d -> %s" % (lu, int(size), self.out))
v = self.getVolumebyProvider(id + '.' + str(lu))
if v:
v['sizeMiB'] = size
LOG.info("extend_vol: out %s %s" % (self.out, self))
return _out
def get_luns(self):
return len(self.alloc_lun)
def get_conns(self):
return len(self.connections)
def get_out(self):
return str(self.out)
def get_version(self, cmd, ver, ip0, user, pw):
self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
"version: 11.2.3319.09 LU: 256" \
" RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
return self.out
def get_iscsi_info(self, cmd, ip0, user, pw):
self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \
"CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up"
return self.out
def get_hdp_info(self, cmd, ip0, user, pw):
self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \
"70 Normal fs1\n" \
"HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2"
return self.out
def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret):
self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
return self.out
def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret):
self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
return self.out
def get_targetsecret(self, cmd, ip0, user, pw, target, hdp):
self.out = """wGkJhTpXaaYJ5Rv"""
return self.out
class HNASiSCSIDriverTest(test.TestCase):
"""Test HNAS iSCSI volume driver."""
def __init__(self, *args, **kwargs):
super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs)
@mock.patch.object(iscsi, 'factory_bend')
def setUp(self, _factory_bend):
super(HNASiSCSIDriverTest, self).setUp()
self.backend = SimulatedHnasBackend()
_factory_bend.return_value = self.backend
(handle, self.config_file) = tempfile.mkstemp('.xml')
os.write(handle, HNASCONF)
os.close(handle)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.hds_hnas_iscsi_config_file = self.config_file
self.configuration.hds_svc_iscsi_chap_enabled = True
self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration)
self.driver.do_setup("")
self.addCleanup(self._clean)
def _clean(self):
os.remove(self.config_file)
def _create_volume(self):
loc = self.driver.create_volume(_VOLUME)
vol = _VOLUME.copy()
vol['provider_location'] = loc['provider_location']
return vol
@mock.patch('__builtin__.open')
@mock.patch.object(os, 'access')
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
m_open.return_value = StringIO(HNASCONF)
self.assertRaises(exception.NotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
m_open.return_value = StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
m_open.return_value = StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '')
def test_create_volume(self):
loc = self.driver.create_volume(_VOLUME)
self.assertNotEqual(loc, None)
self.assertNotEqual(loc['provider_location'], None)
# cleanup
self.backend.deleteVolumebyProvider(loc['provider_location'])
def test_get_volume_stats(self):
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats["vendor_name"], "HDS")
self.assertEqual(stats["storage_protocol"], "iSCSI")
self.assertTrue(stats["total_capacity_gb"] > 0)
def test_delete_volume(self):
vol = self._create_volume()
self.driver.delete_volume(vol)
# should not be deletable twice
prov_loc = self.backend.getVolumebyProvider(vol['provider_location'])
self.assertTrue(prov_loc is None)
def test_extend_volume(self):
vol = self._create_volume()
new_size = _VOLUME['size'] * 2
self.driver.extend_volume(vol, new_size)
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_create_snapshot(self, m_id_to_vol):
vol = self._create_volume()
m_id_to_vol.return_value = vol
svol = vol.copy()
svol['volume_size'] = svol['size']
loc = self.driver.create_snapshot(svol)
self.assertNotEqual(loc, None)
svol['provider_location'] = loc['provider_location']
# cleanup
self.backend.deleteVolumebyProvider(svol['provider_location'])
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_create_clone(self, m_id_to_vol):
src_vol = self._create_volume()
m_id_to_vol.return_value = src_vol
src_vol['volume_size'] = src_vol['size']
dst_vol = self._create_volume()
dst_vol['volume_size'] = dst_vol['size']
loc = self.driver.create_cloned_volume(dst_vol, src_vol)
self.assertNotEqual(loc, None)
# cleanup
self.backend.deleteVolumebyProvider(src_vol['provider_location'])
self.backend.deleteVolumebyProvider(loc['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_delete_snapshot(self, m_id_to_vol):
svol = self._create_volume()
lun = svol['provider_location']
m_id_to_vol.return_value = svol
self.driver.delete_snapshot(svol)
self.assertTrue(self.backend.getVolumebyProvider(lun) is None)
def test_create_volume_from_snapshot(self):
svol = self._create_volume()
svol['volume_size'] = svol['size']
vol = self.driver.create_volume_from_snapshot(_VOLUME, svol)
self.assertNotEqual(vol, None)
# cleanup
self.backend.deleteVolumebyProvider(svol['provider_location'])
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
def test_initialize_connection(self, m_update_vol_location):
connector = {}
connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
connector['host'] = 'dut_1.lab.hds.com'
vol = self._create_volume()
conn = self.driver.initialize_connection(vol, connector)
self.assertTrue('3260' in conn['data']['target_portal'])
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
def test_terminate_connection(self, m_update_vol_location):
connector = {}
connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
connector['host'] = 'dut_1.lab.hds.com'
vol = self._create_volume()
vol['provider_location'] = "portal," +\
connector['initiator'] +\
",18-48-A5-A1-80-13.0,ctl,port,hlun"
conn = self.driver.initialize_connection(vol, connector)
num_conn_before = self.backend.get_conns()
self.driver.terminate_connection(vol, conn)
num_conn_after = self.backend.get_conns()
self.assertNotEqual(num_conn_before, num_conn_after)
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
|
abusse/cinder
|
cinder/tests/test_hds_iscsi.py
|
Python
|
apache-2.0
| 16,039
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import unittest
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
def testNonBatchMatrix(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
transposed = array_ops.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testConjugate(self):
m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
with self.test_session():
matrix = ops.convert_to_tensor(m)
transposed = array_ops.matrix_transpose(matrix, conjugate=True)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testBatchMatrix(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
transposed = array_ops.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testNonBatchMatrixDynamicallyDefined(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
matrix_ph = array_ops.placeholder(dtypes.int32)
transposed = array_ops.matrix_transpose(matrix_ph)
self.assertAllEqual(
expected_transposed, transposed.eval(feed_dict={
matrix_ph: matrix
}))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
batch_matrix_ph = array_ops.placeholder(dtypes.int32)
transposed = array_ops.matrix_transpose(batch_matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={
batch_matrix_ph: batch_matrix
}))
def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
vector = [1, 2, 3]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "should be a "):
array_ops.matrix_transpose(vector)
class BooleanMaskTest(test_util.TensorFlowTestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None, axis=None):
"""Check equivalence between boolean_mask and numpy masking."""
if make_mask is None:
make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
arr = np.random.rand(*arr_shape)
mask = make_mask(arr_shape[:ndims_mask])
if axis is not None:
mask = make_mask(arr_shape[axis:ndims_mask + axis])
if axis is None or axis == 0:
masked_arr = arr[mask]
elif axis == 1:
masked_arr = arr[:, mask]
elif axis == 2:
masked_arr = arr[:, :, mask]
with self.test_session():
masked_tensor = array_ops.boolean_mask(arr, mask, axis=axis)
# Leading dimension size of masked_tensor is always unknown until runtime
      # since we don't know how many elements will be kept.
leading = 1 if axis is None else axis + 1
self.assertAllEqual(masked_tensor.get_shape()[leading:],
masked_arr.shape[leading:])
self.assertAllClose(masked_arr, masked_tensor.eval())
def testMaskDim1ArrDim2Axis1(self):
ndims_mask = 1
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
def testMaskDim2ArrDim2Axis1(self):
ndims_mask = 2
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
def testMaskDim1ArrDim1(self):
ndims_mask = 1
for arr_shape in [(1,), (2,), (3,), (10,)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testMaskDim1ArrDim2(self):
ndims_mask = 1
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testMaskDim2ArrDim2(self):
ndims_mask = 2
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testMaskDim2ArrDim3(self):
ndims_mask = 2
for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testEmptyInput2D(self):
mask = np.array([True, False])
arr = np.array([[], []]).astype(np.float32)
numpy_result = arr[mask]
tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.test_session():
self.assertAllClose(numpy_result, tf_result.eval())
def testEmptyInput1D(self):
mask = np.array([]).astype(bool)
arr = np.array([]).astype(np.float32)
numpy_result = arr[mask]
tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.test_session():
self.assertAllClose(numpy_result, tf_result.eval())
def testEmptyOutput(self):
make_mask = lambda shape: np.zeros(shape, dtype=bool)
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)
def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session() as sess:
ph_tensor = array_ops.placeholder(dtypes.int32, shape=None)
ph_mask = array_ops.placeholder(dtypes.bool, shape=[None])
arr = np.array([[1, 2], [3, 4]])
mask = np.array([False, True])
masked_tensor = sess.run(
array_ops.boolean_mask(ph_tensor, ph_mask),
feed_dict={
ph_tensor: arr,
ph_mask: mask
})
np.testing.assert_allclose(masked_tensor, arr[mask])
def testMaskDimensionsSetToNoneRaises(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session():
tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
mask = array_ops.placeholder(dtypes.bool, shape=None)
with self.assertRaisesRegexp(ValueError, "dimensions must be specified"):
array_ops.boolean_mask(tensor, mask)
def testMaskHasMoreDimsThanTensorRaises(self):
mask = [[True, True], [False, False]]
tensor = [1, 2, 3, 4]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskIsScalarRaises(self):
mask = True
tensor = 1
with self.test_session():
with self.assertRaisesRegexp(ValueError, "mask.*scalar"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
mask = [True, True, True]
tensor = [[1, 2], [3, 4]]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
@test_util.run_all_in_graph_and_eager_modes
class OperatorShapeTest(test_util.TensorFlowTestCase):
def testExpandScalar(self):
scalar = "hello"
scalar_expanded = array_ops.expand_dims(scalar, [0])
self.assertEqual(scalar_expanded.get_shape(), (1,))
def testSqueezeScalar(self):
scalar = "hello"
scalar_squeezed = array_ops.squeeze(scalar, ())
self.assertEqual(scalar_squeezed.get_shape(), ())
def testSqueezeMatrix(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, [0])
self.assertEqual(matrix_squeezed.get_shape(), (3))
with self.assertRaisesRegexp(
Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
matrix_squeezed = array_ops.squeeze(matrix, [1])
def testSqueezeScalarDim(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, 0)
self.assertEqual(matrix_squeezed.get_shape(), (3))
def testExpandDimsWithNonScalarDim(self):
with self.assertRaisesRegexp(Exception,
"must be a tensor with a single value"):
array_ops.expand_dims(1, axis=[0, 1])
class ReverseV2Test(test_util.TensorFlowTestCase):
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, []).eval()
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 200, 3, 40, 5], dtype=np_dtype)
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np,
constant_op.constant(
[0], dtype=axis_dtype)).eval()
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def _reverse2DimAuto(self, np_dtype):
x_np = np.array([[1, 200, 3], [4, 5, 60]], dtype=np_dtype)
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=use_gpu):
x_tf_1 = reverse_f(x_np, constant_op.constant(
[0], dtype=axis_dtype)).eval()
x_tf_2 = reverse_f(x_np, constant_op.constant(
[-2], dtype=axis_dtype)).eval()
x_tf_3 = reverse_f(x_np, constant_op.constant(
[1], dtype=axis_dtype)).eval()
x_tf_4 = reverse_f(x_np, constant_op.constant(
[-1], dtype=axis_dtype)).eval()
x_tf_5 = reverse_f(x_np,
constant_op.constant([1, 0],
dtype=axis_dtype)).eval()
self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])
# This test covers the axis validation in the shape function
# (no eval())
def testInvalidAxis(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.assertRaisesRegexp(ValueError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [-30])
with self.assertRaisesRegexp(ValueError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [2])
with self.assertRaisesRegexp(ValueError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, [0, -2])
# This is the version of reverse that uses axis indices rather than
# bool tensors
# TODO(b/32254538): Change this test to use array_ops.reverse
#
# Note: this test passes placeholder as constant axis is validated
# in shape function (see testInvalidAxis)
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
axis = array_ops.placeholder(dtypes.int32)
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [-30]})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [2]})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [0, -2]})
def testReverse1DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool,
np.float16, np.float32, np.float64, np.complex64, np.complex128,
np.array(b"").dtype.type
]:
self._reverse1DimAuto(dtype)
def testReverse2DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool,
np.float16, np.float32, np.float64, np.complex64, np.complex128,
np.array(b"").dtype.type
]:
self._reverse2DimAuto(dtype)
def testUnknownDims(self):
reverse_v2 = array_ops.reverse_v2
data_t = array_ops.placeholder(dtypes.float32)
axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_known_t = reverse_v2(data_t, axis_known_t)
# Unlike V1 we cannot know this anymore
self.assertEqual(None, reverse_known_t.get_shape().ndims)
axis_unknown_t = array_ops.placeholder(dtypes.int32)
reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None])
axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
def testReverseRowsOf3Channels(self):
"""Tests optimized code for reversing rows with last dim size = 3."""
with self.test_session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = reverse_f(x_np, [1]).eval()
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
def testReverseRowsOf4Channels(self):
with self.test_session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
x_np = np.reshape(
np.arange(outer_size * middle_size * 4, dtype=np.float32),
newshape=(outer_size, middle_size, 4))
x_tf = reverse_f(x_np, [1]).eval()
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
def testReverseColumnsOf3Channels(self):
with self.test_session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in list(range(50)) + [100000]:
for middle_size in (1, 2):
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = reverse_f(x_np, [0]).eval()
np_answer = x_np[::-1, :, :]
self.assertAllEqual(x_tf, np_answer)
class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
for index in ("ij", "xy"):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.test_session(use_gpu=use_gpu):
for xx, yy in zip(numpy_out, tf_out):
self.assertAllEqual(xx, yy.eval())
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
for index in ("ij", "xy"):
for _ in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j
inputs.append(x)
numpy_out = np.meshgrid(*inputs, indexing=index)
with self.test_session(use_gpu=use_gpu):
tf_out = array_ops.meshgrid(*inputs, indexing=index)
for x_np, x_tf in zip(numpy_out, tf_out):
self.assertAllEqual(x_np, x_tf.eval())
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
x = [1, 2, 3]
y = [4, 5]
a = [[1, 1], [1, 1]]
self._compareDiff(x, y, False)
self._compareDiff(x, a, False)
class StridedSliceChecker(object):
"""Check a given tensor against the numpy result."""
REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
# Give the value a non-zero imaginary component for complex types.
if tensor_type.is_complex:
self.x_np -= 1j * self.x_np
self.test = test
self.x = constant_op.constant(self.x_np, dtype=tensor_type)
self.check_type_infer = check_type_infer
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
if not isinstance(spec, (list, tuple)):
spec = [spec]
tensor = op.eval()
# Make a numpy spec that pre-evals the tensors
np_specs = []
def eval_if_tensor(x):
try:
return x.eval()
except AttributeError:
return x
for s in spec:
if isinstance(s, slice):
start = eval_if_tensor(s.start)
stop = eval_if_tensor(s.stop)
step = eval_if_tensor(s.step)
np_specs.append(slice(start, stop, step))
else:
np_specs.append(eval_if_tensor(s))
self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
if self.check_type_infer:
self.test.assertAllEqual(tensor.shape, op.get_shape())
return tensor
STRIDED_SLICE_TYPES = [
dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128
]
class StridedSliceTest(test_util.TensorFlowTestCase):
"""Test the strided slice operation with variants of slices."""
def test_basic_slice(self):
for tensor_type in STRIDED_SLICE_TYPES:
with self.test_session(use_gpu=not tensor_type.is_integer):
checker = StridedSliceChecker(
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
_ = checker[:, :, :]
# Various ways of representing identity slice
_ = checker[:, :, :]
_ = checker[::, ::, ::]
_ = checker[::1, ::1, ::1]
# Not zero slice
_ = checker[::1, ::5, ::2]
# Reverse in each dimension independently
_ = checker[::-1, :, :]
_ = checker[:, ::-1, :]
_ = checker[:, :, ::-1]
## negative index tests i.e. n-2 in first component
_ = checker[-2::-1, :, ::1]
# negative index tests i.e. n-2 in first component, non-unit stride
_ = checker[-2::-1, :, ::2]
# Check rank-0 examples
checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type)
_ = checker2[None]
_ = checker2[...]
_ = checker2[tuple()]
def testInt64GPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with self.test_session(use_gpu=True, force_gpu=True):
x = constant_op.constant([1., 2., 3.])
begin = constant_op.constant([2], dtype=dtypes.int64)
end = constant_op.constant([3], dtype=dtypes.int64)
strides = constant_op.constant([1], dtype=dtypes.int64)
s = array_ops.strided_slice(x, begin, end, strides)
self.assertAllEqual([3.], self.evaluate(s))
def testDegenerateSlices(self):
with self.test_session(use_gpu=True):
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
# degenerate by offering a forward interval with a negative stride
_ = checker[0:-1:-1, :, :]
# degenerate with a reverse interval with a positive stride
_ = checker[-1:0, :, :]
# empty interval in every dimension
_ = checker[-1:0, 2:2, 2:3:-1]
def testEllipsis(self):
with self.test_session(use_gpu=True):
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[0:]
# implicit ellipsis
_ = checker[0:, ...]
# ellipsis alone
_ = checker[...]
# ellipsis at end
_ = checker[0:1, ...]
# ellipsis at begin
_ = checker[..., 0:1]
# ellipsis at middle
_ = checker[0:1, ..., 0:1]
# multiple ellipses not allowed
with self.assertRaisesRegexp(ValueError, "Multiple ellipses"):
_ = checker[..., :, ...].eval()
def testShrink(self):
with self.test_session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[:, :, :, :, 3]
_ = checker[..., 3]
_ = checker[:, 0]
_ = checker[:, :, 0]
def testTensorIndexing(self):
with self.test_session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)
bar = constant_op.constant(2)
bar2 = constant_op.constant(3)
_ = checker[..., bar:bar2]
_ = checker[..., bar]
with self.assertRaisesRegexp(
TypeError,
"Value passed to parameter 'begin' has DataType float32 not in "
"list of allowed values"):
_ = checker[..., 3.0]
_ = checker[..., 3]
def testExpand(self):
with self.test_session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
# new axis (followed by implicit ellipsis)
_ = checker[np.newaxis]
# newaxis after ellipsis
_ = checker[..., np.newaxis]
# newaxis in between ellipsis and explicit range
_ = checker[..., np.newaxis, :]
_ = checker[:, ..., np.newaxis, :, :]
# Reverse final dimension with new axis
_ = checker[:, :, np.newaxis, :, 2::-1]
# Ellipsis in middle of two newaxis
_ = checker[np.newaxis, ..., np.newaxis]
def testExpandVariable(self):
with self.test_session(use_gpu=True):
x = variables.Variable(7, dtype=dtypes.int32)
x.initializer.run()
y = x[None].eval()
self.assertEqual(y.shape, (1,))
self.assertAllEqual(y, (7,))
def testOptimizedCases(self):
with self.test_session(use_gpu=True):
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
# Identity
_ = checker[:]
# Identity
_ = checker[...]
# Identity
_ = checker[np.newaxis, ..., np.newaxis]
# First axis slice
_ = checker[1:]
# First axis slice
_ = checker[np.newaxis, 1:]
class StridedSliceShapeChecker(object):
def __init__(self, x):
self.x = x
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
return op.get_shape()
class StridedSliceShapeTest(test_util.TensorFlowTestCase):
"""Test the shape inference of StridedSliceShapes."""
def testUnknown(self):
with self.test_session(use_gpu=True):
uncertain_tensor = array_ops.placeholder(dtypes.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]
self.assertAllEqual(a_slice_shape.ndims, None)
def tensorShapeEqual(self, x, y):
self.assertTrue(x is not None and y is not None or x is None and y is None)
self.assertEqual(x.as_list(), y.as_list())
def testTensorShapeUncertain(self):
with self.test_session(use_gpu=True):
uncertain_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)
self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, 3:4, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, :, 5:10],
tensor_shape.TensorShape([2, None, 2]))
self.tensorShapeEqual(a[3:5, :, 50:3],
tensor_shape.TensorShape([2, None, 0]))
self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([1, None, 1, 0]))
self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2],
tensor_shape.TensorShape([5, None, 1, 4]))
def testTensorValuedIndexShape(self):
with self.test_session(use_gpu=True):
defined_shape_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, 3, 7))
index_value = array_ops.placeholder(dtypes.int32, shape=())
a = StridedSliceShapeChecker(defined_shape_tensor)
self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-1],
tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-2],
tensor_shape.TensorShape([2, 7]))
other_scalar = array_ops.placeholder(dtypes.int32, shape=())
self.tensorShapeEqual(a[index_value, other_scalar:2],
tensor_shape.TensorShape([None, 7]))
class GradSliceChecker(object):
"""Tests that we can compute a gradient for var^2."""
def __init__(self, test, sess, var, varnp):
self.test = test
self.sess = sess
self.val = var * var
self.var = var
self.varnp = varnp
def __getitem__(self, spec):
slice_var = self.var[spec]
slice_val = self.val[spec]
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
dy = variables.Variable(
array_ops.ones(shape=slice_var.get_shape(), dtype=dtypes.float32))
assign = dy.assign(slice_var)
slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
slice_val_grad2, = gradients_impl.gradients(
slice_val_grad, dy, grad_ys=self.var)
self.sess.run(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.sess.run([slice_val_grad, slice_val_grad2]))
analytic_grad2_evaled = analytic_grad2.eval()
self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
# compute analytic gradient for slice
np_val_grad = (2 * self.varnp * self.varnp)
np_sliceval_grad = np.zeros(self.var.get_shape())
np_sliceval_grad[spec] = np_val_grad[spec]
# verify gradient
self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
class StridedSliceGradTest(test_util.TensorFlowTestCase):
"""Test that strided slice's custom gradient produces correct gradients."""
def testGradient(self):
with self.test_session(use_gpu=True) as sess:
var = variables.Variable(
array_ops.reshape(
math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
init = variables.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(range(1, 97, 1)).reshape((6, 4, 4)))
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
_ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, -200, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, 200, :]
def testGradientZero(self):
with self.test_session(use_gpu=True) as sess:
var = variables.Variable(8.)
init = variables.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var, np.array(8))
_ = grad[tuple()]
def testInt64Indices(self):
with self.test_session(use_gpu=True) as sess:
a = math_ops.range(3, dtype=dtypes.float32)
index = constant_op.constant(1, dtype=dtypes.int64)
b = 2. * a[index]
grad, = gradients_impl.gradients(b, a)
self.assertAllEqual(sess.run(grad), [0., 2., 0.])
class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
"""Test varied index types and host located memory."""
def testHostVsDevice(self):
with self.test_session(use_gpu=True) as sess:
var2 = variables.Variable(
array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1)))
varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
sess.run(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0])
end = constant_op.constant([4, 1, 1])
strides = constant_op.constant([1, 1, 1])
foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
sess.run(foo)
def testInt64Shape(self):
with self.test_session(use_gpu=True) as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))
original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
sess.run(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
def testMixedIndexTypes(self):
with self.test_session(use_gpu=True) as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))
original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
sess.run(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
with self.assertRaisesRegexp(
TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
" that does not match type int64 of argument 'shape'"):
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
class BenchmarkSlice(object):
def __init__(self, tensor):
self.tensor = tensor
def __getitem__(self, x):
return self.tensor[x]
class StridedSliceBenchmark(test_lib.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
variables.global_variables_initializer().run()
for _ in range(10):
_ = slice_op.eval()
iters = 1000
t0 = time.time()
for _ in range(iters):
slice_op.eval()
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)
def make_variable(self):
n = 256
shape = (n, n, n)
items = n**3
var = variables.Variable(
array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
dtype=dtypes.float32)
return var
def benchmark_strided_slice_skip(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
with session.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
class StridedSliceAssignChecker(object):
def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False):
self.tensor_type = tensor_type
self.test = test
self._use_resource = use_resource
self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
# Give the value a non-zero imaginary component for complex types.
if tensor_type.is_complex:
self.x_np -= 1j * self.x_np
self.x = constant_op.constant(self.x_np, dtype=tensor_type)
def __setitem__(self, index, value):
value = np.array(value).astype(self.tensor_type.as_numpy_dtype)
# Give the value a non-zero imaginary component for complex types.
if self.tensor_type.is_complex:
value -= 1j * value
with self.test.test_session(
use_gpu=not self.tensor_type.is_integer) as sess:
if self._use_resource:
var = resource_variable_ops.ResourceVariable(self.x)
else:
var = variables.Variable(self.x)
sess.run(variables.variables_initializer([var]))
val = sess.run(var[index].assign(value))
# val_copy is used to check that tf.assign works equivalently to the
# assign method above.
val_copy = sess.run(state_ops.assign(var[index], value))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
self.test.assertAllEqual(val_copy, valnp)
class SliceAssignTest(test_util.TensorFlowTestCase):
def testInvalidSlice(self):
with self.test_session() as sess:
foo = constant_op.constant([1, 2, 3])
with self.assertRaisesRegexp(ValueError, "Sliced assignment"
" is only supported for variables"):
bar = foo[:2].assign(constant_op.constant([1, 2]))
sess.run(bar)
def doTestSliceAssign(self, use_resource):
for dtype in STRIDED_SLICE_TYPES:
checker = StridedSliceAssignChecker(
self, [[1, 2, 3], [4, 5, 6]],
use_resource=use_resource,
tensor_type=dtype)
# Check if equal
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Check trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
# shrinks shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
# Non unit strides
checker[::1, ::-2] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testSliceAssign(self):
self.doTestSliceAssign(use_resource=False)
def testSliceAssignResource(self):
self.doTestSliceAssign(use_resource=True)
def testUninitialized(self):
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.test_session() as sess:
v = variables.Variable([1, 2])
sess.run(v[:].assign([1, 2]))
def testTypeError(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = variables.Variable(init_val)
with self.assertRaises(TypeError):
v[:].assign(too_small_val)
with self.assertRaises(TypeError):
v[:].assign(too_large_val)
def testTypeErrorResource(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = resource_variable_ops.ResourceVariable(init_val)
with self.test_session() as sess:
sess.run(v.initializer)
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_large_val))
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_small_val))
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseShape(self):
t_value = [[0, 42], [24, 0]]
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))
t = constant_op.constant(t_value)
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
self.assertEqual(4, self.evaluate(array_ops.size(t)))
self.assertEqual(2, self.evaluate(array_ops.rank(t)))
@test_util.run_in_graph_and_eager_modes
def testSparseShape(self):
sp_value = sparse_tensor.SparseTensorValue(
indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))
sp = sparse_tensor.SparseTensor.from_value(sp_value)
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
self.assertEqual(4, self.evaluate(array_ops.size(sp)))
self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
@test_util.run_in_graph_and_eager_modes
def testSizeDtype(self):
tensor = [1]
self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
self.assertEqual(
dtypes.int64,
self.evaluate(array_ops.size(tensor, out_type=dtypes.int64)).dtype)
class SequenceMaskTest(test_util.TensorFlowTestCase):
def testExceptions(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
array_ops.sequence_mask([10, 20], [10, 20])
def testOneDimensionalWithMaxlen(self):
with self.test_session():
res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(
res.eval(),
[[True, False, False, False, False], [True, True, True, False, False],
[True, True, False, False, False]])
@test_util.enable_c_shapes
def testOneDimensionalDtypeWithoutMaxlen(self):
with self.test_session():
# test dtype and default maxlen:
res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]),
dtype=dtypes.float32)
self.assertAllEqual(res.get_shape().as_list(), [3, 4])
self.assertAllEqual(
res.eval(),
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])
@test_util.enable_c_shapes
def testOneDimensionalWithoutMaxlen(self):
with self.test_session():
res = array_ops.sequence_mask(
constant_op.constant([0, 1, 4]))
self.assertAllEqual(res.get_shape().as_list(), [3, 4])
self.assertAllEqual(
res.eval(),
[[False, False, False, False],
[True, False, False, False],
[True, True, True, True]])
@test_util.enable_c_shapes
def testTwoDimensional(self):
with self.test_session():
res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
self.assertAllEqual(res.get_shape(), [1, 3, 5])
self.assertAllEqual(res.eval(), [[[True, False, False, False, False], [
True, True, True, False, False
], [True, True, False, False, False]]])
# test dtype and default maxlen:
res = array_ops.sequence_mask(
constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
self.assertAllEqual(
res.eval(),
[[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
[[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])
def testUnknownShape(self):
lengths = array_ops.placeholder(dtype=dtypes.int32)
res = array_ops.sequence_mask(lengths)
self.assertEqual(res.shape, None)
def testDtypes(self):
def check_dtypes(lengths_dtype, maxlen_dtype):
res = array_ops.sequence_mask(
constant_op.constant([1, 3, 2], dtype=lengths_dtype),
constant_op.constant(5, dtype=maxlen_dtype))
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(
res.eval(),
[[True, False, False, False, False], [True, True, True, False, False],
[True, True, False, False, False]])
with self.test_session():
check_dtypes(dtypes.int32, dtypes.int32)
check_dtypes(dtypes.int32, dtypes.int64)
check_dtypes(dtypes.int64, dtypes.int32)
check_dtypes(dtypes.int64, dtypes.int64)
class ConcatSliceResourceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testConcatSlice(self):
r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")
c = array_ops.stack([r1, r2])
s = array_ops.strided_slice(c, [1], [2])
self.evaluate(test_ops.resource_create_op(s))
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(test_ops.resource_create_op(r2))
class IdentityTest(test_util.TensorFlowTestCase):
def testEagerIdentity(self):
with context.eager_mode():
ctx = context.get_default_context()
if not ctx.num_gpus():
self.skipTest("No GPUs found")
def _test(x, y, device):
self.assertAllEqual(x.numpy(), y.numpy())
self.assertTrue(device in y.device.lower())
with ops.device("gpu:0"):
a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
with ops.device("gpu:0"):
b = array_ops.identity(a)
_test(a, b, "gpu")
with ops.device("cpu:0"):
c = array_ops.identity(b)
_test(b, c, "cpu")
with ops.device("cpu:0"):
d = array_ops.identity(c)
_test(c, d, "cpu")
with ops.device("gpu:0"):
e = array_ops.identity(d)
_test(d, e, "gpu")
class PadTest(test_util.TensorFlowTestCase):
def testEager(self):
with context.eager_mode():
t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
paddings = constant_op.constant([[
1,
1,
], [2, 2]])
padded = array_ops.pad(t, paddings, "CONSTANT")
self.assertAllEqual(padded.numpy(),
[[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])
class InvertPermutationTest(test_util.TensorFlowTestCase):
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
y = array_ops.invert_permutation(x)
self.assertAllEqual(y.get_shape(), [5])
self.assertAllEqual(y.eval(), [2, 4, 3, 0, 1])
class UnravelIndexTest(test_util.TensorFlowTestCase):
# TODO(b/73086570): Reenable test.
@unittest.skip("Test does not pass internally.")
def testUnravelIndex(self):
with self.test_session():
for dtype in [dtypes.int32, dtypes.int64]:
indices_1 = constant_op.constant(1621, dtype=dtype)
dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
self.assertAllEqual(out_1.eval(), [3, 1, 4, 1])
indices_2 = constant_op.constant([1621], dtype=dtype)
dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
out_2 = array_ops.unravel_index(indices_2, dims_2)
self.assertAllEqual(out_2.eval(), [[3], [1], [4], [1]])
indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
dims_3 = constant_op.constant([7, 6], dtype=dtype)
out_3 = array_ops.unravel_index(indices_3, dims_3)
self.assertAllEqual(out_3.eval(), [[3, 6, 6], [4, 5, 1]])
class GuaranteeConstOpTest(test_util.TensorFlowTestCase):
def testSimple(self):
with self.test_session():
a = array_ops.constant(10)
guarantee_a = array_ops.guarantee_const(a)
self.assertEqual(10, guarantee_a.eval())
def testVariables(self):
with self.test_session() as sess:
for use_resource in [False, True]:
a = variable_scope.get_variable(
"var_{}".format(use_resource), [],
initializer=init_ops.constant_initializer(10.0),
use_resource=use_resource)
guarantee_a = array_ops.guarantee_const(a)
sess.run(variables.global_variables_initializer())
self.assertEqual(10.0, guarantee_a.eval())
def testResourceRejection(self):
with self.test_session() as sess:
a = variable_scope.get_variable(
"resource_var", [],
initializer=init_ops.constant_initializer(10.0),
use_resource=True)
guarantee_a = array_ops.guarantee_const(a.handle)
sess.run(variables.global_variables_initializer())
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"cannot be a resource variable"):
guarantee_a.eval()
class SnapshotOpTest(test_util.TensorFlowTestCase):
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
with self.test_session(use_gpu=True):
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
y = gen_array_ops.snapshot(x)
self.assertAllEqual(y.eval(), [0, 1, 2, 3])
if __name__ == "__main__":
test_lib.main()
|
ZhangXinNan/tensorflow
|
tensorflow/python/kernel_tests/array_ops_test.py
|
Python
|
apache-2.0
| 48,533
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-14 12:27
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("peering", "0004_auto_20171004_2323")]
operations = [
migrations.CreateModel(
name="Router",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128)),
("hostname", models.CharField(max_length=256)),
(
"platform",
models.CharField(
choices=[
("junos", "Juniper JUNOS"),
("iosxr", "Cisco IOS-XR"),
(None, "Other"),
],
help_text="The router platform, used to interact with it",
max_length=50,
),
),
("comment", models.TextField(blank=True)),
],
options={"ordering": ["name"]},
),
migrations.AddField(
model_name="internetexchange",
name="router",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.Router",
),
),
]
|
respawner/peering-manager
|
peering/migrations/0005_auto_20171014_1427.py
|
Python
|
apache-2.0
| 1,688
|
from os.path import dirname, basename, isfile, realpath
import glob
import sys
sys.path.append(dirname(realpath(__file__)))
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = []
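# Descriptive note (added): the loop below dynamically imports every sibling
# module in this directory (skipping __init__ and this file) and binds each
# attribute that module defines as a top-level name here.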
for f in modules:
m = basename(f)[:-3]
if m == '__init__' or m == '_export' or not isfile(f): continue
module = __import__(m)
for name in dir(module):
exec(name+'=module.'+name)
from model import exported
|
uperetz/AstroTools
|
models/_export.py
|
Python
|
apache-2.0
| 419
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def weights_gamma():
htable = h2o.upload_file(pyunit_utils.locate("smalldata/gbm_test/moppe.csv"))
htable["premiekl"] = htable["premiekl"].asfactor()
htable["moptva"] = htable["moptva"].asfactor()
htable["zon"] = htable["zon"]
#gg = gbm(formula = medskad ~ premiekl + moptva + zon,data = table.1.2,distribution = "gamma", weights = table.1.2$antskad ,
# n.trees = 20,interaction.depth = 1,n.minobsinnode = 1,shrinkage = 1,bag.fraction = 1,train.fraction = 1)
#pr = predict(gg,newdata = table.1.2,type = "response")
#htable= as.h2o(table.1.2,destination_frame = "htable")
hh = h2o.gbm(x=htable[0:3],y=htable["medskad"],training_frame=htable,distribution="gamma",weights_column="antskad",
ntrees=20,max_depth=1,min_rows=1,learn_rate=1)
ph = hh.predict(htable)
assert abs(8.804447-hh._model_json['output']['init_f']) < 1e-6*8.804447
assert abs(3751.01-ph[0].min()) < 1e-4*3751.01
assert abs(15298.87-ph[0].max()) < 1e-4*15298.87
assert abs(8121.98-ph[0].mean()) < 1e-4*8121.98
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_gamma)
else:
weights_gamma()
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_weights_gammaGBM.py
|
Python
|
apache-2.0
| 1,246
|
from __future__ import absolute_import, print_function
from django.conf import settings
class NoIfModifiedSinceMiddleware(object):
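    """Drop the If-Modified-Since header from incoming requests.

    Active only while settings.DEBUG is True; in any other configuration the
    middleware removes itself by raising MiddlewareNotUsed. With the header
    gone, downstream conditional-GET handling never returns 304 Not Modified
    responses during development.
    """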
def __init__(self):
if not settings.DEBUG:
from django.core.exceptions import MiddlewareNotUsed
raise MiddlewareNotUsed
def process_request(self, request):
request.META.pop('HTTP_IF_MODIFIED_SINCE', None)
|
lyoniionly/django-cobra
|
src/cobra/middleware/debug.py
|
Python
|
apache-2.0
| 389
|
# Copyright 2018 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia.common import constants as consts
# This is a JSON schema validation dictionary
# https://json-schema.org/latest/json-schema-validation.html
#
# Note: This is used to generate the amphora driver "supported flavor
# metadata" dictionary. Each property should include a description
# for the user to understand what this flavor setting does.
#
# Where possible, the property name should match the configuration file name
# for the setting. The configuration file setting is the default when a
# setting is not defined in a flavor profile.
SUPPORTED_FLAVOR_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Octavia Amphora Driver Flavor Metadata Schema",
"description": "This schema is used to validate new flavor profiles "
"submitted for use in an amphora driver flavor profile.",
"type": "object",
"additionalProperties": False,
"properties": {
consts.LOADBALANCER_TOPOLOGY: {
"type": "string",
"description": "The load balancer topology. One of: "
"SINGLE - One amphora per load balancer. "
"ACTIVE_STANDBY - Two amphora per load balancer.",
"enum": list(consts.SUPPORTED_LB_TOPOLOGIES)
},
consts.COMPUTE_FLAVOR: {
"type": "string",
"description": "The compute driver flavor ID."
},
consts.AMP_IMAGE_TAG: {
"type": "string",
"description": "The amphora image tag."
}
}
}
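# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the schema
# above could be exercised with the third-party "jsonschema" package. The
# package dependency and the candidate flavor dictionary are assumptions made
# for this example only.
if __name__ == "__main__":
    import jsonschema

    # A hypothetical flavor profile using only the compute flavor property.
    _candidate_flavor = {consts.COMPUTE_FLAVOR: "m1.amphora"}

    # Raises jsonschema.exceptions.ValidationError for unknown keys (the
    # schema sets "additionalProperties" to False) or wrongly typed values.
    jsonschema.validate(instance=_candidate_flavor,
                        schema=SUPPORTED_FLAVOR_SCHEMA)
    print("flavor profile is valid")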
|
openstack/octavia
|
octavia/api/drivers/amphora_driver/flavor_schema.py
|
Python
|
apache-2.0
| 2,189
|
#
# Copyright 2014-15 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pkg_resources
import mock
from cabalgata.silla import factories
from cabalgata.silla.util import disk
def plugin_entry_points():
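    # A single fake entry point named 'a' that resolves to
    # data.factories.FactoryA; used below to stub pkg_resources.iter_entry_points.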
return (
pkg_resources.EntryPoint.parse('a=data.factories:FactoryA'),
)
@mock.patch('pkg_resources.iter_entry_points')
def test_load_factory(mock_pkg_resources):
mock_pkg_resources.return_value = plugin_entry_points()
with disk.temp_directory() as temp_directory:
factory = factories.load_factory('a', temp_directory)
assert factory.versions
assert factory.definitions
try:
factories.load_factory('does_not_exist', temp_directory)
assert False, 'Should have raised an KeyError'
except KeyError:
pass
@mock.patch('pkg_resources.iter_entry_points')
def test_install(mock_pkg_resources):
mock_pkg_resources.return_value = plugin_entry_points()
with disk.temp_directory() as temp_directory:
factory = factories.load_factory('a', temp_directory)
assert not factory.installed
factory.install('test', '1.2.3')
installed = factory.installed
assert installed['test'] == '1.2.3'
p = factory.load('test')
assert p.version == '1.2.3'
factory.uninstall('test')
assert not factory.installed
@mock.patch('pkg_resources.iter_entry_points')
def test_start(mock_pkg_resources):
mock_pkg_resources.return_value = plugin_entry_points()
with disk.temp_directory() as temp_directory:
factory = factories.load_factory('a', temp_directory)
factory.install('test', '1.2.3')
p = factory.load('test')
assert not p.running
p.start()
assert p.running
p.stop()
assert not p.running
p.start()
assert p.running
p.kill()
assert not p.running
factory.uninstall('test')
|
cabalgata/cabalgata-silla-de-montar
|
test/test_factories.py
|
Python
|
apache-2.0
| 2,490
|
#!/usr/bin/env python
'''
Script that gets
# Uptime when running config last changed
ccmHistoryRunningLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
and
# Uptime when startup config last saved
ccmHistoryStartupLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
compares them and decides whether or not the latest changes to the running-config have been saved to the startup-config
'''
# Import Kirk's module
from snmp_helper import snmp_get_oid, snmp_extract
# Define variables
device = "1.1.1.1"
community = "*****"
snmp_port = "161"
run_last_change_oid = "1.3.6.1.4.1.9.9.43.1.1.1.0"
start_last_change_oid = "1.3.6.1.4.1.9.9.43.1.1.3.0"
sys_uptime_oid = "1.3.6.1.2.1.1.3.0"
# Define device tuple
a_device = (device, community, snmp_port)
#Query data
run_last_change = snmp_extract(snmp_get_oid(a_device, run_last_change_oid,))
start_last_change = snmp_extract(snmp_get_oid(a_device, start_last_change_oid,))
sys_uptime = snmp_extract(snmp_get_oid(a_device, sys_uptime_oid,))
#Convert data in integer
if run_last_change.isdigit() and start_last_change.isdigit() and sys_uptime.isdigit():
run_last_change = int(run_last_change)
start_last_change = int(start_last_change)
sys_uptime = int(sys_uptime)
else:
exit("Error with SNMP response (non digits)")
if start_last_change == 0 and run_last_change > 3000:
exit("Config was never saved to startup since last reboot, but running-config was changed")
if start_last_change >= run_last_change:
exit("All right, last changes was saved to startup config")
else:
dif_time = (sys_uptime - start_last_change) / 100
exit("Last changes to running config wasn't saved to startup %d seconds already" % dif_time)
# The END
|
laetrid/learning
|
Second_course/ex1_2.py
|
Python
|
apache-2.0
| 1,680
|
"""Base configuration implementation."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import ipaddress
import json
from collections import OrderedDict
class InvalidConfigError(Exception):
"""This error is thrown when the config file is not valid."""
def test_config_condition(cond, msg):
"""
Evaluate condition and raise InvalidConfigError if condition True.
Args:
cond (bool): Condition on which to raise an error if it is true
msg (str): Message for the error if the condition is true
"""
if cond:
raise InvalidConfigError(msg)
class Conf:
"""Base class for FAUCET configuration."""
mutable_attrs = frozenset() # type: frozenset
defaults = {} # type: dict
defaults_types = {} # type: dict
dyn_finalized = False
dyn_hash = None
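    # Descriptive note (added): subclasses override "defaults" (default value
    # per config key) and "defaults_types" (expected Python type per key);
    # "mutable_attrs" names the attributes that may still be assigned after
    # finalize() has run.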
def __init__(self, _id, dp_id, conf=None):
self._id = _id
self.dp_id = dp_id
if conf is None:
conf = {}
if self.defaults is not None and self.defaults_types is not None:
diff = set(self.defaults.keys()).symmetric_difference(set(self.defaults_types.keys()))
assert not diff, diff
if isinstance(conf, dict):
self.update(conf)
self.set_defaults()
self.check_config()
self.orig_conf = {k: self.__dict__[k] for k in self.defaults}
for k, conf_v in self.orig_conf.items():
if isinstance(conf_v, Conf):
self.orig_conf[k] = conf_v.orig_conf
def __setattr__(self, name, value):
if not self.dyn_finalized or name.startswith('dyn') or name in self.mutable_attrs:
super(Conf, self).__setattr__(name, value)
else:
raise ValueError('cannot update %s on finalized Conf object' % name)
def _set_default(self, key, value, conf=None):
if conf is None:
conf = self.__dict__
assert key in conf, key
if conf[key] is None:
conf[key] = value
def _set_conf_defaults(self, defaults, conf):
for key, value in defaults.items():
self._set_default(key, value, conf=conf)
def set_defaults(self, defaults=None, conf=None):
"""Set default values and run any basic sanity checks."""
self._set_conf_defaults(self.defaults, self.__dict__)
def _check_unknown_conf(self, conf):
"""Check that supplied conf dict doesn't specify keys not defined."""
sub_conf_names = set(conf.keys())
unknown_conf_names = sub_conf_names - set(self.defaults.keys())
test_config_condition(unknown_conf_names, '%s fields unknown in %s' % (
unknown_conf_names, self._id))
def _check_conf_types(self, conf, conf_types):
"""Check that conf value is of the correct type."""
test_config_condition(not isinstance(conf, dict), (
'Conf object %s contents %s must be type %s not %s' % (
self._id, conf, dict, type(conf))))
for conf_key, conf_value in conf.items():
test_config_condition(
conf_key not in conf_types, '%s field unknown in %s (known types %s)' % (
conf_key, self._id, conf_types))
if conf_value is not None:
conf_type = conf_types[conf_key]
test_config_condition(
not isinstance(conf_value, conf_type), '%s value %s must be %s not %s' % (
conf_key, conf_value,
conf_type, type(conf_value))) # pytype: disable=invalid-typevar
@staticmethod
def _set_unknown_conf(conf, conf_types):
for conf_key, conf_type in conf_types.items():
if conf_key not in conf:
if conf_type == list:
conf[conf_key] = []
else:
conf[conf_key] = None
return conf
def update(self, conf):
"""Parse supplied YAML config and sanity check."""
self.__dict__.update(conf)
self._check_unknown_conf(conf)
self._check_conf_types(conf, self.defaults_types)
@staticmethod
def check_config():
"""Check config at instantiation time for errors, typically via assert."""
return
def _conf_keys(self, conf, subconf=True, ignore_keys=None):
"""Return a list of key/values of attributes with dyn/Conf attributes/filtered."""
conf_keys = []
for key, value in sorted(((key, value) for key, value in conf.orig_conf.items() if key in self.defaults)):
if ignore_keys and key in ignore_keys:
continue
if not subconf and value:
if isinstance(value, Conf):
continue
if isinstance(value, (tuple, list, set)) and isinstance(value[0], Conf):
continue
conf_keys.append((key, self._str_conf(value)))
return conf_keys
@staticmethod
def _conf_dyn_keys(conf):
return [(key, value) for key, value in conf.__dict__.items() if key.startswith('dyn')]
def merge_dyn(self, other_conf):
"""Merge dynamic state from other conf object."""
self.__dict__.update({k: v for k, v in self._conf_dyn_keys(other_conf)})
def _str_conf(self, conf_v):
if isinstance(conf_v, (bool, str, int)):
return conf_v
if isinstance(conf_v, (
ipaddress.IPv4Address, ipaddress.IPv4Interface, ipaddress.IPv4Network,
ipaddress.IPv6Address, ipaddress.IPv6Interface, ipaddress.IPv6Network)):
return str(conf_v)
if isinstance(conf_v, (dict, OrderedDict)):
return {str(i): self._str_conf(j) for i, j in conf_v.items() if j is not None}
if isinstance(conf_v, (list, tuple, frozenset)):
return tuple([self._str_conf(i) for i in conf_v if i is not None])
if isinstance(conf_v, Conf):
for i in ('name', '_id'):
if hasattr(conf_v, i):
return getattr(conf_v, i)
return None
def to_conf(self):
"""Return configuration as a dict."""
conf = {
k: self.orig_conf[str(k)] for k in self.defaults if k != 'name'}
return json.dumps(self._str_conf(conf), sort_keys=True, indent=4, separators=(',', ': '))
def conf_diff(self, other):
"""Return text diff between two Confs."""
differ = difflib.Differ()
return '\n'.join(differ.compare(
self.to_conf().splitlines(), other.to_conf().splitlines()))
def conf_hash(self, subconf=True, ignore_keys=None):
"""Return hash of keys configurably filtering attributes."""
return hash(frozenset(list(map(
str, self._conf_keys(self, subconf=subconf, ignore_keys=ignore_keys)))))
def __hash__(self):
if self.dyn_hash is not None:
return self.dyn_hash
dyn_hash = self.conf_hash(subconf=True)
if self.dyn_finalized:
self.dyn_hash = dyn_hash
return dyn_hash
def _finalize_val(self, val):
if isinstance(val, list):
return tuple(
[self._finalize_val(v) for v in val])
if isinstance(val, set):
return frozenset(
[self._finalize_val(v) for v in val])
if isinstance(val, dict):
return OrderedDict([
(k, self._finalize_val(v)) for k, v in sorted(val.items(), key=str)])
return val
def finalize(self):
"""Configuration parsing marked complete."""
self.__dict__.update(
{k: self._finalize_val(v) for k, v in self.__dict__.items()
if not k.startswith('dyn')})
self.dyn_finalized = True
def ignore_subconf(self, other, ignore_keys=None):
"""Return True if this config same as other, ignoring sub config."""
return (self.conf_hash(
subconf=False, ignore_keys=ignore_keys) == other.conf_hash(
subconf=False, ignore_keys=ignore_keys))
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def _check_ip_str(ip_str, ip_method=ipaddress.ip_address):
try:
# bool type is deprecated by the library ipaddress
if not isinstance(ip_str, bool):
return ip_method(ip_str)
raise InvalidConfigError('Invalid IP address %s: IP address of type bool' % (ip_str))
except (ValueError, AttributeError, TypeError) as err:
raise InvalidConfigError('Invalid IP address %s: %s' % (ip_str, err))
@staticmethod
def _ipvs(ipas):
return frozenset([ipa.version for ipa in ipas])
@staticmethod
def _by_ipv(ipas, ipv):
return frozenset([ipa for ipa in ipas if ipa.version == ipv])
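# A minimal usage sketch (not part of the original module; names below are
# illustrative): a hypothetical Conf subclass declares matching `defaults` and
# `defaults_types` dicts, pre-seeds its attributes, and lets update() /
# set_defaults() reject unknown keys, check value types and fill in defaults.
class ExampleConf(Conf):
    """Hypothetical Conf subclass used only for illustration."""
    defaults = {'description': None, 'enabled': True}
    defaults_types = {'description': str, 'enabled': bool}

    def __init__(self, _id, dp_id, conf=None):
        # attributes must exist before set_defaults() asserts on them
        self.description = None
        self.enabled = None
        super(ExampleConf, self).__init__(_id, dp_id, conf)


if __name__ == '__main__':
    example = ExampleConf(_id='example', dp_id=1, conf={'description': 'demo'})
    assert example.enabled is True  # default applied by set_defaults()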
|
shivarammysore/faucet
|
faucet/conf.py
|
Python
|
apache-2.0
| 9,565
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from operator import itemgetter
from oslo_utils import uuidutils
from neutron import context
from neutron.db import rbac_db_models
from neutron.objects import base as obj_base
from neutron.objects.db import api as obj_db_api
from neutron.objects import subnet
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api
class IPAllocationPoolObjectIfaceTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.IPAllocationPool
class IPAllocationPoolDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.IPAllocationPool
def setUp(self):
super(IPAllocationPoolDbObjectTestCase, self).setUp()
self._create_test_network()
self._create_test_subnet(self._network)
for obj in itertools.chain(self.db_objs, self.obj_fields, self.objs):
obj['subnet_id'] = self._subnet['id']
class DNSNameServerObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.DNSNameServer
def setUp(self):
super(DNSNameServerObjectIfaceTestCase, self).setUp()
self.pager_map[self._test_class.obj_name()] = (
obj_base.Pager(sorts=[('order', True)]))
class DNSNameServerDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.DNSNameServer
def setUp(self):
super(DNSNameServerDbObjectTestCase, self).setUp()
        # (NOTE) If two objects have the same value for a field and
        # they are sorted using that field, the order is not deterministic.
        # To avoid breaking the tests we ensure unique values for every field.
while not self._is_objects_unique():
self.db_objs = list(self.get_random_fields() for _ in range(3))
self.obj_fields = [self._test_class.modify_fields_from_db(db_obj)
for db_obj in self.db_objs]
self._create_test_network()
self._create_test_subnet(self._network)
for obj in itertools.chain(self.db_objs, self.obj_fields, self.objs):
obj['subnet_id'] = self._subnet['id']
def _is_objects_unique(self):
order_set = set([x['order'] for x in self.db_objs])
subnet_id_set = set([x['subnet_id'] for x in self.db_objs])
address_set = set([x['address'] for x in self.db_objs])
return 3 == len(order_set) == len(subnet_id_set) == len(address_set)
def _create_dnsnameservers(self):
for obj in self.obj_fields:
dns = self._make_object(obj)
dns.create()
def test_get_objects_sort_by_order_asc(self):
self._create_dnsnameservers()
objs = self._test_class.get_objects(self.context)
fields_sorted = sorted([dict(obj) for obj in self.obj_fields],
key=itemgetter('order'))
self.assertEqual(
fields_sorted,
[obj_test_base.get_obj_db_fields(obj) for obj in objs])
def test_get_objects_sort_by_order_desc(self):
self._create_dnsnameservers()
pager = obj_base.Pager(sorts=[('order', False)])
objs = self._test_class.get_objects(self.context, _pager=pager,
subnet_id=self._subnet.id)
fields_sorted = sorted([dict(obj) for obj in self.obj_fields],
key=itemgetter('order'), reverse=True)
self.assertEqual(
fields_sorted,
[obj_test_base.get_obj_db_fields(obj) for obj in objs])
def test_get_objects_sort_by_address_asc_using_pager(self):
self._create_dnsnameservers()
pager = obj_base.Pager(sorts=[('address', True)])
objs = self._test_class.get_objects(self.context, _pager=pager)
fields_sorted = sorted([dict(obj) for obj in self.obj_fields],
key=itemgetter('address'))
self.assertEqual(
fields_sorted,
[obj_test_base.get_obj_db_fields(obj) for obj in objs])
class RouteObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.Route
class RouteDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.Route
def setUp(self):
super(RouteDbObjectTestCase, self).setUp()
self._create_test_network()
self._create_test_subnet(self._network)
for obj in itertools.chain(self.db_objs, self.obj_fields, self.objs):
obj['subnet_id'] = self._subnet['id']
class SubnetObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.Subnet
def setUp(self):
super(SubnetObjectIfaceTestCase, self).setUp()
self.pager_map[subnet.DNSNameServer.obj_name()] = (
obj_base.Pager(sorts=[('order', True)]))
class SubnetDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.Subnet
def setUp(self):
super(SubnetDbObjectTestCase, self).setUp()
self._create_test_network()
self._create_test_segment(self._network)
for obj in itertools.chain(self.db_objs, self.obj_fields, self.objs):
obj['network_id'] = self._network['id']
obj['segment_id'] = self._segment['id']
def test_get_dns_nameservers_in_order(self):
obj = self._make_object(self.obj_fields[0])
obj.create()
dns_nameservers = [(2, '1.2.3.4'), (1, '5.6.7.8'), (4, '7.7.7.7')]
for order, address in dns_nameservers:
dns = subnet.DNSNameServer(self.context, order=order,
address=address,
subnet_id=obj.id)
dns.create()
new = self._test_class.get_object(self.context, id=obj.id)
self.assertEqual(1, new.dns_nameservers[0].order)
self.assertEqual(2, new.dns_nameservers[1].order)
self.assertEqual(4, new.dns_nameservers[-1].order)
def _create_shared_network_rbac_entry(self, network):
attrs = {
'object_id': network['id'],
'target_tenant': '*',
'action': rbac_db_models.ACCESS_SHARED
}
obj_db_api.create_object(self.context, rbac_db_models.NetworkRBAC,
attrs)
def test_get_subnet_shared_true(self):
network = self._create_network()
self._create_shared_network_rbac_entry(network)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network['id']
obj = self._make_object(subnet_data)
        # check that 'shared' is loaded by 'obj_load_attr' via an extra query
        # through RbacNeutronDbObjectMixin get_shared_with_tenant
self.assertTrue(obj.shared)
obj.create()
        # here 'shared' should be loaded by is_network_shared
self.assertTrue(obj.shared)
new = self._test_class.get_object(self.context,
**obj._get_composite_keys())
        # again, 'shared' should be loaded by is_network_shared
self.assertTrue(new.shared)
def test_filter_by_shared(self):
network = self._create_network()
self._create_shared_network_rbac_entry(network)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network['id']
obj = self._make_object(subnet_data)
obj.create()
result = self._test_class.get_objects(self.context, shared=True)
self.assertEqual(obj, result[0])
def test_get_shared_subnet_with_another_tenant(self):
network_shared = self._create_network()
self._create_shared_network_rbac_entry(network_shared)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network_shared['id']
shared_subnet = self._make_object(subnet_data)
shared_subnet.create()
priv_subnet = self._make_object(self.obj_fields[1])
priv_subnet.create()
        # Situation here:
        # - one network with a private subnet
        # - a shared network with its subnet
        # with a new context, the user should have access to the one shared network
all_subnets = self._test_class.get_objects(self.context)
self.assertEqual(2, len(all_subnets))
        # access with a new tenant_id; should be able to access only one subnet
new_ctx = context.Context('', uuidutils.generate_uuid())
public_subnets = self._test_class.get_objects(new_ctx)
self.assertEqual([shared_subnet], public_subnets)
# test get_object to fetch the private and then the shared subnet
fetched_private_subnet = self._test_class.get_object(new_ctx,
id=priv_subnet.id)
self.assertIsNone(fetched_private_subnet)
fetched_public_subnet = (
self._test_class.get_object(new_ctx, id=shared_subnet.id))
self.assertEqual(shared_subnet, fetched_public_subnet)
|
igor-toga/local-snat
|
neutron/tests/unit/objects/test_subnet.py
|
Python
|
apache-2.0
| 9,765
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a text ad with ad parameters.
To get ad groups, run get_ad_groups.py. To get keywords, run add_keywords.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_KEYWORD_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201802')
ad_param_service = client.GetService('AdParamService', version='v201802')
# Construct operations for adding text ad object and add to an ad group.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'example.com',
'description1': 'Low-gravity fun for {param1:cheap}.',
'description2': 'Only {param2:a few} seats left!',
'headline': 'Luxury Mars Cruises'
},
'status': 'ENABLED'
}
}]
ads = ad_group_ad_service.mutate(operations)['value']
# Display results.
for ad in ads:
print ('Text ad with id "%s" was successfully added to an ad group with '
'id "%s".' % (ad['adGroupId'], ad['ad']['id']))
# Construct operations for setting ad parameters.
operations = [
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': u'£100',
'paramIndex': '1'
}
},
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '50',
'paramIndex': '2'
}
}
]
ad_params = ad_param_service.mutate(operations)
# Display results.
for ad_param in ad_params:
print ('Ad parameter with text "%s" was successfully set for criterion '
'with id "%s" and ad group id "%s".'
% (ad_param['insertionText'], ad_param['criterionId'],
ad_param['adGroupId']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CRITERION_ID)
|
Aloomaio/googleads-python-lib
|
examples/adwords/v201802/campaign_management/set_ad_parameters.py
|
Python
|
apache-2.0
| 3,262
|
import json
import pytz
from unittest2 import skipIf
from datetime import date, datetime
from django.test.utils import override_settings
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from myuw.test.api import missing_url, get_user, get_user_pass
from myuw.dao.calendar import get_events
TRUMBA_PREFIX = 'http://www.trumba.com/calendar/5_current'
class TestCalendar(TestCase):
def setUp(self):
        self.now = datetime(2013, 4, 15, 0, 0, 0, tzinfo=pytz.utc)
def test_far_future(self):
cal = {'far_future': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 0)
def test_past_events(self):
cal = {'past': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 0)
def test_future(self):
cal = {'future_1': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 1)
self.assertEqual(len(event_response['events']), 0)
def test_future_two(self):
cal = {'future_1': None,
'future_2': None}
event_response = get_events(cal, self.now)
self.assertTrue(True)
self.assertEqual(len(event_response['future_active_cals']), 2)
self.assertEqual(len(event_response['events']), 0)
self.assertEqual(event_response['future_active_cals'][0]['count'], 1)
self.assertEqual(event_response['future_active_cals'][1]['count'], 2)
def test_current(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 5)
def test_event_url(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
url = "%s?%s" % (TRUMBA_PREFIX,
'trumbaEmbed=eventid%3D1107241160%26view%3Devent')
self.assertEqual(event_response['events'][0]['event_url'], url)
def test_date_sort(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(event_response['events'][0]['summary'],
'Multi Day Event')
self.assertEqual(event_response['events'][4]['summary'],
'Organic Chemistry Seminar: Prof. Matthew Becker4')
def test_active_cals(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['active_cals']), 1)
self.assertEqual(event_response['active_cals'][0]['url'],
TRUMBA_PREFIX)
self.assertEqual(event_response['active_cals'][0]['title'],
"Department of Five Events")
def test_all_day(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertTrue(event_response['events'][3]['is_all_day'])
def test_no_location(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(event_response['events'][3]['event_location'], "")
def test_all_day(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertTrue(event_response['events'][3]['is_all_day'])
self.assertFalse(event_response['events'][2]['is_all_day'])
self.assertIn('2013-04-18', event_response['events'][3]['end'])
|
fanglinfang/myuw
|
myuw/test/dao/calendar.py
|
Python
|
apache-2.0
| 3,758
|
from highton.models import HightonModel
from highton.highton_constants import HightonConstants
from highton import (
fields,
call_mixins,
)
from highton.models.attachment import Attachment
class Email(
HightonModel,
call_mixins.DetailCallMixin,
call_mixins.CreateCallMixin,
call_mixins.UpdateCallMixin,
call_mixins.DeleteCallMixin,
call_mixins.ListCommentCallMixin,
):
"""
:ivar id: fields.IntegerField(name=HightonConstants.ID)
:ivar title: fields.StringField(name=HightonConstants.TITLE, required=True)
:ivar body: fields.StringField(name=HightonConstants.BODY, required=True)
:ivar subject_id: fields.IntegerField(name=HightonConstants.SUBJECT_ID, required=True)
:ivar subject_type: fields.StringField(name=HightonConstants.SUBJECT_TYPE, required=True)
:ivar subject_name: fields.StringField(name=HightonConstants.SUBJECT_NAME)
:ivar author_id: fields.IntegerField(name=HightonConstants.AUTHOR_ID)
:ivar collection_id: fields.IntegerField(name=HightonConstants.COLLECTION_ID)
:ivar collection_type: fields.StringField(name=HightonConstants.COLLECTION_TYPE)
:ivar visible_to: fields.StringField(name=HightonConstants.VISIBLE_TO)
:ivar owner_id: fields.IntegerField(name=HightonConstants.OWNER_ID)
:ivar group_id: fields.IntegerField(name=HightonConstants.GROUP_ID)
:ivar updated_at: fields.DatetimeField(name=HightonConstants.UPDATED_AT)
:ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
:ivar attachments: fields.ListField(name=HightonConstants.ATTACHMENTS, init_class=Attachment)
"""
TAG_NAME = HightonConstants.EMAIL
ENDPOINT = HightonConstants.EMAILS
def __init__(self, **kwargs):
self.title = fields.StringField(name=HightonConstants.TITLE, required=True)
self.body = fields.StringField(name=HightonConstants.BODY, required=True)
self.subject_id = fields.IntegerField(name=HightonConstants.SUBJECT_ID, required=True)
self.subject_type = fields.StringField(name=HightonConstants.SUBJECT_TYPE, required=True)
self.subject_name = fields.StringField(name=HightonConstants.SUBJECT_NAME)
self.author_id = fields.IntegerField(name=HightonConstants.AUTHOR_ID)
self.collection_id = fields.IntegerField(name=HightonConstants.COLLECTION_ID)
self.collection_type = fields.StringField(name=HightonConstants.COLLECTION_TYPE)
self.visible_to = fields.StringField(name=HightonConstants.VISIBLE_TO)
self.owner_id = fields.IntegerField(name=HightonConstants.OWNER_ID)
self.group_id = fields.IntegerField(name=HightonConstants.GROUP_ID)
self.updated_at = fields.DatetimeField(name=HightonConstants.UPDATED_AT)
self.created_at = fields.DatetimeField(name=HightonConstants.CREATED_AT)
self.attachments = fields.ListField(name=HightonConstants.ATTACHMENTS, init_class=Attachment)
super().__init__(**kwargs)
|
seibert-media/Highton
|
highton/models/email.py
|
Python
|
apache-2.0
| 2,952
|
#
# Copyright (c) 2013 Preet Kukreti
#
# Excerpt of code taken from
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
import os
import sys
import stat
import tempfile
def is_case_sensitive_filesystem():
tmphandle, tmppath = tempfile.mkstemp()
is_insensitive = os.path.exists(tmppath.upper())
os.close(tmphandle)
os.remove(tmppath)
return not is_insensitive
_IS_CASE_SENSITIVE_FILESYSTEM = is_case_sensitive_filesystem()
def which(program, case_sensitive=_IS_CASE_SENSITIVE_FILESYSTEM):
"""
    Simulates the unix `which` command.
    Returns the absolute path if the program is found, otherwise None.
"""
def is_exe(fpath):
"""
Return true if fpath is a file we have access to
that is executable
"""
accessmode = os.F_OK | os.X_OK
if os.path.exists(fpath) and os.access(fpath, accessmode) and \
not os.path.isdir(fpath):
filemode = os.stat(fpath).st_mode
ret = bool(filemode & stat.S_IXUSR or
filemode & stat.S_IXGRP or
filemode & stat.S_IXOTH)
return ret
def list_file_exts(directory, search_filename=None, ignore_case=True):
"""
        Yield (filename, extension) tuples from the directory which match
        the search_filename
"""
if ignore_case:
search_filename = search_filename.lower()
        for root, dirs, files in os.walk(directory):
for f in files:
filename, extension = os.path.splitext(f)
if ignore_case:
filename = filename.lower()
if not search_filename or filename == search_filename:
yield (filename, extension)
break
fpath, fname = os.path.split(program)
# is a path: try direct program path
if fpath:
if is_exe(program):
return program
elif "win" in sys.platform:
        # isn't a path: try fname in the current directory on Windows
if is_exe(fname):
return program
paths = os.environ.get("PATH", "").split(os.pathsep)
paths = [path.strip('"') for path in paths]
exe_exts = [ext for ext in os.environ.get("PATHEXT", "").split(os.pathsep)]
if not case_sensitive:
exe_exts = map(str.lower, exe_exts)
# try append program path per directory
for path in paths:
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
# try with known executable extensions per program path per directory
for path in paths:
filepath = os.path.join(path, program)
for extension in exe_exts:
exe_file = filepath + extension
if is_exe(exe_file):
return exe_file
# try search program name with "soft" extension search
if len(os.path.splitext(fname)[1]) == 0:
for path in paths:
file_exts = list_file_exts(path, fname, not case_sensitive)
for file_ext in file_exts:
filename = "".join(file_ext)
exe_file = os.path.join(path, filename)
if is_exe(exe_file):
return exe_file
return None
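# Illustrative usage sketch (not part of the original module): `which` walks
# PATH (honouring PATHEXT extensions on Windows) and returns the absolute path
# of the first matching executable, or None when nothing matches.
if __name__ == "__main__":
    interpreter = which("python")
    if interpreter is not None:
        print("python found at: %s" % interpreter)
    else:
        print("python not found on PATH")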
|
hirokihamasaki/irma
|
common/plugins/which.py
|
Python
|
apache-2.0
| 3,234
|
Hi, <%= to_name %><br>
<br>
Someone asked to recover this account on <%= app_name %>.<br>If you want to recover this account, please click the following link and set a new password. Otherwise, please ignore this e-mail.<br>
<a href="<%= app_url %>/token/verify/!recovery/!<%str= mail_verify_token %>">Click here to set a new password!</a><br>
<%= value %><br>
<%= body_html %><br>
<%= children %><br>
<br>
<hr>
Thank you,<br>
<b><%= app_title %></b>
|
vinoth3v/In
|
In/templates/html/AccountRecoveryEmail.tpl.py
|
Python
|
apache-2.0
| 445
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as heat_client
from heatclient import exc as heat_exc
from neutron._i18n import _LW
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
# We are overriding create and update for now because the upstream
# heat client class does not take timeout as an argument
class HeatClient(object):
def __init__(self, user_name, tenant, heat_uri, password=None,
auth_token=None, timeout_mins=30):
api_version = "1"
endpoint = "%s/%s" % (heat_uri, tenant)
kwargs = {
'token': auth_token,
'username': user_name,
'password': password
}
self.client = heat_client.Client(api_version, endpoint, **kwargs)
self.stacks = self.client.stacks
self.timeout_mins = timeout_mins
        # The base class is an old-style class. We will have to change this
        # when it is updated
#gbp_heat_api_client.HeatClient.__init__(
# self, context, heat_uri, password, auth_token)
def create(self, name, data, parameters=None):
fields = {
'stack_name': name,
'timeout_mins': self.timeout_mins,
'disable_rollback': True,
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.create(**fields)
def update(self, stack_id, data, parameters=None):
fields = {
'timeout_mins': self.timeout_mins,
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.update(stack_id, **fields)
def delete(self, stack_id):
try:
self.stacks.delete(stack_id)
except heat_exc.HTTPNotFound:
LOG.warning(_LW("Stack %(stack)s created by service chain driver "
"is not found at cleanup"), {'stack': stack_id})
def get(self, stack_id):
return self.stacks.get(stack_id)
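# Minimal usage sketch (not part of the original module). The endpoint,
# credentials and template below are placeholders; the point is that
# timeout_mins is plumbed through to stack create/update by this wrapper.
if __name__ == '__main__':
    client = HeatClient(user_name='admin', tenant='demo-tenant-id',
                        heat_uri='http://controller:8004/v1',
                        password='secret', timeout_mins=10)
    template = {'heat_template_version': '2015-04-30', 'resources': {}}
    stack = client.create('example-stack', template, parameters={})
    print(stack)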
|
jiahaoliang/group-based-policy
|
gbpservice/nfp/orchestrator/config_drivers/heat_client.py
|
Python
|
apache-2.0
| 2,625
|
"""Auto-generated file, do not edit by hand. MK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MK = PhoneMetadata(id='MK', country_code=389, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-578]\\d{7}', possible_length=(8,), possible_length_local_only=(6, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2(?:[23]\\d|5[124578]|6[01])|3(?:1[3-6]|[23][2-6]|4[2356])|4(?:[23][2-6]|4[3-6]|5[256]|6[25-8]|7[24-6]|8[4-6]))\\d{5}', example_number='22212345', possible_length=(8,), possible_length_local_only=(6, 7)),
mobile=PhoneNumberDesc(national_number_pattern='7(?:[0-25-8]\\d{2}|32\\d|421|9[23]\\d)\\d{4}', example_number='72345678', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{5}', example_number='80012345', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='5[02-9]\\d{6}', example_number='50012345', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='8(?:0[1-9]|[1-9]\\d)\\d{5}', example_number='80123456', possible_length=(8,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(2)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([347]\\d)(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[347]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([58]\\d{2})(\\d)(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[58]'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
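# Illustrative usage sketch (not part of the generated file): this metadata
# backs the public phonenumbers API for region 'MK'; the number below is the
# fixed_line example_number declared above.
if __name__ == '__main__':
    import phonenumbers
    number = phonenumbers.parse('22212345', 'MK')
    print(phonenumbers.format_number(
        number, phonenumbers.PhoneNumberFormat.INTERNATIONAL))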
|
gencer/python-phonenumbers
|
python/phonenumbers/data/region_MK.py
|
Python
|
apache-2.0
| 1,725
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class route_selection_options(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/route-selection-options. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Parameters relating to options for route selection
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "route-selection-options"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"route-selection-options",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/config (container)
YANG Description: Configuration parameters relating to route selection
options
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to route selection
options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/state (container)
YANG Description: State information for the route selection options
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information for the route selection options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class route_selection_options(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/route-selection-options. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Parameters relating to options for route selection
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "route-selection-options"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"route-selection-options",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/config (container)
YANG Description: Configuration parameters relating to route selection
options
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to route selection
options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/state (container)
YANG Description: State information for the route selection options
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/route_selection_options/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information for the route selection options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/route_selection_options/__init__.py
|
Python
|
apache-2.0
| 18,590
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/grace-lsa/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"grace-lsa",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/grace_lsa/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/grace_lsa/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/grace-lsa/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"grace-lsa",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/grace_lsa/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/grace_lsa/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/grace_lsa/tlvs/tlv/unknown_tlv/__init__.py
|
Python
|
apache-2.0
| 12,510
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from rally.common.i18n import _
from rally.common import logging
from rally import consts
from rally.task import context
from rally_ovs.plugins.ovs import ovnclients
LOG = logging.getLogger(__name__)
@context.configure(name="ovn-nbctld", order=112)
class OvnNbctlDaemonContext(ovnclients.OvnClientMixin, context.Context):
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"daemon_mode": {"type": "boolean"},
},
"additionalProperties": True
}
DEFAULT_CONFIG = {
"daemon_mode": True,
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `ovn-nbctld`"))
def setup(self):
super(OvnNbctlDaemonContext, self).setup()
if self.config["daemon_mode"]:
self.context["daemon_socket"] = self._restart_daemon()
else:
self._stop_daemon()
@logging.log_task_wrapper(LOG.info, _("Exit context: `ovn-nbctld`"))
def cleanup(self):
pass
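# Illustrative sketch (not from the original module): how a Rally task file's
# context section might enable this plugin. Only "daemon_mode" is declared in
# CONFIG_SCHEMA, but additionalProperties is True so extra keys pass through.
EXAMPLE_TASK_CONTEXT = {
    "ovn-nbctld": {
        "daemon_mode": True,
    },
}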
|
openvswitch/ovn-scale-test
|
rally_ovs/plugins/ovs/context/ovnnbctl_daemon.py
|
Python
|
apache-2.0
| 1,593
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gcp module - log.py"""
import typing
import unittest
import mock
from tests.providers.gcp import gcp_mocks
class GoogleCloudLogTest(unittest.TestCase):
"""Test Google Cloud Log class."""
# pylint: disable=line-too-long
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.log.GoogleCloudLog.GclApi')
def testListLogs(self, mock_gcl_api):
"""Test that logs of project are correctly listed."""
logs = mock_gcl_api.return_value.logs.return_value.list
logs.return_value.execute.return_value = gcp_mocks.MOCK_LOGS_LIST
list_logs = gcp_mocks.FAKE_LOGS.ListLogs()
self.assertEqual(2, len(list_logs))
self.assertEqual(gcp_mocks.FAKE_LOG_LIST[0], list_logs[0])
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.log.GoogleCloudLog.GclApi')
def testExecuteQuery(self, mock_gcl_api):
"""Test that logs of project are correctly queried."""
query = mock_gcl_api.return_value.entries.return_value.list
query.return_value.execute.return_value = gcp_mocks.MOCK_LOG_ENTRIES
qfilter = ['*']
query_logs = gcp_mocks.FAKE_LOGS.ExecuteQuery(qfilter)
self.assertEqual(2, len(query_logs))
self.assertEqual(gcp_mocks.FAKE_LOG_ENTRIES[0], query_logs[0])
|
google/cloud-forensics-utils
|
tests/providers/gcp/internal/test_log.py
|
Python
|
apache-2.0
| 1,867
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Localisation'
db.create_table(u'cms_localisation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('country_code', self.gf('django.db.models.fields.CharField')(max_length=2)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=3)),
))
db.send_create_signal(u'cms', ['Localisation'])
# Adding field 'Post.localisation'
db.add_column(u'cms_post', 'localisation',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Localisation'], null=True, blank=True),
keep_default=False)
# Deleting field 'Category.site'
db.delete_column(u'cms_category', 'site_id')
# Adding field 'Category.localisation'
db.add_column(u'cms_category', 'localisation',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Localisation'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Localisation'
db.delete_table(u'cms_localisation')
# Deleting field 'Post.localisation'
db.delete_column(u'cms_post', 'localisation_id')
# Adding field 'Category.site'
db.add_column(u'cms_category', 'site',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True),
keep_default=False)
# Deleting field 'Category.localisation'
db.delete_column(u'cms_category', 'localisation_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cms.category': {
'Meta': {'ordering': "('language', 'position', 'title')", 'object_name': 'Category'},
'featured_in_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'last_author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'category_last_author'", 'null': 'True', 'to': u"orm['auth.User']"}),
'localisation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Localisation']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Category']", 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'cms.localisation': {
'Meta': {'object_name': 'Localisation'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
u'cms.post': {
'Meta': {'ordering': "('-created_at',)", 'object_name': 'Post'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_in_category': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'last_author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'post_last_author'", 'null': 'True', 'to': u"orm['auth.User']"}),
'localisation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Localisation']", 'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': u"orm['cms.Category']"}),
'related_posts': ('sortedm2m.fields.SortedManyToManyField', [], {'related_name': "'related_posts_rel_+'", 'blank': 'True', 'to': u"orm['cms.Post']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Post']", 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cms']
|
universalcore/unicore-cms-django
|
cms/migrations/0013_auto__add_localisation__add_field_post_localisation__del_field_categor.py
|
Python
|
bsd-2-clause
| 9,317
|
"""Navigation along a white line using visual feedback."""
import sys
import numpy as np
try:
import cv2
except ImportError:
print "You need OpenCV to use vision modules, sorry."
sys.exit(1)
from util import Enum, rotateImage
from base import DependentFrameProcessor
from main import main
from linedetection import Line, LineDetector
class LineWalker(DependentFrameProcessor):
"""Navigates parallel to a white line boundary of a pickup/dropoff region."""
State = Enum(['INIT', 'SEARCHING', 'GOOD', 'BAD', 'FAILED'])
epsilonLineAngle = 2.0 # if white line is at an angle less than this, no correction is required (stops bot from oscillating around zero)
maxLineAngle = 75.0 # if white line is at angle greater than this, either line detected is spurious or we are way off for line-walking
def __init__(self, options, processorPool):
DependentFrameProcessor.__init__(self, options, processorPool)
self.detector = processorPool.getProcessorByType(LineDetector)
if self.detector is None:
self.loge("__init__", "Could not find a LineDetector; will not activate :(")
self.state = LineWalker.State.INIT
def initialize(self, imageIn, timeNow):
#self.image = imageIn
#self.imageSize = (self.image.shape[1], self.image.shape[0]) # (width, height)
self.headingError = 0.0
# [Sim] Need to get live values from bot_loc (?)
self.sim = False
self.heading = 0.0 # degrees
if self.detector is not None:
self.active = True
else:
self.state = LineWalker.State.FAILED
def process(self, imageIn, timeNow):
#self.image = imageIn
self.headingError = 0.0
# [Sim] Turn imageIn by current angle = self.heading
if self.sim and self.heading != 0.0:
#self.logd("process", "Heading = %.2f" % self.heading)
imageIn = rotateImage(imageIn, self.heading)
# Grab detected line, if any
if self.detector is None:
return # skip if we don't have a detector and we're still called to process
self.state = LineWalker.State.SEARCHING
if self.detector.state is LineDetector.State.FOUND: # skip if line not found
# TODO skip if confidence is low
whiteLine = self.detector.primaryLine
if self.gui:
cv2.circle(self.detector.imageOut, whiteLine.ptLeft, 10, (255, 0, 0), 2)
cv2.circle(self.detector.imageOut, whiteLine.ptRight, 10, (255, 0, 0), 2)
if whiteLine.valid and abs(whiteLine.angle) >= self.epsilonLineAngle and abs(whiteLine.angle) <= self.maxLineAngle:
# Compute heading error between self and line; TODO use actual bot heading, and reject very high changes (position, angle) as invalid
self.headingError = -whiteLine.angle
self.state = LineWalker.State.GOOD
else:
self.state = LineWalker.State.BAD
# [Sim] Artificially change self heading
if self.sim and self.headingError != 0.0:
if whiteLine.ptLeft[1] < whiteLine.ptRight[1]:
self.heading += 1.0
else:
self.heading -= 1.0
# TODO Based on self.detector.state, iterate through move-check loop
return True, self.detector.imageOut
def onKeyPress(self, key, keyChar=None):
if keyChar is None: # special key
keyByte = key & 0xff
if keyByte == 0x51: # LEFT
self.heading -= 1.0
elif keyByte == 0x53: # RIGHT
self.heading += 1.0
return True
|
IEEERobotics/high-level
|
qwe/vision/linewalking.py
|
Python
|
bsd-2-clause
| 3,459
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0003_add_validators'),
('redirects', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='URLRedirect',
fields=[
('route_ptr', models.OneToOneField(to='routes.Route', auto_created=True, primary_key=True, serialize=False, parent_link=True)),
('target', models.URLField(max_length=2000)),
('permanent', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
bases=('routes.route',),
),
]
|
Ian-Foote/django-conman
|
conman/redirects/migrations/0002_urlredirect.py
|
Python
|
bsd-2-clause
| 776
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'VisitRegistrationErrorLog.error_type'
db.alter_column(u'clinics_visitregistrationerrorlog', 'error_type', self.gf('django.db.models.fields.CharField')(max_length=50))
def backwards(self, orm):
# Changing field 'VisitRegistrationErrorLog.error_type'
db.alter_column(u'clinics_visitregistrationerrorlog', 'error_type', self.gf('django.db.models.fields.PositiveIntegerField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstatistic': {
'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.DateField', [], {}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}),
'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_count': ('django.db.models.fields.PositiveIntegerField', [], {}),
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['clinics']
|
myvoice-nigeria/myvoice
|
myvoice/clinics/migrations/0014_auto__chg_field_visitregistrationerrorlog_error_type.py
|
Python
|
bsd-2-clause
| 13,224
|
import os
import numpy as np
import re
import sys
try:
import h5py
except ImportError:
h5py = None
'''
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
'''
from .. import logger, logging
from .base import MFPackage, MissingFile
from .name import Modflow
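# Matches Fortran-style array format specifiers such as '(FREE)', '(BINARY)'
# or '(20F10.3)', capturing the repeat count, type symbol, width and precision.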
_re_fmtin = re.compile(
r'\((?P<body>(?P<rep>\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\d+)(\.(?P<d>\d+))?'
r'|FREE|BINARY)\)')
class MFFileReader(object):
"""MODFLOW file reader"""
_parent_class = MFPackage
def __init__(self, f=None, parent=None):
"""Initialize with a file and an instance of a parent class
Parameters
----------
f : str, file-like object or None
A path to a file, or a file-like reader with a 'readlines'
method, such as BytesIO. If None, then it is obtained from
parent.fpath, or parent.fname
parent : instance of MFPackage
"""
# Set up logger
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logger.level)
if parent is None:
parent = self._parent_class()
if not isinstance(parent, self._parent_class):
self.logger.error(
"'parent' should be an instance of a %r object; found %r",
self._parent_class.__name__, parent.__class__.__name__)
self.parent = parent
if f is None:
if getattr(parent, 'fpath', None) is not None:
f = parent.fpath
elif getattr(parent, 'fname', None) is not None:
f = parent.fname
else:
raise ValueError('unsure how to open file')
# Read data
if hasattr(f, 'readlines'):
# it is a file reader object, e.g. BytesIO
self.fname = f.__class__.__name__
self.lines = f.readlines()
else:
self.fpath = self.parent.fpath = f
if getattr(self, 'fname', None) is None:
self.fname = os.path.split(self.parent.fpath)[1]
# Read whole file at once, then close it
with open(self.parent.fpath, 'r') as fp:
self.lines = fp.readlines()
if self.parent.nam is None:
self.parent.nam = Modflow()
try:
self.parent.nam.ref_dir = os.path.dirname(self.fpath)
except:
pass
self.logger.info("read file '%s' with %d lines",
self.fname, len(self.lines))
self.lineno = 0
self.data_set_num = None
def __len__(self):
"""Returns number of lines"""
return len(self.lines)
def location_exception(self, e):
"""Use to show location of exception while reading file
Example:
fp = _MFFileReader(fpath, self)
try:
fp.read_text(0)
...
fp.check_end()
except Exception as e:
exec(fp.location_exception(e))
"""
location = '%s:%s:%s:Data set %s:' % \
(self.parent.__class__.__name__, self.fname, self.lineno,
self.data_set_num)
if sys.version_info[0] < 3:
return "import sys; raise type(e), type(e)('" + location + "' + " \
"str(e)), sys.exc_info()[2]"
else:
return "import sys; raise type(e)(str(e) + '" + location + "' + " \
"str(e)).with_traceback(sys.exc_info()[2])"
def check_end(self):
"""Check end of file and show messages in logger on status"""
if len(self) == self.lineno:
self.logger.info("finished reading %d lines", self.lineno)
elif len(self) > self.lineno:
remain = len(self) - self.lineno
a, b = 's', ''
if remain == 1:
b, a = a, b
self.logger.warn(
"finished reading %d lines, but %d line%s remain%s",
self.lineno, remain, a, b)
else:
raise ValueError("%d > %d ?" % (self.lineno, len(self)))
@property
def curinfo(self):
"""Returns line and data set number info"""
return str(self.lineno) + ':Data set ' + str(self.data_set_num)
@property
def not_eof(self):
"""Reader is not at the end of file (EOF)"""
return self.lineno < len(self.lines)
@property
def curline(self):
"""Return the current line"""
try:
if self.lineno == 0:
return ''
else:
return self.lines[self.lineno - 1]
except IndexError:
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
def nextline(self, data_set_num=None):
"""Get next line, setting data set number and increment lineno"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug('%s:using nextline', self.curinfo)
self.lineno += 1
try:
line = self.lines[self.lineno - 1]
except IndexError:
self.lineno -= 1
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
if data_set_num is not None:
self.logger.debug(
'%s:returning line with length %d:%r',
self.curinfo, len(line), line)
return line
def readline(self):
"""Alias for nextline()"""
return self.nextline()
def conv(self, item, fmt, name=None):
"""Convert item to format fmt
Parameters
----------
item : str
fmt : str, default ('s')
's' for string or no conversion (default)
'i' for integer
'f' for float
name : str or None
Optional name to provide context information for debugging
"""
try:
if type(fmt) == np.dtype:
return fmt.type(item)
elif fmt == 's': # string
return item
elif fmt == 'i': # integer
return int(item)
elif fmt == 'f': # any floating-point number
# typically either a REAL or DOUBLE PRECISION
return self.parent._float_type.type(item)
else:
raise ValueError('Unknown fmt code %r' % (fmt,))
except ValueError:
if name is not None:
msg = 'Cannot cast %r of %r to type %r' % (name, item, fmt)
else:
msg = 'Cannot cast %r to type %r' % (item, fmt)
raise ValueError(msg)
def get_items(self, data_set_num=None, num_items=None, fmt='s',
multiline=False):
"""Get items from one or more lines (if multiline) into a list
If num_items is defined, then only this count will be returned and any
remaining items from the line will be ignored. If there are too few
items on the line, the values will be some form of "zero", such as
0, 0.0 or ''.
However, if `multiline=True`, then multiple lines can be read to reach
num_items.
If fmt is defined, it must be:
- 's' for string or no conversion (default)
- 'i' for integer
- 'f' for float, as defined by parent._float_type
"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug(
'%s:using get_items for num_items=%s',
self.curinfo, num_items)
startln = self.lineno + 1
fill_missing = False
if num_items is None or not multiline:
items = self.nextline().split()
if num_items is not None and len(items) > num_items:
items = items[:num_items]
if (not multiline and num_items is not None and
len(items) < num_items):
fill_missing = (num_items - len(items))
else:
assert isinstance(num_items, int), type(num_items)
assert num_items > 0, num_items
items = []
while len(items) < num_items:
items += self.nextline().split()
if len(items) > num_items: # trim off too many
items = items[:num_items]
if fmt == 's':
res = items
else:
res = [self.conv(x, fmt) for x in items]
if fill_missing:
if fmt == 's':
fill_value = ''
else:
fill_value = '0'
res += [self.conv(fill_value, fmt)] * fill_missing
if data_set_num is not None:
if multiline:
toline = ' to %s' % (self.lineno,)
else:
toline = ''
self.logger.debug('%s:read %d items from line %d%s',
self.data_set_num, num_items, startln, toline)
return res
def get_named_items(self, data_set_num, names, fmt='s'):
"""Get items into dict. See get_items for fmt usage"""
items = self.get_items(data_set_num, len(names), fmt)
res = {}
for name, item in zip(names, items):
if fmt != 's':
item = self.conv(item, fmt, name)
res[name] = item
return res
def read_named_items(self, data_set_num, names, fmt='s'):
"""Read items into parent. See get_items for fmt usage"""
startln = self.lineno + 1
items = self.get_named_items(data_set_num, names, fmt)
for name in items.keys():
setattr(self.parent, name, items[name])
self.logger.debug('%s:read %d items from line %d',
self.data_set_num, len(items), startln)
def read_text(self, data_set_num=0):
"""Reads 0 or more text (comment) for lines that start with '#'"""
startln = self.lineno + 1
self.parent.text = []
while True:
try:
line = self.nextline(data_set_num)
except IndexError:
break
if line.startswith('#'):
line = line[1:].strip()
self.parent.text.append(line)
else:
self.lineno -= 1 # scroll back one?
break
self.logger.debug('%s:read %d lines of text from line %d to %d',
self.data_set_num,
len(self.parent.text), startln, self.lineno)
def read_options(self, data_set_num, process_aux=True):
"""Read options, and optionally process auxiliary variables"""
line = self.nextline(data_set_num)
self.parent.Options = line.upper().split()
if hasattr(self.parent, 'valid_options'):
for opt in self.parent.Options:
if opt not in self.parent.valid_options:
self.logger.warn("%s:unrecognised option %r",
self.data_set_num, opt)
if process_aux:
raise NotImplementedError
else:
self.logger.debug('%s:read %d options from line %d:%s',
self.data_set_num, len(self.parent.Options),
self.lineno, self.parent.Options)
def read_parameter(self, data_set_num, names):
"""Read [PARAMETER values]
This optional item must start with the word "PARAMETER". If not found,
then names are set to 0.
Parameter names are provided in a list, and are stored as integers
on the parent object.
"""
startln = self.lineno + 1
line = self.nextline(data_set_num)
self.lineno -= 1
if line.upper().startswith('PARAMETER'):
items = self.get_items(num_items=len(names) + 1)
assert items[0].upper() == 'PARAMETER', items[0]
for name, item in zip(names, items[1:]):
value = self.conv(item, 'i', name)
setattr(self.parent, name, value)
else:
for name in names:
setattr(self.parent, name, 0)
self.logger.debug('%s:read %d parameters from line %d',
self.data_set_num, len(names), startln)
def get_array(self, data_set_num, shape, dtype, return_dict=False):
"""Returns array data, similar to array reading utilities U2DREL,
U2DINT, and U1DREL. If return_dict=True, a dict is returned with all
other attributes.
Inputs:
data_set_num - number
shape - 1D array, e.g. 10, or 2D array (20, 30)
dtype - e.g. np.float32 or 'f'
See page 8-57 of the MODFLOW-2005 manual for details.
"""
startln = self.lineno + 1
res = {}
first_line = self.nextline(data_set_num)
# Comments are considered after a '#' character on the first line
if '#' in first_line:
res['text'] = first_line[(first_line.find('#') + 1):].strip()
num_type = np.dtype(dtype).type
res['array'] = ar = np.empty(shape, dtype=dtype)
num_items = ar.size
def read_array_data(obj, fmtin):
'''Helper subroutine to actually read array data'''
fmt = _re_fmtin.search(fmtin.upper())
if not fmt:
raise ValueError(
'cannot understand Fortran format: ' + repr(fmtin))
fmt = fmt.groupdict()
if fmt['body'] == 'BINARY':
data_size = ar.size * ar.dtype.itemsize
if hasattr(obj, 'read'):
data = obj.read(data_size)
else:
raise NotImplementedError(
"not sure how to 'read' from " + repr(obj))
iar = np.fromstring(data, dtype)
else: # ASCII
items = []
if not hasattr(obj, 'readline'):
raise NotImplementedError(
"not sure how to 'readline' from " + repr(obj))
if fmt['body'] == 'FREE':
while len(items) < num_items:
items += obj.readline().split()
else: # interpret Fortran format
if fmt['rep']:
rep = int(fmt['rep'])
else:
rep = 1
width = int(fmt['w'])
while len(items) < num_items:
line = obj.readline()
pos = 0
for n in range(rep):
try:
item = line[pos:pos + width].strip()
pos += width
if item:
items.append(item)
except IndexError:
break
iar = np.fromiter(items, dtype=dtype)
if iar.size != ar.size:
raise ValueError('expected size %s, but found %s' %
(ar.size, iar.size))
return iar
# First, assume using more modern free-format control line
control_line = first_line
dat = control_line.split()
# First item is the control word
res['cntrl'] = cntrl = dat[0].upper()
if cntrl == 'CONSTANT':
# CONSTANT CNSTNT
if len(dat) < 2:
raise ValueError(
'expecting to find at least 2 items for CONSTANT')
res['cnstnt'] = cnstnt = dat[1]
if len(dat) > 2 and 'text' not in res:
st = first_line.find(cnstnt) + len(cnstnt)
res['text'] = first_line[st:].strip()
ar.fill(cnstnt)
elif cntrl == 'INTERNAL':
# INTERNAL CNSTNT FMTIN [IPRN]
if len(dat) < 3:
raise ValueError(
'expecting to find at least 3 items for INTERNAL')
res['cnstnt'] = cnstnt = dat[1]
res['fmtin'] = fmtin = dat[2]
if len(dat) >= 4:
res['iprn'] = iprn = dat[3] # not used
if len(dat) > 4 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
iar = read_array_data(self, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'EXTERNAL':
# EXTERNAL Nunit CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for EXTERNAL')
res['nunit'] = nunit = int(dat[1])
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4] # not used
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
# Needs a reference to nam[nunit]
if self.parent.nam is None:
raise AttributeError(
"reference to 'nam' required for EXTERNAL array")
try:
obj = self.parent.nam[nunit]
except KeyError:
raise KeyError("nunit %s not in nam", nunit)
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'OPEN/CLOSE':
# OPEN/CLOSE FNAME CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for OPEN/CLOSE')
res['fname'] = fname = dat[1]
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4]
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
with open(fname, 'rb') as fp:
iar = read_array_data(fp, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'HDF5':
# GMS extension: http://www.xmswiki.com/xms/GMS:MODFLOW_with_HDF5
if not h5py:
raise ImportError('h5py module required to read HDF5 data')
# HDF5 CNSTNT IPRN "FNAME" "pathInFile" nDim start1 nToRead1 ...
file_ch = r'\w/\.\-\+_\(\)'
dat = re.findall('([' + file_ch + ']+|"[' + file_ch + ' ]+")',
control_line)
if len(dat) < 8:
raise ValueError('expecting to find at least 8 '
'items for HDF5; found ' + str(len(dat)))
assert dat[0].upper() == 'HDF5', dat[0]
res['cnstnt'] = cnstnt = dat[1]
try:
cnstnt_val = num_type(cnstnt)
except ValueError: # e.g. 1.0 as int 1
cnstnt_val = num_type(float(cnstnt))
res['iprn'] = dat[2]
res['fname'] = fname = dat[3].strip('"')
res['pathInFile'] = pathInFile = dat[4].strip('"')
nDim = int(dat[5])
nDim_len = {1: 8, 2: 10, 3: 12}
if nDim not in nDim_len:
raise ValueError('expecting nDim to be one of 1, 2, or 3; '
'found ' + str(nDim))
elif len(dat) < nDim_len[nDim]:
raise ValueError(
('expecting to find at least %d items for HDF5 with '
'%d dimensions; found %d') %
(nDim_len[nDim], nDim, len(dat)))
elif len(dat) > nDim_len[nDim]:
token = dat[nDim_len[nDim]]
st = first_line.find(token) + len(token)
res['text'] = first_line[st:].strip()
if nDim >= 1:
start1, nToRead1 = int(dat[6]), int(dat[7])
slice1 = slice(start1, start1 + nToRead1)
if nDim >= 2:
start2, nToRead2 = int(dat[8]), int(dat[9])
slice2 = slice(start2, start2 + nToRead2)
if nDim == 3:
start3, nToRead3 = int(dat[10]), int(dat[11])
slice3 = slice(start3, start3 + nToRead3)
fpath = os.path.join(self.parent.nam.ref_dir, fname)
if not os.path.isfile(fpath):
raise MissingFile("cannot find file '%s'" % (fpath,))
h5 = h5py.File(fpath, 'r')
ds = h5[pathInFile]
if nDim == 1:
iar = ds[slice1]
elif nDim == 2:
iar = ds[slice1, slice2]
elif nDim == 3:
iar = ds[slice1, slice2, slice3]
h5.close()
ar[:] = iar.reshape(shape) * cnstnt_val
elif len(control_line) > 20: # FIXED-FORMAT CONTROL LINE
# LOCAT CNSTNT FMTIN IPRN
del res['cntrl'] # control word was not used for fixed-format
try:
res['locat'] = locat = int(control_line[0:10])
res['cnstnt'] = cnstnt = control_line[10:20].strip()
if len(control_line) > 20:
res['fmtin'] = fmtin = control_line[20:40].strip().upper()
if len(control_line) > 40:
res['iprn'] = iprn = control_line[40:50].strip()
except ValueError:
raise ValueError('fixed-format control line not '
'understood: ' + repr(control_line))
if len(control_line) > 50 and 'text' not in res:
res['text'] = first_line[50:].strip()
if locat == 0: # all elements are set equal to cnstnt
ar.fill(cnstnt)
else:
nunit = abs(locat)
if self.parent.nunit == nunit:
obj = self
elif self.parent.nam is None:
obj = self
else:
obj = self.parent.nam[nunit]
if locat < 0:
fmtin = '(BINARY)'
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
else:
raise ValueError('array control line not understood: ' +
repr(control_line))
if 'text' in res:
withtext = ' with text "' + res['text'] + '"'
else:
withtext = ''
self.logger.debug(
'%s:read %r array with shape %s from line %d to %d%s',
self.data_set_num, ar.dtype.char, ar.shape,
startln, self.lineno, withtext)
if return_dict:
return res
else:
return ar
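# Example usage (a minimal sketch, not part of the package; assumes `pkg` is an
# MFPackage whose file holds a 10-by-20 REAL array as data set 3 -- the data
# set number and shape here are illustrative only):
#
#   fp = MFFileReader(parent=pkg)
#   fp.read_text(0)                              # data set 0: leading '#' comments
#   top = fp.get_array(3, (10, 20), np.float32)  # parse control line and array data
#   fp.check_end()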
|
mwtoews/moflow
|
moflow/mf/reader.py
|
Python
|
bsd-2-clause
| 22,960
|
#!/usr/bin/env python
import os
import sys
# Add the project to the path
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
# setup the environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dicom_review.conf.settings')
os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
from django.core.management import execute_from_command_line
execute_from_command_line()
|
chop-dbhi/django-dicom-review
|
bin/manage.py
|
Python
|
bsd-2-clause
| 380
|
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.interface import network_interface as netifc
from mufsim.logger import log
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
@instr("descriptors")
class InstDescriptors(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value == -1:
descrs = netifc.get_descriptors()
else:
if db.getobj(who).objtype != "player":
raise MufRuntimeError("Expected #-1 or player dbref.")
descrs = netifc.user_descrs(who.value)
for descr in descrs:
fr.data_push(descr)
fr.data_push(len(descrs))
@instr("descr_array")
class InstDescrArray(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value == -1:
descrs = netifc.get_descriptors()
else:
if db.getobj(who).objtype != "player":
raise MufRuntimeError("Expected #-1 or player dbref.")
descrs = netifc.user_descrs(who.value)
fr.data_push_list(descrs)
@instr("descrcon")
class InstDescrCon(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_con(descr))
@instr("descrdbref")
class InstDescrDBRef(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
ref = netifc.descr_dbref(descr)
obj = si.DBRef(ref)
fr.data_push(obj)
@instr("descr_setuser")
class InstDescrSetUser(Instruction):
def execute(self, fr):
fr.check_underflow(3)
pw = fr.data_pop(str)
who = fr.data_pop_object()
descr = fr.data_pop(int)
if who.objtype != "player":
raise MufRuntimeError("Expected player dbref.")
was = netifc.descr_dbref(descr)
if db.getobj(who).password != pw:
raise MufRuntimeError("Incorrect password!")
if netifc.descr_set_user(descr, who.dbref):
was = db.getobj(was)
# TODO: actually check password?
log("RECONNECTED DESCRIPTOR %d FROM %s TO %s USING PW '%s'" %
(descr, was, who, pw))
@instr("descrboot")
class InstDescrBoot(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
who = netifc.descr_dbref(descr)
if netifc.descr_disconnect(descr):
log("BOOTED DESCRIPTOR %d: %s" % (descr, db.getobj(who)))
@instr("descrnotify")
class InstDescrNotify(Instruction):
def execute(self, fr):
fr.check_underflow(2)
msg = fr.data_pop(str)
descr = fr.data_pop(int)
who = netifc.descr_dbref(descr)
if netifc.is_descr_online(descr):
log("NOTIFY TO DESCR %d, %s: %s" %
(descr, db.getobj(who), msg))
@instr("descrflush")
class InstDescrFlush(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
if descr == -1:
netifc.flush_all_descrs()
log("DESCRFLUSH ALL DESCRS.")
elif netifc.is_descr_online(descr):
netifc.descr_flush(descr)
who = netifc.descr_dbref(descr)
log("DESCRFLUSH %d, %s" % (descr, db.getobj(who)))
@instr("descr")
class InstDescr(Instruction):
def execute(self, fr):
# TODO: get real descr.
fr.data_push(db.getobj(fr.user).descr)
@instr("firstdescr")
class InstFirstDescr(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value < 0:
descrs = netifc.get_descriptors()
else:
descrs = netifc.user_descrs(who.value)
if descrs:
fr.data_push(descrs[0])
else:
fr.data_push(0)
@instr("lastdescr")
class InstLastDescr(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value < 0:
descrs = netifc.get_descriptors()
else:
descrs = netifc.user_descrs(who.value)
if descrs:
fr.data_push(descrs[-1])
else:
fr.data_push(0)
@instr("nextdescr")
class InstNextDescr(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
descrs = netifc.get_descriptors()
if descr in descrs:
pos = descrs.index(descr) + 1
if pos >= len(descrs):
fr.data_push(0)
else:
fr.data_push(descrs[pos])
else:
fr.data_push(0)
@instr("descrbufsize")
class InstDescrBufSize(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_bufsize(descr))
@instr("descrsecure?")
class InstDescrSecureP(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(1 if netifc.descr_secure(descr) else 0)
@instr("descruser")
class InstDescrUser(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
who = netifc.descr_user(descr)
if who >= 0:
fr.data_push(db.getobj(who).name)
else:
fr.data_push("")
@instr("descrhost")
class InstDescrHost(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_host(descr))
@instr("descrtime")
class InstDescrTime(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(int(netifc.descr_time(descr)))
@instr("descridle")
class InstDescrIdle(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(int(netifc.descr_idle(descr)))
@instr("descrleastidle")
class InstDescrLeastIdle(Instruction):
def execute(self, fr):
who = fr.data_pop_object()
descrs = netifc.user_descrs(who.dbref)
idles = [netifc.descr_idle(descr) for descr in descrs]
fr.data_push(min(idles))
@instr("descrmostidle")
class InstDescrMostIdle(Instruction):
def execute(self, fr):
who = fr.data_pop_object()
descrs = netifc.user_descrs(who.dbref)
idles = [netifc.descr_idle(descr) for descr in descrs]
fr.data_push(max(idles))
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
|
revarbat/mufsim
|
mufsim/insts/descriptors.py
|
Python
|
bsd-2-clause
| 6,210
|
from setuptools import setup
setup(
name='tracer', version='0.1', description="Symbolically trace concrete inputs.",
packages=['tracer', 'tracer.cachemanager'],
install_requires=['shellphish-qemu'],
)
|
angr/tracer
|
setup.py
|
Python
|
bsd-2-clause
| 207
|
from django.core.management.base import BaseCommand
from twiggy_goodies.django import LogMixin
from allmychanges.models import Repo
class Command(LogMixin, BaseCommand):
help = u"""Deletes all repositories and related information."""
def handle(self, *args, **options):
Repo.objects.all().delete()
|
AllMyChanges/allmychanges.com
|
allmychanges/management/commands/cleardb.py
|
Python
|
bsd-2-clause
| 318
|
# THIS FILE IS GENERATED FROM THE PERMUTE SETUP.PY
version='0.1.alpha4'
|
qqqube/permute
|
permute/version.py
|
Python
|
bsd-2-clause
| 72
|
class GraphQLException(Exception):
pass
|
tallstreet/graphql-parser-python
|
graphqlparser/exceptions.py
|
Python
|
bsd-2-clause
| 44
|
#
# Command Generator
#
# Send SNMP GETBULK request using the following options:
#
# * with SNMPv2c, community 'public'
# * over IPv4/UDP
# * to an Agent at demo.snmplabs.com:161
# * with values non-repeaters = 0, max-repetitions = 25
# * for two OIDs in string form
# * stop when response OIDs leave the scopes of initial OIDs
#
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.bulkCmd(
cmdgen.CommunityData('public'),
cmdgen.UdpTransportTarget(('demo.snmplabs.com', 161)),
0, 25,
'1.3.6.1.2.1.2.2',
'1.3.6.1.2.1.2.3',
)
if errorIndication:
print(errorIndication)
else:
if errorStatus:
print('%s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBindTable[-1][int(errorIndex)-1][0] or '?'
)
)
else:
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
print('%s = %s' % (name.prettyPrint(), val.prettyPrint()))
|
ww9rivers/pysnmp
|
examples/v3arch/oneliner/manager/cmdgen/getbulk-v2c.py
|
Python
|
bsd-2-clause
| 1,059
|
from __future__ import absolute_import, unicode_literals
import re
from collections import OrderedDict
from django import template
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import escape, format_html, smart_urlquote
from django.utils.safestring import SafeData, mark_safe
from rest_framework.compat import apply_markdown, pygments_highlight
from rest_framework.renderers import HTMLFormRenderer
from rest_framework.utils.urls import replace_query_param
register = template.Library()
# Regex for adding classes to html snippets
class_re = re.compile(r'(?<=class=["\'])(.*)(?=["\'])')
@register.tag(name='code')
def highlight_code(parser, token):
code = token.split_contents()[-1]
nodelist = parser.parse(('endcode',))
parser.delete_first_token()
return CodeNode(code, nodelist)
class CodeNode(template.Node):
style = 'emacs'
def __init__(self, lang, code):
self.lang = lang
self.nodelist = code
def render(self, context):
text = self.nodelist.render(context)
return pygments_highlight(text, self.lang, self.style)
@register.filter()
def with_location(fields, location):
return [
field for field in fields
if field.location == location
]
@register.simple_tag
def form_for_link(link):
import coreschema
properties = OrderedDict([
(field.name, field.schema or coreschema.String())
for field in link.fields
])
required = [
field.name
for field in link.fields
if field.required
]
schema = coreschema.Object(properties=properties, required=required)
return mark_safe(coreschema.render_to_form(schema))
@register.simple_tag
def render_markdown(markdown_text):
if apply_markdown is None:
return markdown_text
return mark_safe(apply_markdown(markdown_text))
@register.simple_tag
def get_pagination_html(pager):
return pager.to_html()
@register.simple_tag
def render_form(serializer, template_pack=None):
style = {'template_pack': template_pack} if template_pack else {}
renderer = HTMLFormRenderer()
return renderer.render(serializer.data, None, {'style': style})
@register.simple_tag
def render_field(field, style):
renderer = style.get('renderer', HTMLFormRenderer())
return renderer.render_field(field, style)
@register.simple_tag
def optional_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return ''
snippet = "<li><a href='{href}?next={next}'>Log in</a></li>"
snippet = format_html(snippet, href=login_url, next=escape(request.path))
return mark_safe(snippet)
@register.simple_tag
def optional_docs_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return 'log in'
snippet = "<a href='{href}?next={next}'>log in</a>"
snippet = format_html(snippet, href=login_url, next=escape(request.path))
return mark_safe(snippet)
@register.simple_tag
def optional_logout(request, user):
"""
Include a logout snippet if REST framework's logout view is in the URLconf.
"""
try:
logout_url = reverse('rest_framework:logout')
except NoReverseMatch:
snippet = format_html('<li class="navbar-text">{user}</li>', user=escape(user))
return mark_safe(snippet)
snippet = """<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
{user}
<b class="caret"></b>
</a>
<ul class="dropdown-menu">
<li><a href='{href}?next={next}'>Log out</a></li>
</ul>
</li>"""
snippet = format_html(snippet, user=escape(user), href=logout_url, next=escape(request.path))
return mark_safe(snippet)
@register.simple_tag
def add_query_param(request, key, val):
"""
Add a query parameter to the current request url, and return the new url.
"""
iri = request.get_full_path()
uri = iri_to_uri(iri)
return escape(replace_query_param(uri, key, val))
@register.filter
def as_string(value):
if value is None:
return ''
return '%s' % value
@register.filter
def as_list_of_strings(value):
return [
'' if (item is None) else ('%s' % item)
for item in value
]
@register.filter
def add_class(value, css_class):
"""
https://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template
Inserts classes into template variables that contain HTML tags,
useful for modifying forms without needing to change the Form objects.
Usage:
{{ field.label_tag|add_class:"control-label" }}
In the case of REST Framework, the filter is used to add Bootstrap-specific
classes to the forms.
"""
html = six.text_type(value)
match = class_re.search(html)
if match:
m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
css_class, css_class),
match.group(1))
if not m:
return mark_safe(class_re.sub(match.group(1) + " " + css_class,
html))
else:
return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
return value
@register.filter
def format_value(value):
if getattr(value, 'is_hyperlink', False):
name = six.text_type(value.obj)
return mark_safe('<a href=%s>%s</a>' % (value, escape(name)))
if value is None or isinstance(value, bool):
return mark_safe('<code>%s</code>' % {True: 'true', False: 'false', None: 'null'}[value])
elif isinstance(value, list):
if any([isinstance(item, (list, dict)) for item in value]):
template = loader.get_template('rest_framework/admin/list_value.html')
else:
template = loader.get_template('rest_framework/admin/simple_list_value.html')
context = {'value': value}
return template.render(context)
elif isinstance(value, dict):
template = loader.get_template('rest_framework/admin/dict_value.html')
context = {'value': value}
return template.render(context)
elif isinstance(value, six.string_types):
if (
(value.startswith('http:') or value.startswith('https:')) and not
re.search(r'\s', value)
):
return mark_safe('<a href="{value}">{value}</a>'.format(value=escape(value)))
elif '@' in value and not re.search(r'\s', value):
return mark_safe('<a href="mailto:{value}">{value}</a>'.format(value=escape(value)))
elif '\n' in value:
return mark_safe('<pre>%s</pre>' % escape(value))
return six.text_type(value)
@register.filter
def items(value):
"""
Simple filter to return the items of the dict. Useful when the dict may
have a key 'items' which is resolved first in Django template dot-notation
lookup. See issue #4931
Also see: https://stackoverflow.com/questions/15416662/django-template-loop-over-dictionary-items-with-items-as-key
"""
return value.items()
@register.filter
def data(value):
"""
Simple filter to access `data` attribute of object,
specifically coreapi.Document.
As per `items` filter above, allows accessing `document.data` when
Document contains Link keyed-at "data".
See issue #5395
"""
return value.data
@register.filter
def schema_links(section, sec_key=None):
"""
Recursively find every link in a schema, even nested.
"""
NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys
links = section.links
if section.data:
data = section.data.items()
for sub_section_key, sub_section in data:
new_links = schema_links(sub_section, sec_key=sub_section_key)
links.update(new_links)
if sec_key is not None:
new_links = OrderedDict()
for link_key, link in links.items():
new_key = NESTED_FORMAT % (sec_key, link_key)
new_links.update({new_key: link})
return new_links
return links
@register.filter
def add_nested_class(value):
if isinstance(value, dict):
return 'class=nested'
if isinstance(value, list) and any([isinstance(item, (list, dict)) for item in value]):
return 'class=nested'
return ''
# Bunch of stuff cloned from urlize
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"]
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'),
('"', '"'), ("'", "'")]
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
def smart_urlquote_wrapper(matched_url):
"""
Simple wrapper for smart_urlquote. ValueError("Invalid IPv6 URL") can
be raised here, see issue #1386
"""
try:
return smart_urlquote(matched_url)
except ValueError:
return None
@register.filter
def urlize_quoted_links(text, trim_url_limit=None, nofollow=True, autoescape=True):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
return limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1
):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote_wrapper(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote_wrapper('http://%s' % middle)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
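# Illustrative behaviour (a hedged sketch, not part of the upstream module).
# For unsafe input, lead/trail punctuation is escaped and bare www. domains are
# prefixed with http://, so a call such as
#   urlize_quoted_links('docs at www.example.com.')
# should yield roughly
#   'docs at <a href="http://www.example.com" rel="nofollow">www.example.com</a>.'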
@register.filter
def break_long_headers(header):
"""
    Breaks headers longer than 160 characters (roughly one page width)
    when possible, i.e. when they are comma separated.
"""
if len(header) > 160 and ',' in header:
header = mark_safe('<br> ' + ', <br>'.join(header.split(',')))
return header
|
kgeorgy/django-rest-framework
|
rest_framework/templatetags/rest_framework.py
|
Python
|
bsd-2-clause
| 13,205
|
"""
This module includes a class that interfaces with scikit-learn's logistic
regression model.
"""
import numpy as np
import sklearn.linear_model
from libact.base.interfaces import ProbabilisticModel
class LogisticRegression(ProbabilisticModel):
"""Logistic Regression Classifier
References
----------
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
"""
def __init__(self, *args, **kwargs):
self.model = sklearn.linear_model.LogisticRegression(*args, **kwargs)
def train(self, dataset, *args, **kwargs):
return self.model.fit(*(dataset.format_sklearn() + args), **kwargs)
def predict(self, feature, *args, **kwargs):
return self.model.predict(feature, *args, **kwargs)
def score(self, testing_dataset, *args, **kwargs):
return self.model.score(*(testing_dataset.format_sklearn() + args), **kwargs)
def predict_real(self, feature, *args, **kwargs):
dvalue = self.model.decision_function(feature, *args, **kwargs)
if len(np.shape(dvalue)) == 1: # n_classes == 2
return np.vstack((-dvalue, dvalue)).T
else:
return dvalue
def predict_proba(self, feature, *args, **kwargs):
return self.model.predict_proba(feature, *args, **kwargs)
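# Hedged usage sketch (added for illustration; not part of the upstream libact
# file). Anything exposing a format_sklearn() method that returns an (X, y)
# pair can be passed to train(); the tiny stand-in dataset below is
# hypothetical and exists only for this example.
if __name__ == '__main__':
    class _ToyDataset:
        """Minimal stand-in for libact's dataset interface (illustration only)."""
        def __init__(self, X, y):
            self._X, self._y = np.asarray(X), np.asarray(y)
        def format_sklearn(self):
            return self._X, self._y
    X = [[0.0, 0.1], [0.2, 0.9], [0.8, 0.2], [0.9, 0.8]]
    y = [0, 1, 0, 1]
    clf = LogisticRegression(C=1.0)
    clf.train(_ToyDataset(X, y))
    print(clf.predict([[0.5, 0.5]]))       # single predicted label
    print(clf.predict_real([[0.5, 0.5]]))  # per-class decision values, shape (1, 2)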
|
ntucllab/libact
|
libact/models/logistic_regression.py
|
Python
|
bsd-2-clause
| 1,314
|
#===========================================================================
#
# MQTT message broker location. The default port is 1883 for regular
# connections and 8883 for SSL.
#
#===========================================================================
host = '192.168.1.5'
port = 1883
# Keep-alive time in seconds. The client sends a ping if no other message
# has been sent within this interval.
keepAlive = 60
#===========================================================================
#
# User name and password (strings) for logging in to the broker.
#
#===========================================================================
user = None
password = None
#===========================================================================
#
# Secure connection options. See the paho-mqtt docs for details.
#
#===========================================================================
# List of CA certificate files for verifying the broker.
ca_certs = [
]
certFile = None
keyFile = None
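# Hedged sketch of how a paho-mqtt client might consume the settings above
# (illustration only; kept as a comment because this module holds configuration
# values, and the actual T-Home loader is not shown here):
#
#   import paho.mqtt.client as mqtt
#
#   client = mqtt.Client()
#   if user is not None:
#       client.username_pw_set(user, password)
#   if ca_certs:
#       # tls_set() takes a single CA bundle path; using the first list entry
#       # here is an assumption made for this sketch.
#       client.tls_set(ca_certs=ca_certs[0], certfile=certFile, keyfile=keyFile)
#   client.connect(host, port, keepAlive)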
|
TD22057/T-Home
|
conf/broker.py
|
Python
|
bsd-2-clause
| 955
|
from decimal import Decimal
from functools import partial
import html
import json
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.encoding import force_str
from django.utils.encoding import smart_str
from django.utils.formats import number_format
from django.utils.functional import Promise
from django.utils.html import strip_tags
from django.utils.translation import gettext_lazy as _
def field_as_string(obj, field, ascii=False):
value = getattr(obj, field + '_csv_display', None)
if value is None:
value = getattr(obj, field + '_display', None)
if value is None:
value = getattr(obj, field)
if isinstance(value, bool):
value = (_('no'), _('yes'))[value]
    if isinstance(value, (float, int, Decimal)):
value = number_format(value)
    if isinstance(value, (list, QuerySet)):
value = ", ".join([str(val) for val in value])
return smart_plain_text(value, ascii)
def plain_text(html_content):
return html.unescape(strip_tags(html_content))
def smart_plain_text(s, ascii=False):
if s is None:
return ''
try:
        # Convert to text, remove HTML tags and convert HTML entities.
us = plain_text(str(s))
if ascii:
return smart_str(us)
return us
except UnicodeDecodeError:
return smart_str(s)
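# Illustrative behaviour of the helpers above (hedged sketch, not upstream code):
#
#   >>> plain_text('<b>5 &gt; 3</b>')
#   '5 > 3'
#   >>> smart_plain_text(None)
#   ''
#   >>> smart_plain_text(['a', 'b'])   # any object is str()-ified first
#   "['a', 'b']"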
class DjangoJSONEncoder(DjangoJSONEncoder):
"""
Taken (slightly modified) from:
http://stackoverflow.com/questions/2249792/json-serializing-django-models-with-simplejson
"""
def default(self, obj):
# https://docs.djangoproject.com/en/dev/topics/serialization/#id2
if isinstance(obj, Promise):
return force_str(obj)
if isinstance(obj, QuerySet):
            # `default` must return a Python-serializable structure;
            # the easiest way is to load the JSON string produced by
            # `serialize` and return it.
return json.loads(serialize('json', obj))
return force_str(obj)
# Partial function: json_django_dumps(my_dict) can now be used instead of
# json.dumps(my_dict, cls=DjangoJSONEncoder).
json_django_dumps = partial(json.dumps, cls=DjangoJSONEncoder)
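# Hedged usage sketch (not part of the upstream helpers module). Thanks to the
# encoder above, values such as lazy strings, querysets, Decimals and dates are
# stringified without passing cls= on every call, e.g.
#
#   from decimal import Decimal
#   import datetime
#
#   json_django_dumps({'price': Decimal('9.90'), 'day': datetime.date(2020, 1, 1)})
#   # -> '{"price": "9.90", "day": "2020-01-01"}'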
|
makinacorpus/Geotrek
|
mapentity/serializers/helpers.py
|
Python
|
bsd-2-clause
| 2,369
|