# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.
The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
The other part of the debugger is the stepper (c.f. stepper_cli.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import source_utils
# Shorthand for constructing rich (attributed) text lines.
RL = debugger_cli_common.RichLine

# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "| " # Used for unfinished recursion depths.
HANG_FINISHED = " "
HANG_SUFFIX = "|- "

# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "

# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."

# Valid values for the -s/--sort_by flag of the list_tensors command.
SORT_TENSORS_BY_TIMESTAMP = "timestamp"
SORT_TENSORS_BY_DUMP_SIZE = "dump_size"
SORT_TENSORS_BY_OP_TYPE = "op_type"
SORT_TENSORS_BY_TENSOR_NAME = "tensor_name"
def _add_main_menu(output,
                   node_name=None,
                   enable_list_tensors=True,
                   enable_node_info=True,
                   enable_print_tensor=True,
                   enable_list_inputs=True,
                   enable_list_outputs=True):
  """Attach the main menu annotation to a command's screen output.

  Args:
    output: (debugger_cli_common.RichTextLines) the output object to modify.
    node_name: (str or None) name of the node involved (if any). If None,
      the menu items node_info, list_inputs and list_outputs will be
      automatically disabled, overriding the values of arguments
      enable_node_info, enable_list_inputs and enable_list_outputs.
    enable_list_tensors: (bool) whether the list_tensor menu item will be
      enabled.
    enable_node_info: (bool) whether the node_info item will be enabled.
    enable_print_tensor: (bool) whether the print_tensor item will be enabled.
    enable_list_inputs: (bool) whether the item list_inputs will be enabled.
    enable_list_outputs: (bool) whether the item list_outputs will be enabled.
  """
  menu = debugger_cli_common.Menu()

  menu.append(
      debugger_cli_common.MenuItem(
          "list_tensors", "list_tensors", enabled=enable_list_tensors))

  # The node-specific items carry a real command only when a node name is
  # available; otherwise they appear disabled with no command.
  if node_name:
    node_items = [
        ("node_info", "node_info -a -d -t %s" % node_name, enable_node_info),
        ("print_tensor", "print_tensor %s" % node_name, enable_print_tensor),
        ("list_inputs", "list_inputs -c -r %s" % node_name,
         enable_list_inputs),
        ("list_outputs", "list_outputs -c -r %s" % node_name,
         enable_list_outputs),
    ]
  else:
    node_items = [
        (caption, None, False)
        for caption in ("node_info", "print_tensor", "list_inputs",
                        "list_outputs")
    ]
  for caption, content, enabled in node_items:
    menu.append(
        debugger_cli_common.MenuItem(caption, content, enabled=enabled))

  menu.append(debugger_cli_common.MenuItem("run_info", "run_info"))
  menu.append(debugger_cli_common.MenuItem("help", "help"))

  output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
class DebugAnalyzer(object):
"""Analyzer for debug data from dump directories."""
def __init__(self, debug_dump):
  """DebugAnalyzer constructor.

  Args:
    debug_dump: A DebugDumpDir object.
  """
  self._debug_dump = debug_dump

  # Initialize tensor filters state.
  # Maps filter name (str) to a callable(dump_datum, tensor) -> bool; see
  # add_tensor_filter() for the filter contract.
  self._tensor_filters = {}

  # Argument parsers for command handlers, keyed by command name. Usage
  # strings are suppressed; help text is retrieved through get_help().
  self._arg_parsers = {}

  # Parser for list_tensors.
  ap = argparse.ArgumentParser(
      description="List dumped intermediate tensors.",
      usage=argparse.SUPPRESS)
  ap.add_argument(
      "-f",
      "--tensor_filter",
      dest="tensor_filter",
      type=str,
      default="",
      help="List only Tensors passing the filter of the specified name")
  ap.add_argument(
      "-n",
      "--node_name_filter",
      dest="node_name_filter",
      type=str,
      default="",
      help="filter node name by regex.")
  ap.add_argument(
      "-t",
      "--op_type_filter",
      dest="op_type_filter",
      type=str,
      default="",
      help="filter op type by regex.")
  ap.add_argument(
      "-s",
      "--sort_by",
      dest="sort_by",
      type=str,
      default=SORT_TENSORS_BY_TIMESTAMP,
      help=("the field to sort the data by: (%s | %s | %s | %s)" %
            (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,
             SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))
  ap.add_argument(
      "-r",
      "--reverse",
      dest="reverse",
      action="store_true",
      help="sort the data in reverse (descending) order")
  self._arg_parsers["list_tensors"] = ap

  # Parser for node_info.
  ap = argparse.ArgumentParser(
      description="Show information about a node.", usage=argparse.SUPPRESS)
  ap.add_argument(
      "node_name",
      type=str,
      help="Name of the node or an associated tensor, e.g., "
      "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
  ap.add_argument(
      "-a",
      "--attributes",
      dest="attributes",
      action="store_true",
      help="Also list attributes of the node.")
  ap.add_argument(
      "-d",
      "--dumps",
      dest="dumps",
      action="store_true",
      help="Also list dumps available from the node.")
  ap.add_argument(
      "-t",
      "--traceback",
      dest="traceback",
      action="store_true",
      help="Also include the traceback of the node's creation "
      "(if available in Python).")
  self._arg_parsers["node_info"] = ap

  # Parser for list_inputs.
  ap = argparse.ArgumentParser(
      description="Show inputs to a node.", usage=argparse.SUPPRESS)
  ap.add_argument(
      "node_name",
      type=str,
      help="Name of the node or an output tensor from the node, e.g., "
      "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
  ap.add_argument(
      "-c", "--control", action="store_true", help="Include control inputs.")
  ap.add_argument(
      "-d",
      "--depth",
      dest="depth",
      type=int,
      default=20,
      help="Maximum depth of recursion used when showing the input tree.")
  ap.add_argument(
      "-r",
      "--recursive",
      dest="recursive",
      action="store_true",
      help="Show inputs to the node recursively, i.e., the input tree.")
  ap.add_argument(
      "-t",
      "--op_type",
      action="store_true",
      help="Show op types of input nodes.")
  self._arg_parsers["list_inputs"] = ap

  # Parser for list_outputs.
  ap = argparse.ArgumentParser(
      description="Show the nodes that receive the outputs of given node.",
      usage=argparse.SUPPRESS)
  ap.add_argument(
      "node_name",
      type=str,
      help="Name of the node or an output tensor from the node, e.g., "
      "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
  ap.add_argument(
      "-c", "--control", action="store_true", help="Include control inputs.")
  ap.add_argument(
      "-d",
      "--depth",
      dest="depth",
      type=int,
      default=20,
      help="Maximum depth of recursion used when showing the output tree.")
  ap.add_argument(
      "-r",
      "--recursive",
      dest="recursive",
      action="store_true",
      help="Show recipients of the node recursively, i.e., the output "
      "tree.")
  ap.add_argument(
      "-t",
      "--op_type",
      action="store_true",
      help="Show op types of recipient nodes.")
  self._arg_parsers["list_outputs"] = ap

  # Parser for print_tensor.
  ap = argparse.ArgumentParser(
      description="Print the value of a dumped tensor.",
      usage=argparse.SUPPRESS)
  ap.add_argument(
      "tensor_name",
      type=str,
      help="Name of the tensor, followed by any slicing indices, "
      "e.g., hidden1/Wx_plus_b/MatMul:0, "
      "hidden1/Wx_plus_b/MatMul:0[1, :]")
  ap.add_argument(
      "-n",
      "--number",
      dest="number",
      type=int,
      default=-1,
      help="0-based dump number for the specified tensor. "
      "Required for tensor with multiple dumps.")
  ap.add_argument(
      "-r",
      "--ranges",
      dest="ranges",
      type=str,
      default="",
      help="Numerical ranges to highlight tensor elements in. "
      "Examples: -r 0,1e-8, -r [-0.1,0.1], "
      "-r \"[[-inf, -0.1], [0.1, inf]]\"")
  ap.add_argument(
      "-a",
      "--all",
      dest="print_all",
      action="store_true",
      help="Print the tensor in its entirety, i.e., do not use ellipses.")
  ap.add_argument(
      "-s",
      "--numeric_summary",
      action="store_true",
      help="Include summary for non-empty tensors of numeric (int*, float*, "
      "complex*) and Boolean types.")
  self._arg_parsers["print_tensor"] = ap

  # Parser for print_source.
  ap = argparse.ArgumentParser(
      description="Print a Python source file with overlaid debug "
      "information, including the nodes (ops) or Tensors created at the "
      "source lines.",
      usage=argparse.SUPPRESS)
  ap.add_argument(
      "source_file_path",
      type=str,
      help="Path to the source file.")
  ap.add_argument(
      "-t",
      "--tensors",
      dest="tensors",
      action="store_true",
      help="Label lines with dumped Tensors, instead of ops.")
  ap.add_argument(
      "-m",
      "--max_elements_per_line",
      type=int,
      default=10,
      help="Maximum number of elements (ops or Tensors) to show per source "
      "line.")
  ap.add_argument(
      "-b",
      "--line_begin",
      type=int,
      default=1,
      help="Print source beginning at line number (1-based.)")
  self._arg_parsers["print_source"] = ap

  # Parser for list_source.
  ap = argparse.ArgumentParser(
      description="List source files responsible for constructing nodes and "
      "tensors present in the run().",
      usage=argparse.SUPPRESS)
  ap.add_argument(
      "-p",
      "--path_filter",
      type=str,
      default="",
      help="Regular expression filter for file path.")
  ap.add_argument(
      "-n",
      "--node_name_filter",
      type=str,
      default="",
      help="Regular expression filter for node name.")
  self._arg_parsers["list_source"] = ap

  # TODO(cais): Implement list_nodes.
def add_tensor_filter(self, filter_name, filter_callable):
"""Add a tensor filter.
A tensor filter is a named callable of the signature:
filter_callable(dump_datum, tensor),
wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying
metadata about the dumped tensor, including tensor name, timestamps, etc.
tensor is the value of the dumped tensor as an numpy.ndarray object.
The return value of the function is a bool.
This is the same signature as the input argument to
debug_data.DebugDumpDir.find().
Args:
filter_name: (str) name of the filter. Cannot be empty.
filter_callable: (callable) a filter function of the signature described
as above.
Raises:
ValueError: If filter_name is an empty str.
TypeError: If filter_name is not a str.
Or if filter_callable is not callable.
"""
if not isinstance(filter_name, str):
raise TypeError("Input argument filter_name is expected to be str, "
"but is not.")
# Check that filter_name is not an empty str.
if not filter_name:
raise ValueError("Input argument filter_name cannot be empty.")
# Check that filter_callable is callable.
if not callable(filter_callable):
raise TypeError(
"Input argument filter_callable is expected to be callable, "
"but is not.")
self._tensor_filters[filter_name] = filter_callable
def get_tensor_filter(self, filter_name):
"""Retrieve filter function by name.
Args:
filter_name: Name of the filter set during add_tensor_filter() call.
Returns:
The callable associated with the filter name.
Raises:
ValueError: If there is no tensor filter of the specified filter name.
"""
if filter_name not in self._tensor_filters:
raise ValueError("There is no tensor filter named \"%s\"" % filter_name)
return self._tensor_filters[filter_name]
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def list_tensors(self, args, screen_info=None):
  """Command handler for list_tensors.

  List tensors dumped during debugged Session.run() call.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols.

  Returns:
    Output text lines as a RichTextLines object.
  """
  # TODO(cais): Add annotations of substrings for dumped tensor names, to
  # facilitate on-screen highlighting/selection of node names.
  _ = screen_info

  parsed = self._arg_parsers["list_tensors"].parse_args(args)

  # Compile the optional regex filters and record a human-readable
  # description of each active filter for the output header.
  filter_strs = []
  if parsed.op_type_filter:
    op_type_regex = re.compile(parsed.op_type_filter)
    filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
  else:
    op_type_regex = None

  if parsed.node_name_filter:
    node_name_regex = re.compile(parsed.node_name_filter)
    filter_strs.append("Node name regex filter: \"%s\"" %
                       parsed.node_name_filter)
  else:
    node_name_regex = None

  output = debugger_cli_common.RichTextLines(filter_strs)
  output.append("")

  if parsed.tensor_filter:
    try:
      filter_callable = self.get_tensor_filter(parsed.tensor_filter)
    except ValueError:
      output = cli_shared.error("There is no tensor filter named \"%s\"." %
                                parsed.tensor_filter)
      _add_main_menu(output, node_name=None, enable_list_tensors=False)
      return output

    data_to_show = self._debug_dump.find(filter_callable)
  else:
    data_to_show = self._debug_dump.dumped_tensor_data

  # TODO(cais): Implement filter by lambda on tensor value.

  # Column widths are measured before sorting; the timestamp width relies on
  # the default ascending-timestamp order of the data.
  max_timestamp_width, max_dump_size_width, max_op_type_width = (
      self._measure_tensor_list_column_widths(data_to_show))

  # Sort the data.
  data_to_show = self._sort_dump_data_by(
      data_to_show, parsed.sort_by, parsed.reverse)

  output.extend(
      self._tensor_list_column_heads(parsed, max_timestamp_width,
                                     max_dump_size_width, max_op_type_width))

  dump_count = 0
  for dump in data_to_show:
    if node_name_regex and not node_name_regex.match(dump.node_name):
      continue

    # Look up the op type once per dump; it is needed both for filtering
    # and for display.
    op_type = self._debug_dump.node_op_type(dump.node_name)
    if op_type_regex and not op_type_regex.match(op_type):
      continue

    rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
    dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
    dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)

    # Build the row, padding with spaces up to each column boundary.
    line = "[%.3f]" % rel_time
    line += " " * (max_timestamp_width - len(line))
    line += dump_size_str
    line += " " * (max_timestamp_width + max_dump_size_width - len(line))
    line += op_type
    line += " " * (max_timestamp_width + max_dump_size_width +
                   max_op_type_width - len(line))
    line += " %s" % dumped_tensor_name

    # The tensor name is clickable and triggers a print_tensor command.
    output.append(
        line,
        font_attr_segs=[(
            len(line) - len(dumped_tensor_name), len(line),
            debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
    dump_count += 1

  if parsed.tensor_filter:
    output.prepend([
        "%d dumped tensor(s) passing filter \"%s\":" %
        (dump_count, parsed.tensor_filter)
    ])
  else:
    output.prepend(["%d dumped tensor(s):" % dump_count])

  _add_main_menu(output, node_name=None, enable_list_tensors=False)
  return output
def _measure_tensor_list_column_widths(self, data):
  """Determine the maximum widths of the timestamp and op-type column.

  This method assumes that data is sorted in the default order, i.e.,
  by ascending timestamps.

  Args:
    data: (list of DebugTensorDaum) the data based on which the maximum
      column widths will be determined.

  Returns:
    (int) maximum width of the timestamp column. 0 if data is empty.
    (int) maximum width of the dump size column. 0 if data is empty.
    (int) maximum width of the op type column. 0 if data is empty.
  """
  # The last element has the largest relative timestamp, given the assumed
  # ascending-timestamp order.
  if data:
    last_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
    max_timestamp_width = len("[%.3f] " % last_rel_time_ms)
  else:
    max_timestamp_width = 0

  # Scan all dumps once for the widest size string and op-type name.
  max_dump_size_width = 0
  max_op_type_width = 0
  for dump in data:
    size_width = len(
        cli_shared.bytes_to_readable_str(dump.dump_size_bytes)) + 1
    max_dump_size_width = max(max_dump_size_width, size_width)
    max_op_type_width = max(
        max_op_type_width,
        len(self._debug_dump.node_op_type(dump.node_name)))

  return max_timestamp_width, max_dump_size_width, max_op_type_width
def _sort_dump_data_by(self, data, sort_by, reverse):
  """Sort a list of DebugTensorDatum in specified order.

  Args:
    data: (list of DebugTensorDatum) the data to be sorted.
    sort_by: The field to sort data by.
    reverse: (bool) Whether to use reversed (descending) order.

  Returns:
    (list of DebugTensorDatum) in sorted order.

  Raises:
    ValueError: given an invalid value of sort_by.
  """
  # Dispatch table mapping each supported sort field to its key function.
  key_funcs = {
      SORT_TENSORS_BY_TIMESTAMP: lambda x: x.timestamp,
      SORT_TENSORS_BY_DUMP_SIZE: lambda x: x.dump_size_bytes,
      SORT_TENSORS_BY_OP_TYPE:
          lambda x: self._debug_dump.node_op_type(x.node_name),
      SORT_TENSORS_BY_TENSOR_NAME:
          lambda x: "%s:%d" % (x.node_name, x.output_slot),
  }
  if sort_by not in key_funcs:
    raise ValueError("Unsupported key to sort tensors by: %s" % sort_by)
  return sorted(data, reverse=reverse, key=key_funcs[sort_by])
def _tensor_list_column_heads(self, parsed, max_timestamp_width,
                              max_dump_size_width, max_op_type_width):
  """Generate a line containing the column heads of the tensor list.

  Each column head is a clickable menu item that re-runs list_tensors
  sorted by that column; clicking the column that is already the active
  ascending sort key re-runs it with -r (descending).

  Args:
    parsed: Parsed arguments (by argparse) of the list_tensors command.
    max_timestamp_width: (int) maximum width of the timestamp column.
    max_dump_size_width: (int) maximum width of the dump size column.
    max_op_type_width: (int) maximum width of the op type column.

  Returns:
    A RichTextLines object.
  """
  # Reconstruct the list_tensors command with the currently active filters
  # so the re-sort commands preserve them.
  base_command = "list_tensors"
  if parsed.tensor_filter:
    base_command += " -f %s" % parsed.tensor_filter
  if parsed.op_type_filter:
    base_command += " -t %s" % parsed.op_type_filter
  if parsed.node_name_filter:
    base_command += " -n %s" % parsed.node_name_filter

  attr_segs = {0: []}
  row = "t (ms)"
  command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP)
  if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:
    # Already sorting ascending by this column: clicking toggles to
    # descending.
    command += " -r"
  attr_segs[0].append(
      (0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"]))
  row += " " * (max_timestamp_width - len(row))

  prev_len = len(row)
  row += "Size"
  command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)
  if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:
    command += " -r"
  attr_segs[0].append((prev_len, len(row),
                       [debugger_cli_common.MenuItem(None, command), "bold"]))
  row += " " * (max_dump_size_width + max_timestamp_width - len(row))

  prev_len = len(row)
  row += "Op type"
  command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE)
  if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:
    command += " -r"
  attr_segs[0].append((prev_len, len(row),
                       [debugger_cli_common.MenuItem(None, command), "bold"]))
  row += " " * (
      max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
  )

  prev_len = len(row)
  row += " Tensor name"
  command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)
  if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:
    command += " -r"
  # prev_len + 1 skips the leading space before "Tensor name".
  attr_segs[0].append((prev_len + 1, len(row),
                       [debugger_cli_common.MenuItem("", command), "bold"]))
  row += " " * (
      max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
  )

  return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)
def node_info(self, args, screen_info=None):
  """Command handler for node_info.

  Query information about a given node.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols.

  Returns:
    Output text lines as a RichTextLines object.
  """
  # TODO(cais): Add annotation of substrings for node names, to facilitate
  # on-screen highlighting/selection of node names.
  _ = screen_info

  parsed = self._arg_parsers["node_info"].parse_args(args)

  # Accept either a bare node name or a tensor name (with output slot); only
  # the node part matters here.
  node_name, _ = debug_data.parse_node_or_tensor_name(parsed.node_name)

  if not self._debug_dump.node_exists(node_name):
    output = cli_shared.error(
        "There is no node named \"%s\" in the partition graphs" % node_name)
    _add_main_menu(
        output,
        node_name=None,
        enable_list_tensors=True,
        enable_node_info=False,
        enable_list_inputs=False,
        enable_list_outputs=False)
    return output

  # TODO(cais): Provide UI glossary feature to explain to users what the
  # term "partition graph" means and how it is related to TF graph objects
  # in Python.

  header = "Node %s" % node_name
  lines = [header, ""]
  # Bold the node name inside the header line.
  font_attr_segs = {
      0: [(len(header) - len(node_name), len(header), "bold")]
  }
  lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name))
  lines.append(" Device: %s" % self._debug_dump.node_device(node_name))
  output = debugger_cli_common.RichTextLines(
      lines, font_attr_segs=font_attr_segs)

  # List node inputs (non-control and control).
  output.extend(self._format_neighbors(
      "input",
      self._debug_dump.node_inputs(node_name),
      self._debug_dump.node_inputs(node_name, is_control=True)))

  # List node output recipients (non-control and control).
  output.extend(self._format_neighbors(
      "recipient",
      self._debug_dump.node_recipients(node_name),
      self._debug_dump.node_recipients(node_name, is_control=True)))

  # Optional sections, driven by the -a, -d and -t flags.
  if parsed.attributes:
    output.extend(self._list_node_attributes(node_name))
  if parsed.dumps:
    output.extend(self._list_node_dumps(node_name))
  if parsed.traceback:
    output.extend(self._render_node_traceback(node_name))

  _add_main_menu(output, node_name=node_name, enable_node_info=False)
  return output
def _render_node_traceback(self, node_name):
  """Render traceback of a node's creation in Python, if available.

  Args:
    node_name: (str) name of the node.

  Returns:
    A RichTextLines object containing the stack trace of the node's
    construction.
  """
  lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")]

  try:
    node_stack = self._debug_dump.node_traceback(node_name)
    for depth, frame in enumerate(node_stack):
      file_path, line, function_name, text = frame
      lines.append("%d: %s" % (depth, file_path))

      # When the source text is available, make the line number clickable,
      # linking to a print_source (ps) command at that line.
      if text:
        attribute = debugger_cli_common.MenuItem(
            "", "ps %s -b %d" % (file_path, line))
      else:
        attribute = None
      line_number_line = RL(" ")
      line_number_line += RL("Line: %d" % line, attribute)
      lines.append(line_number_line)

      lines.append(" Function: %s" % function_name)
      lines.append(" Text: " + (("\"%s\"" % text) if text else "None"))
      lines.append("")
  except KeyError:
    lines.append("(Node unavailable in the loaded Python graph)")
  except LookupError:
    lines.append("(Unavailable because no Python graph has been loaded)")

  return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def list_inputs(self, args, screen_info=None):
  """Command handler for list_inputs.

  Show inputs to a given node.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols.

  Returns:
    Output text lines as a RichTextLines object.
  """
  # Screen info not currently used by this handler.
  _ = screen_info
  # TODO(cais): Use screen info to format the output lines more prettily,
  # e.g., hanging indent of long node names.

  parsed = self._arg_parsers["list_inputs"].parse_args(args)

  output = self._list_inputs_or_outputs(
      parsed.recursive,
      parsed.node_name,
      parsed.depth,
      parsed.control,
      parsed.op_type,
      do_outputs=False)

  _add_main_menu(
      output,
      node_name=debug_data.get_node_name(parsed.node_name),
      enable_list_inputs=False)
  return output
def print_tensor(self, args, screen_info=None):
  """Command handler for print_tensor.

  Print value of a given dumped tensor.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols.

  Returns:
    Output text lines as a RichTextLines object.
  """
  parsed = self._arg_parsers["print_tensor"].parse_args(args)

  # Use the screen width (if known) to cap numpy's print line width.
  if screen_info and "cols" in screen_info:
    np_printoptions = {"linewidth": screen_info["cols"]}
  else:
    np_printoptions = {}

  # Determine if any range-highlighting is required.
  highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

  # Separate any slicing expression (e.g., "[1, :]") from the tensor name.
  tensor_name, tensor_slicing = (
      command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

  node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
  if (self._debug_dump.loaded_partition_graphs() and
      not self._debug_dump.node_exists(node_name)):
    output = cli_shared.error(
        "Node \"%s\" does not exist in partition graphs" % node_name)
    _add_main_menu(
        output,
        node_name=None,
        enable_list_tensors=True,
        enable_print_tensor=False)
    return output

  watch_keys = self._debug_dump.debug_watch_keys(node_name)
  if output_slot is None:
    # No output slot given: try to infer it from the node's watch keys
    # (each of the form "<node_name>:<slot>").
    output_slots = set()
    for watch_key in watch_keys:
      output_slots.add(int(watch_key.split(":")[1]))

    if len(output_slots) == 1:
      # There is only one dumped tensor from this node, so there is no
      # ambiguity. Proceed to show the only dumped tensor.
      output_slot = list(output_slots)[0]
    else:
      # There are more than one dumped tensors from this node. Indicate as
      # such.
      # TODO(cais): Provide an output screen with command links for
      # convenience.
      lines = [
          "Node \"%s\" generated debug dumps from %s output slots:" %
          (node_name, len(output_slots)),
          "Please specify the output slot: %s:x." % node_name
      ]
      output = debugger_cli_common.RichTextLines(lines)
      _add_main_menu(
          output,
          node_name=node_name,
          enable_list_tensors=True,
          enable_print_tensor=False)
      return output

  # Find debug dump data that match the tensor name (node name + output
  # slot).
  matching_data = []
  for watch_key in watch_keys:
    debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
    for datum in debug_tensor_data:
      if datum.output_slot == output_slot:
        matching_data.append(datum)

  if not matching_data:
    # No dump for this tensor.
    output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
                              parsed.tensor_name)
  elif len(matching_data) == 1:
    # There is only one dump for this tensor.
    if parsed.number <= 0:
      output = cli_shared.format_tensor(
          matching_data[0].get_tensor(),
          matching_data[0].watch_key,
          np_printoptions,
          print_all=parsed.print_all,
          tensor_slicing=tensor_slicing,
          highlight_options=highlight_options,
          include_numeric_summary=parsed.numeric_summary)
    else:
      # A -n value > 0 is invalid when only one dump exists.
      output = cli_shared.error(
          "Invalid number (%d) for tensor %s, which generated one dump." %
          (parsed.number, parsed.tensor_name))

    _add_main_menu(output, node_name=node_name, enable_print_tensor=False)
  else:
    # There are more than one dumps for this tensor.
    if parsed.number < 0:
      # No dump number specified: list all dumps, each with a clickable
      # print_tensor command that selects it via -n.
      lines = [
          "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                 len(matching_data))
      ]
      font_attr_segs = {}

      for i, datum in enumerate(matching_data):
        rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
        lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
        command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
        font_attr_segs[len(lines) - 1] = [(
            len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
            debugger_cli_common.MenuItem(None, command))]

      lines.append("")
      lines.append(
          "You can use the -n (--number) flag to specify which dump to "
          "print.")
      lines.append("For example:")
      lines.append(" print_tensor %s -n 0" % parsed.tensor_name)

      output = debugger_cli_common.RichTextLines(
          lines, font_attr_segs=font_attr_segs)
    elif parsed.number >= len(matching_data):
      output = cli_shared.error(
          "Specified number (%d) exceeds the number of available dumps "
          "(%d) for tensor %s" %
          (parsed.number, len(matching_data), parsed.tensor_name))
    else:
      output = cli_shared.format_tensor(
          matching_data[parsed.number].get_tensor(),
          matching_data[parsed.number].watch_key + " (dump #%d)" %
          parsed.number,
          np_printoptions,
          print_all=parsed.print_all,
          tensor_slicing=tensor_slicing,
          highlight_options=highlight_options)
    _add_main_menu(output, node_name=node_name, enable_print_tensor=False)

  return output
def list_outputs(self, args, screen_info=None):
  """Command handler for list_outputs.

  Show the nodes that receive the outputs of a given node.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols.

  Returns:
    Output text lines as a RichTextLines object.
  """
  # Screen info not currently used by this handler. Include this line to
  # mute pylint.
  _ = screen_info
  # TODO(cais): Use screen info to format the output lines more prettily,
  # e.g., hanging indent of long node names.

  parsed = self._arg_parsers["list_outputs"].parse_args(args)

  output = self._list_inputs_or_outputs(
      parsed.recursive,
      parsed.node_name,
      parsed.depth,
      parsed.control,
      parsed.op_type,
      do_outputs=True)

  node_name = debug_data.get_node_name(parsed.node_name)
  _add_main_menu(output, node_name=node_name, enable_list_outputs=False)

  return output
def _reconstruct_print_source_command(self,
parsed,
line_begin,
max_elements_per_line_increase=0):
return "ps %s %s -b %d -m %d" % (
parsed.source_file_path, "-t" if parsed.tensors else "", line_begin,
parsed.max_elements_per_line + max_elements_per_line_increase)
def print_source(self, args, screen_info=None):
  """Print the content of a source file with overlaid debug annotations.

  Args:
    args: Command-line arguments, excluding the command prefix, as a list of
      str.
    screen_info: Optional dict input containing screen information such as
      cols. Currently unused by this handler.

  Returns:
    Output text lines as a RichTextLines object.
  """
  del screen_info # Unused.

  parsed = self._arg_parsers["print_source"].parse_args(args)

  # Map of 1-based line number -> elements (ops or dumped Tensors,
  # depending on the -t flag) created at that source line.
  source_annotation = source_utils.annotate_source(
      self._debug_dump,
      parsed.source_file_path,
      do_dumped_tensors=parsed.tensors)

  source_lines, line_num_width = source_utils.load_source(
      parsed.source_file_path)

  labeled_source_lines = []
  actual_initial_scroll_target = 0
  for i, line in enumerate(source_lines):
    # Prefix each source line with its yellow "L<n>" line-number label.
    annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW)
    annotated_line += " " * (line_num_width - len(annotated_line))
    annotated_line += line
    labeled_source_lines.append(annotated_line)

    if i + 1 == parsed.line_begin:
      # Remember the output row of the requested begin line so the UI
      # scrolls to it initially.
      actual_initial_scroll_target = len(labeled_source_lines) - 1

    if i + 1 in source_annotation:
      sorted_elements = sorted(source_annotation[i + 1])
      for k, element in enumerate(sorted_elements):
        if k >= parsed.max_elements_per_line:
          # Cap the elements shown per source line; append a clickable
          # "+5" link that re-runs the command with a larger -m value.
          omitted_info_line = RL(" (... Omitted %d of %d %s ...) " % (
              len(sorted_elements) - parsed.max_elements_per_line,
              len(sorted_elements),
              "tensor(s)" if parsed.tensors else "op(s)"))
          omitted_info_line += RL(
              "+5",
              debugger_cli_common.MenuItem(
                  None,
                  self._reconstruct_print_source_command(
                      parsed, i + 1, max_elements_per_line_increase=5)))
          labeled_source_lines.append(omitted_info_line)
          break

        label = RL(" " * 4)
        if self._debug_dump.debug_watch_keys(
            debug_data.get_node_name(element)):
          # The element has dumps: link it to a print_tensor command.
          attribute = debugger_cli_common.MenuItem("", "pt %s" % element)
        else:
          attribute = cli_shared.COLOR_BLUE

        label += RL(element, attribute)
        labeled_source_lines.append(label)

  output = debugger_cli_common.rich_text_lines_from_rich_line_list(
      labeled_source_lines,
      annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:
                   actual_initial_scroll_target})
  _add_main_menu(output, node_name=None)
  return output
def _make_source_table(self, source_list, is_tf_py_library):
"""Make a table summarizing the source files that create nodes and tensors.
Args:
source_list: List of source files and related information as a list of
tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line).
is_tf_py_library: (`bool`) whether this table is for files that belong
to the TensorFlow Python library.
Returns:
The table as a `debugger_cli_common.RichTextLines` object.
"""
path_head = "Source file path"
num_nodes_head = "#(nodes)"
num_tensors_head = "#(tensors)"
num_dumps_head = "#(tensor dumps)"
if is_tf_py_library:
# Use color to mark files that are guessed to belong to TensorFlow Python
# library.
color = cli_shared.COLOR_GRAY
lines = [RL("TensorFlow Python library file(s):", color)]
else:
color = cli_shared.COLOR_WHITE
lines = [RL("File(s) outside TensorFlow Python library:", color)]
if not source_list:
lines.append(RL("[No files.]"))
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
path_column_width = max(
max([len(item[0]) for item in source_list]), len(path_head)) + 1
num_nodes_column_width = max(
max([len(str(item[2])) for item in source_list]),
len(num_nodes_head)) + 1
num_tensors_column_width = max(
max([len(str(item[3])) for item in source_list]),
len(num_tensors_head)) + 1
head = RL(path_head + " " * (path_column_width - len(path_head)), color)
head += RL(num_nodes_head + " " * (
num_nodes_column_width - len(num_nodes_head)), color)
head += RL(num_tensors_head + " " * (
num_tensors_column_width - len(num_tensors_head)), color)
head += RL(num_dumps_head, color)
lines.append(head)
for (file_path, _, num_nodes, num_tensors, num_dumps,
first_line_num) in source_list:
path_attributes = [color]
if source_utils.is_extension_uncompiled_python_source(file_path):
path_attributes.append(
debugger_cli_common.MenuItem(None, "ps %s -b %d" %
(file_path, first_line_num)))
line = RL(file_path, path_attributes)
line += " " * (path_column_width - len(line))
line += RL(
str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))),
color)
line += RL(
str(num_tensors) + " " *
(num_tensors_column_width - len(str(num_tensors))), color)
line += RL(str(num_dumps), color)
lines.append(line)
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def list_source(self, args, screen_info=None):
"""List Python source files that constructed nodes and tensors."""
del screen_info # Unused.
parsed = self._arg_parsers["list_source"].parse_args(args)
source_list = source_utils.list_source_files_against_dump(
self._debug_dump,
path_regex_whitelist=parsed.path_filter,
node_name_regex_whitelist=parsed.node_name_filter)
top_lines = [
RL("List of source files that created nodes in this run", "bold")]
if parsed.path_filter:
top_lines.append(
RL("File path regex filter: \"%s\"" % parsed.path_filter))
if parsed.node_name_filter:
top_lines.append(
RL("Node name regex filter: \"%s\"" % parsed.node_name_filter))
top_lines.append(RL())
output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)
if not source_list:
output.append("[No source file information.]")
return output
output.extend(self._make_source_table(
[item for item in source_list if not item[1]], False))
output.extend(self._make_source_table(
[item for item in source_list if item[1]], True))
_add_main_menu(output, node_name=None)
return output
  def _list_inputs_or_outputs(self,
                              recursive,
                              node_name,
                              depth,
                              control,
                              op_type,
                              do_outputs=False):
    """Helper function used by list_inputs and list_outputs.

    Format a list of lines to display the inputs or output recipients of a
    given node.

    Args:
      recursive: Whether the listing is to be done recursively, as a boolean.
      node_name: The name of the node in question, as a str.
      depth: Maximum recursion depth, applies only if recursive == True, as an
        int.
      control: Whether control inputs or control recipients are included, as a
        boolean.
      op_type: Whether the op types of the nodes are to be included, as a
        boolean.
      do_outputs: Whether recipients, instead of input nodes are to be
        listed, as a boolean.

    Returns:
      Input or recipient tree formatted as a RichTextLines object.
    """
    # Select the traversal direction: recipients (outputs) or inputs.
    if do_outputs:
      tracker = self._debug_dump.node_recipients
      type_str = "Recipients of"
      short_type_str = "recipients"
    else:
      tracker = self._debug_dump.node_inputs
      type_str = "Inputs to"
      short_type_str = "inputs"

    lines = []
    font_attr_segs = {}

    # Check if this is a tensor name, instead of a node name.
    node_name, _ = debug_data.parse_node_or_tensor_name(node_name)

    # Check if node exists.
    if not self._debug_dump.node_exists(node_name):
      return cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)

    # Non-recursive listings show only the immediate neighbors (depth 1).
    if recursive:
      max_depth = depth
    else:
      max_depth = 1

    if control:
      include_ctrls_str = ", control %s included" % short_type_str
    else:
      include_ctrls_str = ""

    line = "%s node \"%s\"" % (type_str, node_name)
    # Bold the node name, which sits just inside the trailing double quote.
    font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1,
                          "bold")]
    lines.append(line + " (Depth limit = %d%s):" % (max_depth,
                                                    include_ctrls_str))

    # Clicking a listed node name re-runs the listing rooted at that node.
    command_template = "lo -c -r %s" if do_outputs else "li -c -r %s"
    self._dfs_from_node(
        lines,
        font_attr_segs,
        node_name,
        tracker,
        max_depth,
        1, [],
        control,
        op_type,
        command_template=command_template)

    # Include legend.
    lines.append("")
    lines.append("Legend:")
    lines.append(" (d): recursion depth = d.")

    if control:
      lines.append(" (Ctrl): Control input.")
    if op_type:
      lines.append(" [Op]: Input node has op type Op.")

    # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.
    return debugger_cli_common.RichTextLines(
        lines, font_attr_segs=font_attr_segs)
  def _dfs_from_node(self,
                     lines,
                     attr_segs,
                     node_name,
                     tracker,
                     max_depth,
                     depth,
                     unfinished,
                     include_control=False,
                     show_op_type=False,
                     command_template=None):
    """Perform depth-first search (DFS) traversal of a node's input tree.

    It recursively tracks the inputs (or output recipients) of the node called
    node_name, and append these inputs (or output recipients) to a list of text
    lines (lines) with proper indentation that reflects the recursion depth,
    together with some formatting attributes (to attr_segs). The formatting
    attributes can include command shortcuts, for example.

    Args:
      lines: Text lines to append to, as a list of str.
      attr_segs: (dict) Attribute segments dictionary to append to.
      node_name: Name of the node, as a str. This arg is updated during the
        recursion.
      tracker: A callable that takes one str as the node name input and
        returns a list of str as the inputs/outputs.
        This makes it this function general enough to be used with both
        node-input and node-output tracking.
      max_depth: Maximum recursion depth, as an int.
      depth: Current recursion depth. This arg is updated during the
        recursion.
      unfinished: A stack of unfinished recursion depths, as a list of int.
      include_control: Whether control dependencies are to be included as
        inputs (and marked as such).
      show_op_type: Whether op type of the input nodes are to be displayed
        alongside the nodes' names.
      command_template: (str) Template for command shortcut of the node names.
    """
    # Make a shallow copy of the list because it may be extended later.
    all_inputs = copy.copy(tracker(node_name, is_control=False))
    is_ctrl = [False] * len(all_inputs)
    if include_control:
      # Sort control inputs or recipients in alphabetical order of the node
      # names.
      ctrl_inputs = sorted(tracker(node_name, is_control=True))
      all_inputs.extend(ctrl_inputs)
      is_ctrl.extend([True] * len(ctrl_inputs))

    if not all_inputs:
      if depth == 1:
        # Only the traversal root reports emptiness explicitly.
        lines.append(" [None]")
      return

    # Mark this depth as still having unvisited siblings; popped below when
    # the last sibling is reached (or on early return at the depth limit).
    unfinished.append(depth)

    # Create depth-dependent hanging indent for the line.
    hang = ""
    for k in xrange(depth):
      if k < depth - 1:
        # Ancestor levels draw a vertical bar while unfinished, blanks once
        # their last sibling has been visited.
        if k + 1 in unfinished:
          hang += HANG_UNFINISHED
        else:
          hang += HANG_FINISHED
      else:
        hang += HANG_SUFFIX

    if all_inputs and depth > max_depth:
      # Depth limit reached: print an ellipsis instead of recursing further.
      lines.append(hang + ELLIPSIS)
      unfinished.pop()
      return

    hang += DEPTH_TEMPLATE % depth

    for i in xrange(len(all_inputs)):
      inp = all_inputs[i]
      if is_ctrl[i]:
        ctrl_str = CTRL_LABEL
      else:
        ctrl_str = ""

      op_type_str = ""
      if show_op_type:
        op_type_str = OP_TYPE_TEMPLATE % self._debug_dump.node_op_type(inp)

      if i == len(all_inputs) - 1:
        # Last sibling at this depth: the level is now finished.
        unfinished.pop()

      line = hang + ctrl_str + op_type_str + inp
      lines.append(line)
      if command_template:
        # Make the node name portion of the line a clickable shortcut.
        attr_segs[len(lines) - 1] = [(
            len(line) - len(inp), len(line),
            debugger_cli_common.MenuItem(None, command_template % inp))]

      # Recursive call.
      # The input's/output's name can be a tensor name, in the case of node
      # with >1 output slots.
      inp_node_name, _ = debug_data.parse_node_or_tensor_name(inp)
      self._dfs_from_node(
          lines,
          attr_segs,
          inp_node_name,
          tracker,
          max_depth,
          depth + 1,
          unfinished,
          include_control=include_control,
          show_op_type=show_op_type,
          command_template=command_template)
def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
"""List neighbors (inputs or recipients) of a node.
Args:
neighbor_type: ("input" | "recipient")
non_ctrls: Non-control neighbor node names, as a list of str.
ctrls: Control neighbor node names, as a list of str.
Returns:
A RichTextLines object.
"""
# TODO(cais): Return RichTextLines instead, to allow annotation of node
# names.
lines = []
font_attr_segs = {}
lines.append("")
lines.append(" %d %s(s) + %d control %s(s):" %
(len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type))
for non_ctrl in non_ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(non_ctrl),
non_ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(non_ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))]
if ctrls:
lines.append("")
lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type))
for ctrl in ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))]
return debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
def _list_node_attributes(self, node_name):
"""List neighbors (inputs or recipients) of a node.
Args:
node_name: Name of the node of which the attributes are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
lines.append("")
lines.append("Node attributes:")
attrs = self._debug_dump.node_attributes(node_name)
for attr_key in attrs:
lines.append(" %s:" % attr_key)
attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ")
lines.append(" %s" % attr_val_str)
lines.append("")
return debugger_cli_common.RichTextLines(lines)
def _list_node_dumps(self, node_name):
"""List dumped tensor data from a node.
Args:
node_name: Name of the node of which the attributes are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
font_attr_segs = {}
watch_keys = self._debug_dump.debug_watch_keys(node_name)
dump_count = 0
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
line = " Slot %d @ %s @ %.3f ms" % (
datum.output_slot, datum.debug_op,
(datum.timestamp - self._debug_dump.t0) / 1000.0)
lines.append(line)
command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count)
font_attr_segs[len(lines) - 1] = [(
2, len(line), debugger_cli_common.MenuItem(None, command))]
dump_count += 1
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
output_with_header = debugger_cli_common.RichTextLines(
["%d dumped tensor(s):" % dump_count, ""])
output_with_header.extend(output)
return output_with_header
def create_analyzer_ui(debug_dump,
                       tensor_filters=None,
                       ui_type="curses",
                       on_ui_exit=None):
  """Create an instance of CursesUI based on a DebugDumpDir object.

  Args:
    debug_dump: (debug_data.DebugDumpDir) The debug dump to use.
    tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor
      filter (Callable).
    ui_type: (str) requested UI type, e.g., "curses", "readline".
    on_ui_exit: (`Callable`) the callback to be called when the UI exits.

  Returns:
    (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
      commands and tab-completions registered.
  """
  analyzer = DebugAnalyzer(debug_dump)
  if tensor_filters:
    for filter_name in tensor_filters:
      analyzer.add_tensor_filter(filter_name, tensor_filters[filter_name])

  cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)

  # (command name, handler, prefix alias) for every analyzer command;
  # registered in this fixed order.
  command_specs = (
      ("list_tensors", analyzer.list_tensors, "lt"),
      ("node_info", analyzer.node_info, "ni"),
      ("list_inputs", analyzer.list_inputs, "li"),
      ("list_outputs", analyzer.list_outputs, "lo"),
      ("print_tensor", analyzer.print_tensor, "pt"),
      ("print_source", analyzer.print_source, "ps"),
      ("list_source", analyzer.list_source, "ls"),
  )
  for command_name, handler, alias in command_specs:
    cli.register_command_handler(
        command_name,
        handler,
        analyzer.get_help(command_name),
        prefix_aliases=[alias])

  # Tab completions for command "print_tensors".
  dumped_tensor_names = [
      "%s:%d" % (datum.node_name, datum.output_slot)
      for datum in debug_dump.dumped_tensor_data]
  cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)

  return cli
| |
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
from . import handler
from . import xmlreader
# Probe for the "xmlcharrefreplace" codec error handler, which replaces
# unencodable characters with XML character references. Fall back to
# "strict" where it is unavailable; _error_handling is later passed to
# str.encode() by XMLGenerator._write().
try:
    from codecs import xmlcharrefreplace_errors
    _error_handling = "xmlcharrefreplace"
    # Only availability matters; the imported name itself is unused.
    del xmlcharrefreplace_errors
except ImportError:
    _error_handling = "strict"
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary."""
    # Replacements are applied in the dictionary's iteration order.
    for old, new in d.items():
        s = s.replace(old, new)
    return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    (The default dict is never mutated.)
    """
    # must do ampersand first, so the '&' introduced by the other
    # replacements is not escaped again
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    (The default dict is never mutated.)
    """
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so freshly produced '&' characters are not
    # re-interpreted as the start of another entity
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Whitespace that would be normalized by an XML parser is written as
    # character references so it round-trips unchanged.
    entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Both quote kinds present: double-quote and escape the '"'.
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
class XMLGenerator(handler.ContentHandler):
    """A ContentHandler that echoes SAX events back out as an XML document.

    Output goes to *out* (default: sys.stdout). When the output stream
    does not accept str, text is encoded with *encoding* using the best
    available error handler (see _error_handling).
    """

    def __init__(self, out=None, encoding="iso-8859-1"):
        if out is None:
            import sys
            out = sys.stdout
        handler.ContentHandler.__init__(self)
        self._out = out
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []
        self._encoding = encoding

    def _write(self, text):
        # str goes through unchanged; anything else is encoded with the
        # configured encoding/error handler.
        if isinstance(text, str):
            self._out.write(text)
        else:
            self._out.write(text.encode(self._encoding, _error_handling))

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def startPrefixMapping(self, prefix, uri):
        # Save the current context so endPrefixMapping can restore it.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._write('<' + name)
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        self._write('>')

    def endElement(self, name):
        self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        self._write('<' + self._qname(name))

        for prefix, uri in self._undeclared_ns_maps:
            # BUG FIX: was self._out.write, which bypassed the encoding
            # handling in _write for namespace declarations.
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        self._write('>')

    def endElementNS(self, name, qname):
        self._write('</%s>' % self._qname(name))

    def characters(self, content):
        self._write(escape(content))

    def ignorableWhitespace(self, content):
        self._write(content)

    def processingInstruction(self, target, data):
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        # parent may also be attached later via setParent() before parse().
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods
    # (pure delegation to the handler registered on this filter)

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods
    # (pure delegation; override individual methods to filter events)

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # Install this filter as every kind of handler on the parent so all
        # events flow through it, then delegate the actual parsing.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent

    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base = ""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""EmPOWER resouce pool and resource block classes."""
from empower.datatypes.etheraddress import EtherAddress
# Numeric channel band types, as encoded in the device protocol.
BT_L20 = 0
BT_HT20 = 1
BT_HT40 = 2

# Human-readable band labels.
L20 = 'L20'
HT20 = 'HT20'
HT40 = 'HT40'

# Mappings between numeric band types and their labels (both directions).
BANDS = {BT_L20: L20,
         BT_HT20: HT20,
         BT_HT40: HT40}

REVERSE_BANDS = {L20: BT_L20,
                 HT20: BT_HT20,
                 HT40: BT_HT40}

# Multicast transmission modes: legacy rates, 'dms' (presumably 802.11v
# Directed Multicast Service -- confirm), or unsolicited retries ('ur').
TX_MCAST_LEGACY = 0x0
TX_MCAST_DMS = 0x1
TX_MCAST_UR = 0x2

TX_MCAST_LEGACY_H = 'legacy'
TX_MCAST_DMS_H = 'dms'
TX_MCAST_UR_H = 'ur'

# Mappings between multicast mode codes and their labels (both directions).
TX_MCAST = {TX_MCAST_LEGACY: TX_MCAST_LEGACY_H,
            TX_MCAST_DMS: TX_MCAST_DMS_H,
            TX_MCAST_UR: TX_MCAST_UR_H}

REVERSE_TX_MCAST = {TX_MCAST_LEGACY_H: TX_MCAST_LEGACY,
                    TX_MCAST_DMS_H: TX_MCAST_DMS,
                    TX_MCAST_UR_H: TX_MCAST_UR}
class TxPolicyProp(dict):
    """Dict of per-address TxPolicy entries with lazy defaults.

    Looking up a missing key creates a fresh TxPolicy bound to this
    property's resource block, stores it under the key, and returns it.
    """

    def __init__(self, block, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.block = block

    def __getitem__(self, key):
        if key not in self:
            dict.__setitem__(self, key, TxPolicy(key, self.block))
        return dict.__getitem__(self, key)
class TxPolicy(object):
    """Transmission policy for one destination on a resource block.

    Groups the knobs the rate control algorithm must honour when talking
    to a given address: ack suppression, RTS/CTS threshold, multicast
    mode, the allowed MCS set, and the unsolicited-retry count. Every
    setter immediately pushes the updated policy to the device.

    Attributes:
        addr: the destination address this policy applies to
        block: the resource block this tx policy refers to
    """

    def __init__(self, addr, block):
        self.addr = addr
        self.block = block
        # Defaults: acks enabled, legacy multicast, every supported MCS.
        self._no_ack = False
        self._rts_cts = 2436
        self._mcast = TX_MCAST_LEGACY
        self._mcs = block.supports
        self._ur_count = 3

    def _push(self):
        # Propagate the current policy to the radio.
        self.block.radio.connection.send_set_port(self)

    def to_dict(self):
        """Return a json-friendly representation of the object."""
        return {'no_ack': self.no_ack,
                'rts_cts': self.rts_cts,
                'mcast': TX_MCAST[self.mcast],
                'mcs': self.mcs,
                'ur_count': self.ur_count}

    def __repr__(self):
        rates = ", ".join(str(rate) for rate in self.mcs)
        return "%s no_ack %s rts_cts %u mcast %s mcs %s ur_count %u" % \
            (self.addr, self.no_ack, self.rts_cts, TX_MCAST[self.mcast],
             rates, self.ur_count)

    @property
    def ur_count(self):
        """Get the unsolicited retry count."""
        return self._ur_count

    @ur_count.setter
    def ur_count(self, ur_count):
        """Set the unsolicited retry count."""
        self._ur_count = int(ur_count)
        self._push()

    @property
    def mcast(self):
        """Get the multicast mode."""
        return self._mcast

    @mcast.setter
    def mcast(self, mcast):
        """Set the multicast mode, falling back to legacy when unknown."""
        self._mcast = mcast if mcast in TX_MCAST else TX_MCAST_LEGACY
        self._push()

    @property
    def mcs(self):
        """Get the set of allowed MCS."""
        return self._mcs

    @mcs.setter
    def mcs(self, mcs):
        """Set the allowed MCS, constrained to what the block supports."""
        self._mcs = self.block.supports & set(mcs)
        if not self._mcs:
            # An empty intersection would disable the link; allow all.
            self._mcs = self.block.supports
        self._push()

    @property
    def no_ack(self):
        """Get the no-ack flag."""
        return self._no_ack

    @no_ack.setter
    def no_ack(self, no_ack):
        """Set the no-ack flag."""
        self._no_ack = bool(no_ack)
        self._push()

    @property
    def rts_cts(self):
        """Get the RTS/CTS threshold."""
        return self._rts_cts

    @rts_cts.setter
    def rts_cts(self, rts_cts):
        """Set the RTS/CTS threshold."""
        self._rts_cts = int(rts_cts)
        self._push()
class CQM(dict):
    """Channel-quality map keyed by address.

    Lookups of unknown keys do not raise KeyError; they answer with a
    placeholder entry whose RSSI fields are -infinity and whose packet
    counters are zero. The placeholder is returned but NOT stored.
    """

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        bottom = -float("inf")
        return {'addr': key,
                'last_rssi_std': bottom,
                'last_rssi_avg': bottom,
                'last_packets': 0,
                'hist_packets': 0,
                'mov_rssi': bottom}
def build_block(block):
    """Build a new resource block from another block or from a tuple.

    Args:
        block: either a ResourceBlock, or a 4-tuple of
            (wtp address, hwaddr, channel, band label).

    Returns:
        The equivalent ResourceBlock registered on the target radio.

    Raises:
        ValueError: if the tuple is too short or the input type is wrong.
        KeyError: if the radio does not support an equivalent block.
    """
    if isinstance(block, ResourceBlock):
        requested = block
    elif isinstance(block, tuple):
        # BUG FIX: the original tested len(tuple) (a TypeError on the
        # builtin type) and required only 3 elements while reading 4.
        if len(block) < 4:
            raise ValueError("Invalid tuple")
        from empower.main import RUNTIME
        wtp = RUNTIME.wtps[EtherAddress(block[0])]
        hwaddr = EtherAddress(block[1])
        channel = block[2]
        band = REVERSE_BANDS[block[3]]
        requested = ResourceBlock(wtp, hwaddr, channel, band)
    else:
        # BUG FIX: the message was passed as a second exception arg and
        # never formatted; use %-interpolation instead.
        raise ValueError("Expected ResourceBlock or tuple, got %s" %
                         type(block))
    # Return the radio's own (canonical) instance of the equivalent block.
    for supported in requested.radio.supports:
        if supported == requested:
            return supported
    raise KeyError(requested)
class ResourcePool(set):
""" Resource Pool class.
A resource block represents the minimum allocation element in a EmPOWER
network. It typically consists of a wifi channel and the number of
streams. However it could also consist of multiple bands (for example 160
MHz channel obtained by aggregating two 80 MHz channels in 802.11ac
networks).
The class Overrides the set object's "and" method for ResourceBlock
objects by excluding the Resource Block address form the matching.from
"""
def __init__(self, *args, **kwds):
super(ResourcePool, self).__init__(*args, **kwds)
def __and__(self, other):
result = ResourcePool()
for rblock in self:
for rblock_other in other:
if rblock.channel == rblock_other.channel and \
rblock.band == rblock_other.band:
result.add(rblock)
return result
def __or__(self, other):
result = ResourcePool()
for rblock in self:
result.add(rblock)
for rblock in other:
result.add(rblock)
return result
class ResourceBlock(object):
    """ EmPOWER resource block.

    A resource block is identified by a channel, a timeslot, and the
    spatial stream id. Channel is a tuple in the form (channel number, channel
    type). Channel type is one of the channel supported by 802.11: 20, 20HT,
    40HT, 40VHT, 80VHT.

    Attributes:
        radio: The WTP or the LVAP at which this resource block is available
        hwaddr: the mac address of the wireless interface
        channel: The channel id
        band: The band type (0=L20, 1=HT20, 2=HT40)
        ucqm: User interference matrix group. Rssi values to LVAPs.
        ncqm: Network interference matrix group. Rssi values to WTPs.
        supports: list of MCS supported in this Resource Block as
          reported by the device, that is if the device is an 11a
          device it will report [6, 12, 18, 36, 54]. If the device is
          an 11n device it will report [0, 1, 2, 3, 4, 5, 6, 7]
    """

    def __init__(self, radio, hwaddr, channel, band):

        self._radio = radio
        self._hwaddr = hwaddr
        self._channel = channel
        self._band = band
        self.ucqm = CQM()
        self.ncqm = CQM()
        self.tx_policies = TxPolicyProp(self)

        # Seed the supported rate set from band and channel: HT bands use
        # MCS indexes; legacy bands use rates in Mb/s, with the 11b rates
        # (1, 2, 5.5, 11) only on 2.4 GHz channels (<= 14).
        if self.band == BT_HT20 or self.band == BT_HT40:
            self._supports = set([0, 1, 2, 3, 4, 5, 6, 7])
        else:
            if self.channel > 14:
                self._supports = \
                    set([6.0, 9.0, 12.0, 18.0, 24.0, 36.0, 48.0, 54.0])
            else:
                self._supports = \
                    set([1.0, 2.0, 5.5, 11.0,
                         6.0, 9.0, 12.0, 18.0, 24.0, 36.0, 48, 54.0])

    @property
    def addr(self):
        """ Return the radio's address. """

        return self._radio.addr

    @property
    def radio(self):
        """ Return the radio. """

        return self._radio

    @radio.setter
    def radio(self, radio):
        """ Set the radio. """

        self._radio = radio

    @property
    def supports(self):
        """ Return the list of supported MCS. """

        return self._supports

    @supports.setter
    def supports(self, supports):
        """ Set the supported MCS. """

        # NOTE(review): this adds to the existing set rather than replacing
        # it, and int() truncates legacy float rates (e.g. 5.5 -> 5) --
        # confirm both behaviors are intended.
        for supported in supports:
            self._supports.add(int(supported))

    @property
    def hwaddr(self):
        """ Return the hwaddr. """

        return self._hwaddr

    @hwaddr.setter
    def hwaddr(self, hwaddr):
        """ Set the hwaddr. """

        self._hwaddr = hwaddr

    @property
    def band(self):
        """ Return the band. """

        return self._band

    @band.setter
    def band(self, band):
        """ Set the band. """

        if band not in BANDS:
            raise ValueError("Invalid band type %s" % band)
        self._band = band

    @property
    def channel(self):
        """ Return the channel. """

        return self._channel

    @channel.setter
    def channel(self, channel):
        """ Set the channel. """

        if channel < 1 or channel > 165:
            raise ValueError("Invalid channel %u" % channel)
        self._channel = channel

    def to_dict(self):
        """ Return a JSON-serializable dictionary representing the Resource
        Pool """

        tx_policies = {str(k): v for k, v in self.tx_policies.items()}

        return {'addr': self.radio.addr,
                'hwaddr': self.hwaddr,
                'channel': self.channel,
                'supports': sorted(self.supports),
                'tx_policies': tx_policies,
                'band': BANDS[self.band],
                'ucqm': {str(k): v for k, v in self.ucqm.items()},
                'ncqm': {str(k): v for k, v in self.ncqm.items()}}

    def __hash__(self):
        # Must stay consistent with __eq__: same four fields.
        return hash(self.radio.addr) + hash(self.hwaddr) + \
            hash(self.channel) + hash(self.band)

    def __eq__(self, other):

        if not isinstance(other, ResourceBlock):
            return False

        return (other.radio == self.radio and
                other.hwaddr == self.hwaddr and
                other.channel == self.channel and
                other.band == self.band)

    def __repr__(self):
        return "(%s, %s, %u, %s)" % (self.radio.addr, self.hwaddr,
                                     self.channel, BANDS[self.band])
| |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS Account as a custodian resource.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, ValueFilter, FilterValidationError
from c7n.manager import ResourceManager, resources
from c7n.utils import local_session, type_schema
from c7n.resources.iam import CredentialReport
# Registries of the filters and actions available on the account
# pseudo-resource. BUG FIX: the registry labels were swapped
# ('aws.account.actions' was passed to the FilterRegistry and vice
# versa); corrected so diagnostics name the right registry.
filters = FilterRegistry('aws.account.filters')
actions = ActionRegistry('aws.account.actions')
def get_account(session_factory, config):
    """Build the single synthetic account resource record.

    Uses the first IAM account alias, when one exists, as the account name.
    """
    client = local_session(session_factory).client('iam')
    aliases = client.list_account_aliases().get(
        'AccountAliases', ('',))
    if aliases and aliases[0]:
        name = aliases[0]
    else:
        name = ""
    return {'account_id': config.account_id,
            'account_name': name}
@resources.register('account')
class Account(ResourceManager):
    """Pseudo resource manager representing the AWS account itself."""

    filter_registry = filters
    action_registry = actions

    class resource_type(object):
        # synthetic model: exactly one record per account
        id = 'account_id'
        name = 'account_name'
        filter_name = None

    @classmethod
    def get_permissions(cls):
        return ('iam:ListAccountAliases',)

    def get_model(self):
        return self.resource_type

    def resources(self):
        account = get_account(self.session_factory, self.config)
        return self.filter_resources([account])

    def get_resources(self, resource_ids):
        # resource ids are ignored; there is only ever the one account
        return [get_account(self.session_factory, self.config)]
@filters.register('credential')
class AccountCredentialReport(CredentialReport):
    """Match the account against its root entry in the credential report."""

    def process(self, resources, event=None):
        super(AccountCredentialReport, self).process(resources, event)
        report = self.get_credential_report()
        if report is None:
            return []
        root_info = report.get('<root_account>')
        matched = []
        for resource in resources:
            if not self.match(root_info):
                continue
            resource['c7n:credential-report'] = root_info
            matched.append(resource)
        return matched
@filters.register('check-cloudtrail')
class CloudTrailEnabled(Filter):
    """Verify cloud trail enabled for this account per specifications.

    Returns an annotated account resource if trail is not enabled.

    Of particular note, the current-region option will evaluate whether
    cloudtrail is available in the current region, either as a multi region
    trail or as a trail with it as the home region.

    :example:

        .. code-block:: yaml

            policies:
              - name: account-cloudtrail-enabled
                resource: account
                region: us-east-1
                filters:
                  - type: check-cloudtrail
                    global-events: true
                    multi-region: true
                    running: true
    """
    schema = type_schema(
        'check-cloudtrail',
        **{'multi-region': {'type': 'boolean'},
           'global-events': {'type': 'boolean'},
           'current-region': {'type': 'boolean'},
           'running': {'type': 'boolean'},
           'notifies': {'type': 'boolean'},
           'file-digest': {'type': 'boolean'},
           'kms': {'type': 'boolean'},
           'kms-key': {'type': 'string'}})

    permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')

    def process(self, resources, event=None):
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        trails = client.describe_trails()['trailList']
        resources[0]['c7n:cloudtrails'] = trails
        data = self.data
        # Successively narrow the candidate trail set; a policy option that
        # is absent leaves the set untouched.
        if data.get('global-events'):
            trails = [t for t in trails
                      if t.get('IncludeGlobalServiceEvents')]
        if data.get('current-region'):
            current_region = session.region_name
            trails = [t for t in trails
                      if t.get('IsMultiRegionTrail') or
                      t.get('HomeRegion') == current_region]
        if data.get('kms'):
            trails = [t for t in trails if t.get('KmsKeyId')]
        if data.get('kms-key'):
            wanted_key = data['kms-key']
            trails = [t for t in trails
                      if t.get('KmsKeyId', '') == wanted_key]
        if data.get('file-digest'):
            trails = [t for t in trails
                      if t.get('LogFileValidationEnabled')]
        if data.get('multi-region'):
            trails = [t for t in trails if t.get('IsMultiRegionTrail')]
        if data.get('notifies'):
            trails = [t for t in trails if t.get('SnsTopicARN')]
        if data.get('running', True):
            logging_trails = []
            for t in trails:
                t['Status'] = status = client.get_trail_status(
                    Name=t['TrailARN'])
                if status['IsLogging'] and not status.get(
                        'LatestDeliveryError'):
                    logging_trails.append(t)
            trails = logging_trails
        # any surviving trail satisfies the policy; otherwise annotate/match
        return [] if trails else resources
@filters.register('check-config')
class ConfigEnabled(Filter):
    """Is config service enabled for this account

    :example:

        .. code-block:: yaml

            policies:
              - name: account-check-config-services
                resource: account
                region: us-east-1
                filters:
                  - type: check-config
                    all-resources: true
                    global-resources: true
                    running: true
    """
    schema = type_schema(
        'check-config', **{
            'all-resources': {'type': 'boolean'},
            'running': {'type': 'boolean'},
            'global-resources': {'type': 'boolean'}})

    permissions = ('config:DescribeDeliveryChannels',
                   'config:DescribeConfigurationRecorders',
                   'config:DescribeConfigurationRecorderStatus')

    def process(self, resources, event=None):
        client = local_session(
            self.manager.session_factory).client('config')
        channels = client.describe_delivery_channels()['DeliveryChannels']
        recorders = client.describe_configuration_recorders()[
            'ConfigurationRecorders']
        account = resources[0]
        account['c7n:config_recorders'] = recorders
        account['c7n:config_channels'] = channels
        if self.data.get('global-resources'):
            recorders = [
                r for r in recorders
                if r['recordingGroup'].get('includeGlobalResourceTypes')]
        if self.data.get('all-resources'):
            recorders = [
                r for r in recorders
                if r['recordingGroup'].get('allSupported')]
        if self.data.get('running', True) and recorders:
            status = {}
            for s in client.describe_configuration_recorder_status()[
                    'ConfigurationRecordersStatus']:
                status[s['name']] = s
            account['c7n:config_status'] = status
            recorders = [
                r for r in recorders
                if status[r['name']]['recording'] and
                status[r['name']]['lastStatus'].lower() in ('pending', 'success')]
        # config counts as enabled when both a delivery channel and a
        # qualifying recorder survive the checks above
        if channels and recorders:
            return []
        return resources
@filters.register('iam-summary')
class IAMSummary(ValueFilter):
    """Return annotated account resource if iam summary filter matches.

    Some use cases include, detecting root api keys or mfa usage.

    Example iam summary wrt to matchable fields::

      {
            "AccessKeysPerUserQuota": 2,
            "AccountAccessKeysPresent": 0,
            "AccountMFAEnabled": 1,
            "AccountSigningCertificatesPresent": 0,
            "AssumeRolePolicySizeQuota": 2048,
            "AttachedPoliciesPerGroupQuota": 10,
            "AttachedPoliciesPerRoleQuota": 10,
            "AttachedPoliciesPerUserQuota": 10,
            "GroupPolicySizeQuota": 5120,
            "Groups": 1,
            "GroupsPerUserQuota": 10,
            "GroupsQuota": 100,
            "InstanceProfiles": 0,
            "InstanceProfilesQuota": 100,
            "MFADevices": 3,
            "MFADevicesInUse": 2,
            "Policies": 3,
            "PoliciesQuota": 1000,
            "PolicySizeQuota": 5120,
            "PolicyVersionsInUse": 5,
            "PolicyVersionsInUseQuota": 10000,
            "Providers": 0,
            "RolePolicySizeQuota": 10240,
            "Roles": 4,
            "RolesQuota": 250,
            "ServerCertificates": 0,
            "ServerCertificatesQuota": 20,
            "SigningCertificatesPerUserQuota": 2,
            "UserPolicySizeQuota": 2048,
            "Users": 5,
            "UsersQuota": 5000,
            "VersionsPerPolicyQuota": 5,
      }

    For example to determine if an account has either not been
    enabled with root mfa or has root api keys.

    .. code-block:: yaml

       policies:
         - name: root-keys-or-no-mfa
           resource: account
           filters:
             - type: iam-summary
               key: AccountMFAEnabled
               value: true
               op: eq
               value_type: swap
    """
    schema = type_schema('iam-summary', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountSummary',)

    def process(self, resources, event=None):
        account = resources[0]
        # the summary is fetched once and cached on the account resource
        summary = account.get('c7n:iam_summary')
        if not summary:
            client = local_session(
                self.manager.session_factory).client('iam')
            summary = client.get_account_summary()['SummaryMap']
            account['c7n:iam_summary'] = summary
        return resources if self.match(summary) else []
@filters.register('password-policy')
class AccountPasswordPolicy(ValueFilter):
    """Check an account's password policy.

    Note that on top of the default password policy fields, we also add an extra key,
    PasswordPolicyConfigured which will be set to true or false to signify if the given
    account has attempted to set a policy at all.

    :example:

        .. code-block:: yaml

            policies:
              - name: password-policy-check
                resource: account
                region: us-east-1
                filters:
                  - type: password-policy
                    key: MinimumPasswordLength
                    value: 10
                    op: ge
                  - type: password-policy
                    key: RequireSymbols
                    value: true
    """
    schema = type_schema('password-policy', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountPasswordPolicy',)

    def process(self, resources, event=None):
        account = resources[0]
        policy = account.get('c7n:password_policy')
        if not policy:
            client = local_session(self.manager.session_factory).client('iam')
            policy = {}
            try:
                policy = client.get_account_password_policy().get(
                    'PasswordPolicy', {})
                policy['PasswordPolicyConfigured'] = True
            except ClientError as e:
                # no policy set at all on the account is an expected state
                if e.response['Error']['Code'] != 'NoSuchEntity':
                    raise
                policy['PasswordPolicyConfigured'] = False
            account['c7n:password_policy'] = policy
        return resources if self.match(policy) else []
@filters.register('service-limit')
class ServiceLimit(Filter):
    """Check if account's service limits are past a given threshold.

    Supported limits are per trusted advisor, which is variable based
    on usage in the account and support level enabled on the account.

      - service: AutoScaling limit: Auto Scaling groups
      - service: AutoScaling limit: Launch configurations
      - service: EBS limit: Active snapshots
      - service: EBS limit: Active volumes
      - service: EBS limit: General Purpose (SSD) volume storage (GiB)
      - service: EBS limit: Magnetic volume storage (GiB)
      - service: EBS limit: Provisioned IOPS
      - service: EBS limit: Provisioned IOPS (SSD) storage (GiB)
      - service: EC2 limit: Elastic IP addresses (EIPs)

      # Note this is extant for each active instance type in the account
      # however the total value is against sum of all instance types.
      # see issue https://github.com/capitalone/cloud-custodian/issues/516

      - service: EC2 limit: On-Demand instances - m3.medium
      - service: EC2 limit: Reserved Instances - purchase limit (monthly)
      - service: ELB limit: Active load balancers
      - service: IAM limit: Groups
      - service: IAM limit: Instance profiles
      - service: IAM limit: Roles
      - service: IAM limit: Server certificates
      - service: IAM limit: Users
      - service: RDS limit: DB instances
      - service: RDS limit: DB parameter groups
      - service: RDS limit: DB security groups
      - service: RDS limit: DB snapshots per user
      - service: RDS limit: Storage quota (GB)
      - service: RDS limit: Internet gateways
      - service: SES limit: Daily sending quota
      - service: VPC limit: VPCs
      - service: VPC limit: VPC Elastic IP addresses (EIPs)

    :example:

        .. code-block:: yaml

            policies:
              - name: account-service-limits
                resource: account
                filters:
                  - type: service-limit
                    services:
                      - EC2
                    threshold: 1.0
              - name: specify-region-for-global-service
                region: us-east-1
                resource: account
                filters:
                  - type: service-limit
                    services:
                      - IAM
                    limits:
                      - Roles
    """

    schema = type_schema(
        'service-limit',
        threshold={'type': 'number'},
        refresh_period={'type': 'integer'},
        limits={'type': 'array', 'items': {'type': 'string'}},
        services={'type': 'array', 'items': {
            'enum': ['EC2', 'ELB', 'VPC', 'AutoScaling',
                     'RDS', 'EBS', 'SES', 'IAM']}})

    permissions = ('support:DescribeTrustedAdvisorCheckResult',)
    # Trusted advisor check id queried in process() for service limit data.
    check_id = 'eW7HH0l7J9'
    # Field order of each flagged resource's metadata list from the check.
    check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')
    # Services whose limits are only reported against us-east-1.
    global_services = set(['IAM'])

    def validate(self):
        """Reject policies filtering a global service outside us-east-1."""
        region = self.manager.data.get('region', '')
        if len(self.global_services.intersection(self.data.get('services', []))):
            if region != 'us-east-1':
                raise FilterValidationError(
                    "Global services: %s must be targeted in us-east-1 on the policy"
                    % ', '.join(self.global_services))
        return self

    def process(self, resources, event=None):
        """Annotate and return the account when any limit breaches threshold."""
        # The support API is only available in us-east-1.
        client = local_session(self.manager.session_factory).client(
            'support', region_name='us-east-1')
        checks = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        region = self.manager.config.region
        # Keep rows for this region; '-' marks global services, which are
        # only surfaced when the policy targets us-east-1 (see validate).
        checks['flaggedResources'] = [r for r in checks['flaggedResources']
            if r['metadata'][0] == region or (r['metadata'][0] == '-' and region == 'us-east-1')]
        resources[0]['c7n:ServiceLimits'] = checks

        # Kick off an async refresh if the cached result is older than
        # refresh_period (days); this run still evaluates the stale data.
        delta = timedelta(self.data.get('refresh_period', 1))
        check_date = parse_date(checks['timestamp'])
        if datetime.now(tz=tzutc()) - delta > check_date:
            client.refresh_trusted_advisor_check(checkId=self.check_id)

        threshold = self.data.get('threshold')

        services = self.data.get('services')
        limits = self.data.get('limits')
        exceeded = []

        for resource in checks['flaggedResources']:
            # With no explicit threshold, defer to trusted advisor's own
            # ok/warn/error status and skip healthy rows.
            if threshold is None and resource['status'] == 'ok':
                continue
            limit = dict(zip(self.check_limit, resource['metadata']))
            if services and limit['service'] not in services:
                continue
            if limits and limit['check'] not in limits:
                continue
            limit['status'] = resource['status']
            # percentage of the limit currently in use ('extant' may be None)
            limit['percentage'] = float(limit['extant'] or 0) / float(
                limit['limit']) * 100
            if threshold and limit['percentage'] < threshold:
                continue
            exceeded.append(limit)
        if exceeded:
            resources[0]['c7n:ServiceLimitsExceeded'] = exceeded
            return resources
        return []
@actions.register('request-limit-increase')
class RequestLimitIncrease(BaseAction):
    r"""File support ticket to raise limit.

    :Example:

    .. code-block:: yaml

        policies:
          - name: account-service-limits
            resource: account
            filters:
              - type: service-limit
                services:
                  - EBS
                limits:
                  - Provisioned IOPS (SSD) storage (GiB)
                threshold: 60.5
            actions:
              - type: request-limit-increase
                notify: [email, email2]
                ## You can use one of either percent-increase or an amount-increase.
                percent-increase: 50
                message: "Please raise the below account limit(s); \n {limits}"
    """

    # NOTE(review): 'notify' is declared beside 'type'/'properties' rather
    # than inside 'properties' -- verify this validates as intended.
    schema = {
        'type': 'object',
        'notify': {'type': 'array'},
        'properties': {
            'type': {'enum': ['request-limit-increase']},
            'percent-increase': {'type': 'number', 'minimum': 1},
            'amount-increase': {'type': 'number', 'minimum': 1},
            'subject': {'type': 'string'},
            'message': {'type': 'string'},
            'severity': {'type': 'string', 'enum': ['urgent', 'high', 'normal', 'low']}
        },
        'oneOf': [
            {'required': ['type', 'percent-increase']},
            {'required': ['type', 'amount-increase']}
        ]
    }

    permissions = ('support:CreateCase',)

    # Defaults used when the policy supplies no subject/message/severity.
    default_subject = '[Account:{account}]Raise the following limit(s) of {service} in {region}'
    default_template = 'Please raise the below account limit(s); \n {limits}'
    default_severity = 'normal'

    # Maps trusted-advisor service names to support-center service codes.
    service_code_mapping = {
        'AutoScaling': 'auto-scaling',
        'ELB': 'elastic-load-balancing',
        'EBS': 'amazon-elastic-block-store',
        'EC2': 'amazon-elastic-compute-cloud-linux',
        'RDS': 'amazon-relational-database-service-aurora',
        'VPC': 'amazon-virtual-private-cloud',
        'IAM': 'aws-identity-and-access-management',
        'CloudFormation': 'aws-cloudformation',
    }

    def process(self, resources):
        """Open one support case per service with exceeded limits.

        Reads the 'c7n:ServiceLimitsExceeded' annotation left by the
        service-limit filter on the account resource.
        """
        session = local_session(self.manager.session_factory)
        # the support API is only available in us-east-1
        client = session.client('support', region_name='us-east-1')
        account_id = self.manager.config.account_id
        service_map = {}
        region_map = {}
        limit_exceeded = resources[0].get('c7n:ServiceLimitsExceeded', [])
        percent_increase = self.data.get('percent-increase')
        amount_increase = self.data.get('amount-increase')
        for s in limit_exceeded:
            current_limit = int(s['limit'])
            if percent_increase:
                increase_by = current_limit * float(percent_increase) / 100
                # never request less than a +1 increase
                increase_by = max(increase_by, 1)
            else:
                increase_by = amount_increase
            increase_by = round(increase_by)
            msg = '\nIncrease %s by %d in %s \n\t Current Limit: %s\n\t Current Usage: %s\n\t ' \
                  'Set New Limit to: %d' % (
                      s['check'], increase_by, s['region'], s['limit'], s['extant'],
                      (current_limit + increase_by))
            # one message per exceeded limit, grouped by service; region_map
            # keeps the first region seen for each service
            service_map.setdefault(s['service'], []).append(msg)
            region_map.setdefault(s['service'], s['region'])

        for service in service_map:
            subject = self.data.get('subject', self.default_subject).format(
                service=service, region=region_map[service], account=account_id)
            service_code = self.service_code_mapping.get(service)
            body = self.data.get('message', self.default_template)
            body = body.format(**{
                'service': service,
                'limits': '\n\t'.join(service_map[service]),
            })
            client.create_case(
                subject=subject,
                communicationBody=body,
                serviceCode=service_code,
                categoryCode='general-guidance',
                severityCode=self.data.get('severity', self.default_severity),
                ccEmailAddresses=self.data.get('notify', []))
def cloudtrail_policy(original, bucket_name, account_id):
    """add CloudTrail permissions to an S3 policy, preserving existing

    `original` is either None or a dict with a 'Policy' key containing the
    current bucket policy as a JSON string. Returns the merged policy as a
    JSON string.
    """
    acl_statement = {
        'Action': 's3:GetBucketAcl',
        'Effect': 'Allow',
        'Principal': {'Service': 'cloudtrail.amazonaws.com'},
        'Resource': 'arn:aws:s3:::' + bucket_name,
        'Sid': 'AWSCloudTrailAclCheck20150319',
    }
    write_statement = {
        'Action': 's3:PutObject',
        'Condition': {
            'StringEquals':
            {'s3:x-amz-acl': 'bucket-owner-full-control'},
        },
        'Effect': 'Allow',
        'Principal': {'Service': 'cloudtrail.amazonaws.com'},
        'Resource': 'arn:aws:s3:::%s/AWSLogs/%s/*' % (
            bucket_name, account_id
        ),
        'Sid': 'AWSCloudTrailWrite20150319',
    }
    # start from the existing policy, or an empty one
    if original is None:
        policy = {
            'Statement': [],
            'Version': '2012-10-17',
        }
    else:
        policy = json.loads(original['Policy'])
    # only append a statement when its action is not already granted
    existing_actions = [s.get('Action') for s in policy['Statement']]
    for needed in (acl_statement, write_statement):
        if needed['Action'] not in existing_actions:
            policy['Statement'].append(needed)
    return json.dumps(policy)
@actions.register('enable-cloudtrail')
class EnableTrail(BaseAction):
    """Enables logging on the trail(s) named in the policy

    Creates the destination bucket and trail if needed, then ensures the
    trail is logging and matches the policy's configuration options.

    :Example:

    .. code-block:: yaml

        policies:
          - name: trail-test
            description: Ensure CloudTrail logging is enabled
            resource: account
            actions:
              - type: enable-cloudtrail
                trail: mytrail
                bucket: trails
    """

    permissions = (
        'cloudtrail:CreateTrail',
        'cloudtrail:DescribeTrails',
        'cloudtrail:GetTrailStatus',
        'cloudtrail:StartLogging',
        'cloudtrail:UpdateTrail',
        's3:CreateBucket',
        's3:GetBucketPolicy',
        's3:PutBucketPolicy',
    )
    schema = type_schema(
        'enable-cloudtrail',
        **{
            'trail': {'type': 'string'},
            'bucket': {'type': 'string'},
            'bucket-region': {'type': 'string'},
            'multi-region': {'type': 'boolean'},
            'global-events': {'type': 'boolean'},
            'notify': {'type': 'string'},
            'file-digest': {'type': 'boolean'},
            'kms': {'type': 'boolean'},
            'kms-key': {'type': 'string'},
            'required': ('bucket',),
        }
    )

    def process(self, accounts):
        """Create or enable CloudTrail"""
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        bucket_name = self.data['bucket']
        bucket_region = self.data.get('bucket-region', 'us-east-1')
        trail_name = self.data.get('trail', 'default-trail')
        multi_region = self.data.get('multi-region', True)
        global_events = self.data.get('global-events', True)
        notify = self.data.get('notify', '')
        file_digest = self.data.get('file-digest', False)
        kms = self.data.get('kms', False)
        kms_key = self.data.get('kms-key', '')

        s3client = session.client('s3')
        # idempotent bucket creation: already owning the bucket is fine
        try:
            s3client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': bucket_region}
            )
        except ClientError as ce:
            if not ('Error' in ce.response and
                    ce.response['Error']['Code'] == 'BucketAlreadyOwnedByYou'):
                raise ce

        try:
            current_policy = s3client.get_bucket_policy(Bucket=bucket_name)
        except ClientError:
            # no policy on the bucket yet; cloudtrail grants built from scratch
            current_policy = None

        policy_json = cloudtrail_policy(
            current_policy, bucket_name, self.manager.config.account_id)

        s3client.put_bucket_policy(Bucket=bucket_name, Policy=policy_json)
        trails = client.describe_trails().get('trailList', ())
        if trail_name not in [t.get('Name') for t in trails]:
            new_trail = client.create_trail(
                Name=trail_name,
                S3BucketName=bucket_name,
            )
            if new_trail:
                trails.append(new_trail)
            # the loop below will configure the new trail
        for trail in trails:
            if trail.get('Name') != trail_name:
                continue
            # enable
            arn = trail['TrailARN']
            status = client.get_trail_status(Name=arn)
            if not status['IsLogging']:
                client.start_logging(Name=arn)
            # apply configuration changes (if any)
            update_args = {}
            if multi_region != trail.get('IsMultiRegionTrail'):
                update_args['IsMultiRegionTrail'] = multi_region
            if global_events != trail.get('IncludeGlobalServiceEvents'):
                update_args['IncludeGlobalServiceEvents'] = global_events
            # NOTE(review): describe_trails appears to return 'SnsTopicARN' /
            # 'SnsTopicName' keys; this 'SNSTopicArn' lookup may never match,
            # re-applying the topic on every run -- confirm intended.
            if notify != trail.get('SNSTopicArn'):
                update_args['SnsTopicName'] = notify
            if file_digest != trail.get('LogFileValidationEnabled'):
                update_args['EnableLogFileValidation'] = file_digest
            if kms_key != trail.get('KmsKeyId'):
                # clear the key when the trail has one but kms wasn't requested
                if not kms and 'KmsKeyId' in trail:
                    kms_key = ''
                update_args['KmsKeyId'] = kms_key
            if update_args:
                update_args['Name'] = trail_name
                client.update_trail(**update_args)
@filters.register('has-virtual-mfa')
class HasVirtualMFA(Filter):
    """Is the account configured with a virtual MFA device?

    :example:

        .. code-block:: yaml

            policies:
              - name: account-with-virtual-mfa
                resource: account
                region: us-east-1
                filters:
                  - type: has-virtual-mfa
                    value: true
    """

    schema = type_schema('has-virtual-mfa', **{'value': {'type': 'boolean'}})

    permissions = ('iam:ListVirtualMFADevices',)

    def mfa_belongs_to_root_account(self, mfa):
        # the root account's virtual device always carries this serial suffix
        return mfa['SerialNumber'].endswith(':mfa/root-account-mfa-device')

    def account_has_virtual_mfa(self, account):
        if not account.get('c7n:VirtualMFADevices'):
            client = local_session(self.manager.session_factory).client('iam')
            paginator = client.get_paginator('list_virtual_mfa_devices')
            devices = paginator.paginate().build_full_result()['VirtualMFADevices']
            account['c7n:VirtualMFADevices'] = [
                d for d in devices if self.mfa_belongs_to_root_account(d)]
        expected = self.data.get('value', True)
        return expected == any(account['c7n:VirtualMFADevices'])

    def process(self, resources, event=None):
        return [r for r in resources if self.account_has_virtual_mfa(r)]
@actions.register('enable-data-events')
class EnableDataEvents(BaseAction):
    """Ensure all buckets in account are setup to log data events.

    Note this works via a single trail for data events per
    (https://goo.gl/1ux7RG).

    This trail should NOT be used for api management events, the
    configuration here is solely for data events. If directed to create
    a trail this will do so without management events.

    :example:

        .. code-block:: yaml

            policies:
              - name: s3-enable-data-events-logging
                resource: account
                actions:
                 - type: enable-data-events
                   data-trail:
                     name: s3-events
                     multi-region: us-east-1
    """
    schema = type_schema(
        'enable-data-events', required=['data-trail'], **{
            'data-trail': {
                'type': 'object',
                'additionalProperties': False,
                'required': ['name'],
                'properties': {
                    'create': {
                        'title': 'Should we create trail if needed for events?',
                        'type': 'boolean'},
                    'type': {'enum': ['ReadOnly', 'WriteOnly', 'All']},
                    'name': {
                        'title': 'The name of the event trail',
                        'type': 'string'},
                    'topic': {
                        'title': 'If creating, the sns topic for the trail to send updates',
                        'type': 'string'},
                    's3-bucket': {
                        'title': 'If creating, the bucket to store trail event data',
                        'type': 'string'},
                    's3-prefix': {'type': 'string'},
                    'key-id': {
                        'title': 'If creating, Enable kms on the trail',
                        'type': 'string'},
                    # region that we're aggregating via trails.
                    'multi-region': {
                        'title': 'If creating, use this region for all data trails',
                        'type': 'string'}}}})

    def validate(self):
        """Creating a trail requires a destination bucket to be configured."""
        if self.data['data-trail'].get('create'):
            if 's3-bucket' not in self.data['data-trail']:
                raise FilterValidationError(
                    "If creating data trails, an s3-bucket is required")
        return self

    def get_permissions(self):
        """Extend read permissions with create permissions when configured."""
        perms = [
            'cloudtrail:DescribeTrails',
            'cloudtrail:GetEventSelectors',
            'cloudtrail:PutEventSelectors']
        if self.data.get('data-trail', {}).get('create'):
            perms.extend([
                'cloudtrail:CreateTrail', 'cloudtrail:StartLogging'])
        return perms

    def add_data_trail(self, client, trail_cfg):
        """Create the data event trail described by `trail_cfg`.

        Raises ValueError when the trail is missing and the policy did not
        opt in to creating it.
        """
        if not trail_cfg.get('create'):
            raise ValueError(
                "s3 data event trail missing and not configured to create")
        params = dict(
            Name=trail_cfg['name'],
            S3BucketName=trail_cfg['s3-bucket'],
            EnableLogFileValidation=True)
        # optional trail settings, passed through only when configured
        if 'key-id' in trail_cfg:
            params['KmsKeyId'] = trail_cfg['key-id']
        if 's3-prefix' in trail_cfg:
            params['S3KeyPrefix'] = trail_cfg['s3-prefix']
        if 'topic' in trail_cfg:
            params['SnsTopicName'] = trail_cfg['topic']
        if 'multi-region' in trail_cfg:
            params['IsMultiRegionTrail'] = True
        client.create_trail(**params)
        return {'Name': trail_cfg['name']}

    def process(self, resources):
        session = local_session(self.manager.session_factory)
        region = self.data['data-trail'].get('multi-region')

        if region:
            client = session.client('cloudtrail', region_name=region)
        else:
            client = session.client('cloudtrail')

        added = False
        tconfig = self.data['data-trail']
        trails = client.describe_trails(
            trailNameList=[tconfig['name']]).get('trailList', ())
        if not trails:
            trail = self.add_data_trail(client, tconfig)
            added = True
        else:
            trail = trails[0]

        events = client.get_event_selectors(
            TrailName=trail['Name']).get('EventSelectors', [])

        for e in events:
            found = False
            if not e.get('DataResources'):
                continue
            for data_events in e['DataResources']:
                if data_events['Type'] != 'AWS::S3::Object':
                    continue
                for b in data_events['Values']:
                    # an all-buckets value like 'arn:aws:s3:::' has an empty
                    # trailing segment; data events are already enabled
                    if b.rsplit(':')[-1].strip('/') == '':
                        found = True
                        break
            if found:
                resources[0]['c7n_data_trail'] = trail
                return

        # Opinionated choice, separate api and data events.
        event_count = len(events)
        events = [e for e in events if not e.get('IncludeManagementEvents')]
        if len(events) != event_count:
            self.log.warning("removing api trail from data trail")

        # future proof'd for other data events, for s3 this trail
        # encompasses all the buckets in the account.

        events.append({
            'IncludeManagementEvents': False,
            'ReadWriteType': tconfig.get('type', 'All'),
            'DataResources': [{
                'Type': 'AWS::S3::Object',
                'Values': ['arn:aws:s3:::']}]})
        client.put_event_selectors(
            TrailName=trail['Name'],
            EventSelectors=events)

        if added:
            client.start_logging(Name=tconfig['name'])
        resources[0]['c7n_data_trail'] = trail
@filters.register('shield-enabled')
class ShieldEnabled(Filter):
    """Match the account on whether a Shield Advanced subscription exists."""

    permissions = ('shield:DescribeSubscription',)

    schema = type_schema(
        'shield-enabled',
        state={'type': 'boolean'})

    def process(self, resources, event=None):
        desired_state = self.data.get('state', False)
        client = self.manager.session_factory().client('shield')
        subscription = None
        try:
            subscription = client.describe_subscription().get(
                'Subscription', None)
        except ClientError as e:
            # no subscription at all is an expected state
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise
        resources[0]['c7n:ShieldSubscription'] = subscription
        return resources if desired_state == bool(subscription) else []
@actions.register('set-shield-advanced')
class SetShieldAdvanced(BaseAction):
    """Enable/disable Shield Advanced on an account."""

    permissions = (
        'shield:CreateSubscription', 'shield:DeleteSubscription')

    schema = type_schema(
        'set-shield-advanced',
        state={'type': 'boolean'})

    def process(self, resources):
        client = self.manager.session_factory().client('shield')
        enable = self.data.get('state', True)
        if not enable:
            try:
                client.delete_subscription()
            except ClientError as e:
                # deleting an absent subscription is a no-op
                if e.response['Error']['Code'] != 'ResourceNotFoundException':
                    raise
            return
        client.create_subscription()
| |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes, sync_mempools
# Passphrase used to encrypt node 1's wallet for test_locked_wallet_fails.
WALLET_PASSPHRASE = "test"
# Seconds the wallet stays unlocked after walletpassphrase is called.
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
    def set_test_params(self):
        # Two nodes: node 0 runs with -walletrbf=0 (the "peer"), node 1 with
        # -walletrbf=1 (the RBF node exercised by most tests).
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[
            "-walletrbf={}".format(i),
            "-mintxfee=0.00002",
            "-mempoolreplacement=1",
        ] for i in range(self.num_nodes)]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        """Fund the RBF node, then run each bumpfee test case in turn."""
        # NOTE(review): unconditional skip — everything below is dead code
        # until RBF support lands.
        if True:
            raise SkipTest("DeepOnion doesn't support RBF.")
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()

        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()

        # fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))

        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        test_maxtxfee_fails(self, rbf_node, dest_address)
        self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """Happy path: bumpfee replaces a simple RBF tx and pays a higher fee."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    # the wallet reports the paid fee as a negative amount, hence abs()
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    """bumpfee works on a transaction that spends a p2sh-segwit output."""
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='p2sh-segwit'))
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)

    # build the RBF spend manually, opting in via the BIP125 sequence number
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()

    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    """bumpfee refuses a transaction that did not opt in to BIP 125."""
    # cannot replace a non RBF transaction (from node which did not enable RBF)
    not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    """bumpfee refuses a transaction with inputs not owned by this wallet."""
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # each wallet signs only its own input, so both must sign in turn
    signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
    signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """bumpfee must refuse when the target tx has an in-wallet descendant."""
    # cannot bump fee if the transaction has a descendant
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    # Create and broadcast a child spending output 0 of the parent.
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransactionwithwallet(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    """bumpfee fails once totalFee would shrink the change output too far."""
    # cannot bump fee with a too-small output
    # 50,000 sat total fee is the boundary case that still succeeds...
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.bumpfee(rbfid, {"totalFee": 50000})
    # ...one satoshi more and the change output becomes too small.
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
    """If bumping reduces the change output to dust, it is absorbed into the fee."""
    # check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=49,900, but it converts to 50,000
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # (32-byte p2sh-pwpkh output size + 148 p2pkh spend estimate) * 10k(discard_rate) / 1000 = 1800
    # P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
    # be spent as a P2PKH.
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000 - 1800})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    # Reported fee is the full 50,000 sat: the dust change was folded in.
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
    """bumpfee should honor the wallet feerate configured via settxfee."""
    # settxfee input validation: too-low values are rejected outright.
    assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
    assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00025000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    # Effective feerate in BTC/kvB = fee * 1000 / vsize.
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_maxtxfee_fails(test, rbf_node, dest_address):
    """bumpfee fails when the required fee would exceed -maxtxfee."""
    # Restart the node with a low -maxtxfee so the bump cannot pay enough fee.
    test.restart_node(1, ['-maxtxfee=0.00003'] + test.extra_args[1])
    # Restarting locks the encrypted wallet; unlock before spending.
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Specified or calculated fee 0.0000332 is too high (cannot be higher than maxTxFee 0.00003)", rbf_node.bumpfee, rbfid)
    # Restore the default configuration and re-unlock for subsequent tests.
    test.restart_node(1, test.extra_args[1])
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_rebumping(rbf_node, dest_address):
    """Re-bumping the original tx fails, but bumping its replacement succeeds."""
    # check that re-bumping the original tx fails, but bumping the bumper succeeds
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
    assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
    rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped."""
    # check that re-bumping a non-replaceable bump tx fails
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                            {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Outputs of replaced and replacement txs are unspendable while unconfirmed."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    # The bump evicts the original from the mempool.
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then invalidate the block so the rbf tx will be put back in the mempool.
    # This makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    # Can not abandon conflicted tx
    assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
    rbf_node.invalidateblock(block.hash)
    # Call abandon to make sure the wallet doesn't attempt to resubmit
    # the bump tx and hope the wallet does not rebroadcast before we call.
    rbf_node.abandontransaction(bumpid)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    """The bump inherits the original tx's wallet metadata (comment/to)."""
    rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    """bumpfee cannot sign the replacement while the wallet is locked."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
    """Broadcast an opt-in-RBF tx spending a single 0.001 coin.

    Pays 0.0005 to ``dest_address`` and 0.00049 to a fresh change
    address, leaving 0.00001 as fee.  Returns the txid.
    """
    funding = next(u for u in node.listunspent()
                   if u["amount"] == Decimal("0.00100000"))
    # Tag the input with the BIP125 sequence so the tx signals replaceability.
    tx_input = {"sequence": BIP125_SEQUENCE_NUMBER}
    tx_input.update(funding)
    outputs = {
        dest_address: Decimal("0.00050000"),
        node.getrawchangeaddress(): Decimal("0.00049000"),
    }
    raw = node.createrawtransaction([tx_input], outputs)
    signed = node.signrawtransactionwithwallet(raw)
    return node.sendrawtransaction(signed["hex"])
def submit_block_with_tx(node, tx):
    """Mine a block containing only *tx* (a hex string) on the current tip.

    Returns the CBlock object that was submitted.
    """
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    # Block time must be past the median time of the previous 11 blocks.
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = create_block(int(tip, 16), create_coinbase(height), block_time, version=0x20000000)
    block.vtx.append(ctx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    # Framework helpers: add the segwit commitment to the coinbase, then
    # solve() — presumably grinds the nonce to satisfy proof of work (helper
    # defined in the test framework; confirm there).
    add_witness_commitment(block)
    block.solve()
    node.submitblock(bytes_to_hex_str(block.serialize(True)))
    return block
if __name__ == "__main__":
    # Script entry point: run the RBF bumpfee functional test suite.
    BumpFeeTest().main()
| |
from __future__ import unicode_literals
from datetime import date
import locale
import sys
from django.apps import apps
from django.contrib.auth import models, management
from django.contrib.auth.checks import check_user_model
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword, createsuperuser
from django.contrib.auth.models import User, Group
from django.contrib.auth.tests.custom_user import CustomUser, CustomUserWithFK, Email
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core import exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase, override_settings, override_system_checks
from django.utils import six
from django.utils.encoding import force_str
def mock_inputs(inputs):
    """
    Decorator to temporarily replace input/getpass to allow interactive
    createsuperuser.

    ``inputs`` maps a prompt substring (case-insensitive) to the canned
    answer; the special 'password' key feeds the getpass replacement.
    """
    def inner(test_func):
        def wrapped(*args):
            class mock_getpass:
                @staticmethod
                def getpass(prompt=b'Password: ', stream=None):
                    if six.PY2:
                        # getpass on Windows only supports prompt as bytestring (#19807)
                        assert isinstance(prompt, six.binary_type)
                    return inputs['password']
            def mock_input(prompt):
                # prompt should be encoded in Python 2. This line will raise an
                # Exception if prompt contains unencoded non-ASCII on Python 2.
                prompt = str(prompt)
                # Guard against lazy translation proxies leaking into prompts.
                assert str('__proxy__') not in prompt
                response = ''
                # Answer with the value of the first key found in the prompt.
                for key, val in inputs.items():
                    if force_str(key) in prompt.lower():
                        response = val
                        break
                return response
            # Swap the real getpass/input used by the createsuperuser command
            # for the mocks above; always restore them, even on test failure.
            old_getpass = createsuperuser.getpass
            old_input = createsuperuser.input
            createsuperuser.getpass = mock_getpass
            createsuperuser.input = mock_input
            try:
                test_func(*args)
            finally:
                createsuperuser.getpass = old_getpass
                createsuperuser.input = old_input
        return wrapped
    return inner
class MockTTY(object):
    """Stand-in for stdin that reports itself as an interactive terminal.

    Used together with mock_inputs so commands that check
    ``stdin.isatty()`` proceed with their interactive code path.
    """
    def isatty(self):
        # Always claim to be a TTY.
        return True
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
    """Tests for management.get_default_username()."""
    def setUp(self):
        # Save the real system-username hook so each test can stub it.
        self.old_get_system_username = management.get_system_username
    def tearDown(self):
        management.get_system_username = self.old_get_system_username
    def test_actual_implementation(self):
        # The real implementation must return text, not bytes.
        self.assertIsInstance(management.get_system_username(), six.text_type)
    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')
    def test_existing(self):
        # An existing user with the same name suppresses the default,
        # unless the database check is explicitly disabled.
        models.User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')
    def test_i18n(self):
        # 'Julia' with accented 'u':
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the ``changepassword`` management command."""
    def setUp(self):
        self.user = models.User.objects.create_user(username='joe', password='qwerty')
        self.stdout = six.StringIO()
        self.stderr = six.StringIO()
    def tearDown(self):
        self.stdout.close()
        self.stderr.close()
    def test_that_changepassword_command_changes_joes_password(self):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Bypass the interactive password prompt.
        command._get_pass = lambda *args: 'not qwerty'
        command.execute(username="joe", stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()
        self.assertEqual(
            command_output,
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
    def test_that_max_tries_exits_1(self):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        command = changepassword.Command()
        # Returns differing values across calls so the password and its
        # confirmation never match.
        command._get_pass = lambda *args: args or 'foo'
        with self.assertRaises(CommandError):
            command.execute(username="joe", stdout=self.stdout, stderr=self.stderr)
    def test_that_changepassword_command_works_with_nonascii_output(self):
        """
        #21627 -- Executing the changepassword management command should allow
        non-ASCII characters from the User object representation.
        """
        # 'Julia' with accented 'u':
        models.User.objects.create_user(username='J\xfalia', password='qwerty')
        command = changepassword.Command()
        command._get_pass = lambda *args: 'not qwerty'
        command.execute(username="J\xfalia", stdout=self.stdout)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
    """Tests for the ``createsuperuser`` management command."""
    def test_basic_usage(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')
        # created password should be unusable
        self.assertFalse(u.has_usable_password())
    @mock_inputs({'password': "nopasswd"})
    def test_nolocale(self):
        """
        Check that createsuperuser does not break when no locale is set. See
        ticket #16017.
        """
        old_getdefaultlocale = locale.getdefaultlocale
        try:
            # Temporarily remove locale information
            locale.getdefaultlocale = lambda: (None, None)
            # Call the command in this new environment
            call_command(
                "createsuperuser",
                interactive=True,
                username="nolocale@somewhere.org",
                email="nolocale@somewhere.org",
                verbosity=0,
                stdin=MockTTY(),
            )
        except TypeError:
            self.fail("createsuperuser fails if the OS provides no information about the current locale")
        finally:
            # Re-apply locale information
            locale.getdefaultlocale = old_getdefaultlocale
        # If we were successful, a user should have been created
        u = User.objects.get(username="nolocale@somewhere.org")
        self.assertEqual(u.email, 'nolocale@somewhere.org')
    @mock_inputs({
        'password': "nopasswd",
        'u\u017eivatel': 'foo',  # username (cz)
        'email': 'nolocale@somewhere.org'})
    def test_non_ascii_verbose_name(self):
        # The username prompt uses the field's verbose_name; swap in a
        # non-ASCII one to exercise prompt encoding.
        # Aliased so the string doesn't get extracted
        from django.utils.translation import ugettext_lazy as ulazy
        username_field = User._meta.get_field('username')
        old_verbose_name = username_field.verbose_name
        username_field.verbose_name = ulazy('u\u017eivatel')
        new_io = six.StringIO()
        try:
            call_command(
                "createsuperuser",
                interactive=True,
                stdout=new_io,
                stdin=MockTTY(),
            )
        finally:
            username_field.verbose_name = old_verbose_name
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
    def test_verbosity_zero(self):
        # We can suppress output on the management command
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())
    def test_email_in_username(self):
        # A username that looks like an email address is accepted as-is.
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User._default_manager.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())
    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "A superuser can be created when a custom User model is in use"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = six.StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            email="joe@somewhere.org",
            date_of_birth="1976-04-01",
            stdout=new_io,
            skip_checks=True
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUser._default_manager.get(email="joe@somewhere.org")
        self.assertEqual(u.date_of_birth, date(1976, 4, 1))
        # created password should be unusable
        self.assertFalse(u.has_usable_password())
    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user_missing_required_field(self):
        "A Custom superuser won't be created when a required field isn't provided"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = six.StringIO()
        with self.assertRaises(CommandError):
            call_command(
                "createsuperuser",
                interactive=False,
                username="joe@somewhere.org",
                stdout=new_io,
                stderr=new_io,
                skip_checks=True
            )
        self.assertEqual(CustomUser._default_manager.count(), 0)
    def test_skip_if_not_in_TTY(self):
        """
        If the command is not called from a TTY, it should be skipped and a
        message should be displayed (#7423).
        """
        class FakeStdin(object):
            """A fake stdin object that has isatty() return False."""
            def isatty(self):
                return False
        out = six.StringIO()
        call_command(
            "createsuperuser",
            stdin=FakeStdin(),
            stdout=out,
            interactive=True,
        )
        # No user was created and the skip message was printed.
        self.assertEqual(User._default_manager.count(), 0)
        self.assertIn("Superuser creation skipped", out.getvalue())
    def test_passing_stdin(self):
        """
        You can pass a stdin object as an option and it should be
        available on self.stdin.
        If no such option is passed, it defaults to sys.stdin.
        """
        sentinel = object()
        command = createsuperuser.Command()
        command.execute(
            stdin=sentinel,
            stdout=six.StringIO(),
            interactive=False,
            verbosity=0,
            username='janet',
            email='janet@example.com',
        )
        self.assertIs(command.stdin, sentinel)
        command = createsuperuser.Command()
        command.execute(
            stdout=six.StringIO(),
            interactive=False,
            verbosity=0,
            username='joe',
            email='joe@example.com',
        )
        self.assertIs(command.stdin, sys.stdin)
    @override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
    def test_fields_with_fk(self):
        # Non-interactive creation resolves FK fields from raw pk values,
        # and reports a clear error for a non-existent related instance.
        new_io = six.StringIO()
        group = Group.objects.create(name='mygroup')
        email = Email.objects.create(email='mymail@gmail.com')
        call_command(
            'createsuperuser',
            interactive=False,
            username=email.pk,
            email=email.email,
            group=group.pk,
            stdout=new_io,
            skip_checks=True,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUserWithFK._default_manager.get(email=email)
        self.assertEqual(u.username, email)
        self.assertEqual(u.group, group)
        non_existent_email = 'mymail2@gmail.com'
        with self.assertRaisesMessage(CommandError,
                'email instance with email %r does not exist.' % non_existent_email):
            call_command(
                'createsuperuser',
                interactive=False,
                username=email.pk,
                email=non_existent_email,
                stdout=new_io,
                skip_checks=True,
            )
    @override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
    def test_fields_with_fk_interactive(self):
        # Interactive creation: prompts carry a "(field.name)" hint for FK
        # fields, and the mocked answers are raw pk values.
        new_io = six.StringIO()
        group = Group.objects.create(name='mygroup')
        email = Email.objects.create(email='mymail@gmail.com')
        @mock_inputs({
            'password': 'nopasswd',
            'username (email.id)': email.pk,
            'email (email.email)': email.email,
            'group (group.id)': group.pk,
        })
        def test(self):
            call_command(
                'createsuperuser',
                interactive=True,
                stdout=new_io,
                stdin=MockTTY(),
                skip_checks=True,
            )
            command_output = new_io.getvalue().strip()
            self.assertEqual(command_output, 'Superuser created successfully.')
            u = CustomUserWithFK._default_manager.get(email=email)
            self.assertEqual(u.username, email)
            self.assertEqual(u.group, group)
        test(self)
class CustomUserModelValidationTestCase(TestCase):
    """System-check validation of custom user model declarations."""
    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonListRequiredFields')
    @override_system_checks([check_user_model])
    def test_required_fields_is_list(self):
        "REQUIRED_FIELDS should be a list."
        from .custom_user import CustomUserNonListRequiredFields
        errors = checks.run_checks()
        expected = [
            checks.Error(
                "'REQUIRED_FIELDS' must be a list or tuple.",
                hint=None,
                obj=CustomUserNonListRequiredFields,
                id='auth.E001',
            ),
        ]
        self.assertEqual(errors, expected)
    @override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
    @override_system_checks([check_user_model])
    def test_username_not_in_required_fields(self):
        "USERNAME_FIELD should not appear in REQUIRED_FIELDS."
        from .custom_user import CustomUserBadRequiredFields
        errors = checks.run_checks()
        expected = [
            checks.Error(
                ("The field named as the 'USERNAME_FIELD' for a custom user model "
                 "must not be included in 'REQUIRED_FIELDS'."),
                hint=None,
                obj=CustomUserBadRequiredFields,
                id='auth.E002',
            ),
        ]
        self.assertEqual(errors, expected)
    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
    @override_system_checks([check_user_model])
    def test_username_non_unique(self):
        "A non-unique USERNAME_FIELD should raise a model validation error."
        from .custom_user import CustomUserNonUniqueUsername
        errors = checks.run_checks()
        expected = [
            checks.Error(
                ("'CustomUserNonUniqueUsername.username' must be "
                 "unique because it is named as the 'USERNAME_FIELD'."),
                hint=None,
                obj=CustomUserNonUniqueUsername,
                id='auth.E003',
            ),
        ]
        self.assertEqual(errors, expected)
    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername',
                       AUTHENTICATION_BACKENDS=[
                           'my.custom.backend',
                       ])
    @override_system_checks([check_user_model])
    def test_username_non_unique_with_custom_backend(self):
        """ A non-unique USERNAME_FIELD should raise an error only if we use the
        default authentication backend. Otherwise, a warning should be raised.
        """
        from .custom_user import CustomUserNonUniqueUsername
        errors = checks.run_checks()
        expected = [
            checks.Warning(
                ("'CustomUserNonUniqueUsername.username' is named as "
                 "the 'USERNAME_FIELD', but it is not unique."),
                hint=('Ensure that your authentication backend(s) can handle '
                      'non-unique usernames.'),
                obj=CustomUserNonUniqueUsername,
                id='auth.W004',
            )
        ]
        self.assertEqual(errors, expected)
class PermissionTestCase(TestCase):
    """Edge cases in create_permissions() for the auth app."""
    def setUp(self):
        # Save the Permission Meta options that the tests below mutate.
        self._original_permissions = models.Permission._meta.permissions[:]
        self._original_default_permissions = models.Permission._meta.default_permissions
        self._original_verbose_name = models.Permission._meta.verbose_name
    def tearDown(self):
        models.Permission._meta.permissions = self._original_permissions
        models.Permission._meta.default_permissions = self._original_default_permissions
        models.Permission._meta.verbose_name = self._original_verbose_name
        ContentType.objects.clear_cache()
    def test_duplicated_permissions(self):
        """
        Test that we show proper error message if we are trying to create
        duplicate permissions.
        """
        auth_app_config = apps.get_app_config('auth')
        # check duplicated default permission
        models.Permission._meta.permissions = [
            ('change_permission', 'Can edit permission (duplicate)')]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'change_permission' clashes with a "
            "builtin permission for model 'auth.Permission'.",
            create_permissions, auth_app_config, verbosity=0)
        # check duplicated custom permissions
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
            ('my_custom_permission', 'Some permission with duplicate permission code'),
        ]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'my_custom_permission' is duplicated for model "
            "'auth.Permission'.",
            create_permissions, auth_app_config, verbosity=0)
        # should not raise anything
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
        ]
        create_permissions(auth_app_config, verbosity=0)
    def test_default_permissions(self):
        auth_app_config = apps.get_app_config('auth')
        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
        ]
        create_permissions(auth_app_config, verbosity=0)
        # add/change/delete permission by default + custom permission
        self.assertEqual(models.Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 4)
        models.Permission.objects.filter(content_type=permission_content_type).delete()
        models.Permission._meta.default_permissions = []
        create_permissions(auth_app_config, verbosity=0)
        # custom permission only since default permissions is empty
        self.assertEqual(models.Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 1)
    def test_verbose_name_length(self):
        # An over-long verbose_name must be rejected when building the
        # "Can add <verbose_name>" permission names.
        auth_app_config = apps.get_app_config('auth')
        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        models.Permission.objects.filter(content_type=permission_content_type).delete()
        models.Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5
        six.assertRaisesRegex(self, exceptions.ValidationError,
            "The verbose_name of permission is longer than 244 characters",
            create_permissions, auth_app_config, verbosity=0)
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Cloud Custodian Lambda Provisioning Support
docs/lambda.rst
"""
import abc
import base64
import hashlib
import importlib
import io
import json
import logging
import os
import shutil
import time
import tempfile
import zipfile
# We use this for freezing dependencies for serverless environments
# that support service side building.
# Its also used for release engineering on our pypi uploads
try:
from importlib import metadata as pkgmd
except ImportError:
try:
import importlib_metadata as pkgmd
except (ImportError, FileNotFoundError):
pkgmd = None
# Static event mapping to help simplify cwe rules creation
from c7n.exceptions import ClientError
from c7n.cwe import CloudWatchEvents
from c7n.utils import parse_s3, local_session, get_retry, merge_dict
# Module-level logger for the serverless provisioning machinery.
log = logging.getLogger('custodian.serverless')
# Retry wrapper (2 attempts) for calls that can fail with
# InsufficientPermissionsException.
LambdaRetry = get_retry(('InsufficientPermissionsException',), max_attempts=2)
# Retry wrapper (2 attempts) for rule calls that can fail with
# ResourceNotFoundException.
RuleRetry = get_retry(('ResourceNotFoundException',), max_attempts=2)
class PythonPackageArchive:
    """Creates a zip file for python lambda functions.
    :param tuple modules: the Python modules to add to the archive
    Amazon doesn't give us straightforward docs here, only `an example
    <http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-deployment-pkg.html#with-s3-example-deployment-pkg-python>`_,
    from which we can infer that they simply unzip the file into a directory on
    ``sys.path``. So what we do is locate all of the ``modules`` specified, and
    add all of the ``.py`` files we find for these modules to a zip file.
    In addition to the modules specified during instantiation, you can add
    arbitrary additional files to the archive using :py:func:`add_file` and
    :py:func:`add_contents`. For example, since we only add ``*.py`` files for
    you, you'll need to manually add files for any compiled extension modules
    that your Lambda requires.
    """
    # Compression applied to every entry (see create_zinfo).
    zip_compression = zipfile.ZIP_DEFLATED
    def __init__(self, modules=(), cache_file=None):
        # Archive is built in a named temp file (delete=False so the path
        # stays valid after close); __del__ is responsible for unlinking it.
        self._temp_archive_file = tempfile.NamedTemporaryFile(delete=False)
        if cache_file:
            # Seed the archive with the contents of a pre-built zip.
            with open(cache_file, 'rb') as fin:
                shutil.copyfileobj(fin, self._temp_archive_file)
        # Open in append mode so cache_file entries are preserved.
        self._zip_file = zipfile.ZipFile(
            self._temp_archive_file, mode='a',
            compression=self.zip_compression)
        self._closed = False
        self.add_modules(None, modules)
    def __del__(self):
        try:
            if not self._closed:
                self.close()
            if self._temp_archive_file:
                self._temp_archive_file.close()
                os.unlink(self.path)
        except AttributeError:
            # Finalizers in python are fairly problematic, especially when
            # breaking cycle references, there are no ordering guarantees
            # so our tempfile may already be gc'd before this ref'd version
            # is called.
            pass
    @property
    def path(self):
        # Filesystem path of the backing temp file.
        return self._temp_archive_file.name
    @property
    def size(self):
        # Size in bytes; only meaningful once the zip is finalized.
        if not self._closed:
            raise ValueError("Archive not closed, size not accurate")
        return os.stat(self._temp_archive_file.name).st_size
    def create_zinfo(self, file):
        # Normalize a path or ZipInfo into a ZipInfo with our settings.
        # NOTE(review): `zinfo` is a module-level helper defined elsewhere
        # in this file.
        if not isinstance(file, zipfile.ZipInfo):
            file = zinfo(file)
        # Ensure we apply the compression
        file.compress_type = self.zip_compression
        # Mark host OS as Linux for all archives
        file.create_system = 3
        return file
    def add_modules(self, ignore, modules):
        """Add the named Python modules to the archive. For consistency's sake
        we only add ``*.py`` files, not ``*.pyc``. We also don't add other
        files, including compiled modules. You'll have to add such files
        manually using :py:meth:`add_file`.

        ``ignore`` is an optional predicate over archive paths, forwarded
        to :py:meth:`add_directory`.
        """
        for module_name in modules:
            module = importlib.import_module(module_name)
            if hasattr(module, '__path__'):
                # https://docs.python.org/3/reference/import.html#module-path
                for directory in module.__path__:
                    self.add_directory(directory, ignore)
                if getattr(module, '__file__', None) is None:
                    # Likely a namespace package. Try to add *.pth files so
                    # submodules are importable under Python 2.7.
                    sitedir = os.path.abspath(os.path.join(list(module.__path__)[0], os.pardir))
                    for filename in os.listdir(sitedir):
                        s = filename.startswith
                        e = filename.endswith
                        if s(module_name) and e('-nspkg.pth'):
                            self.add_file(os.path.join(sitedir, filename))
            elif hasattr(module, '__file__'):
                # https://docs.python.org/3/reference/import.html#__file__
                path = module.__file__
                if path.endswith('.pyc'):
                    # Prefer the .py source sitting next to the bytecode.
                    _path = path[:-1]
                    if not os.path.isfile(_path):
                        raise ValueError(
                            'Could not find a *.py source file behind ' + path)
                    path = _path
                if not path.endswith('.py'):
                    raise ValueError(
                        'We need a *.py source file instead of ' + path)
                self.add_file(path)
    def add_directory(self, path, ignore=None):
        """Add ``*.py`` files under the directory ``path`` to the archive.

        ``ignore`` is an optional predicate receiving the archive-relative
        destination path; truthy return skips the file.
        """
        for root, dirs, files in os.walk(path):
            # Entries are archived relative to the parent of ``path`` so the
            # package directory name itself is preserved.
            arc_prefix = os.path.relpath(root, os.path.dirname(path))
            # py3 remove pyc cache dirs.
            if '__pycache__' in dirs:
                dirs.remove('__pycache__')
            for f in files:
                dest_path = os.path.join(arc_prefix, f)
                # ignore specific files
                if ignore and ignore(dest_path):
                    continue
                # Skip bytecode and C source files.
                if f.endswith('.pyc') or f.endswith('.c'):
                    continue
                f_path = os.path.join(root, f)
                self.add_file(f_path, dest_path)
    def add_file(self, src, dest=None):
        """Add the file at ``src`` to the archive.
        If ``dest`` is ``None`` then it is added under just the original
        filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the
        archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at
        ``foo/bar.txt``.
        """
        dest = dest or os.path.basename(src)
        with open(src, 'rb') as fp:
            contents = fp.read()
        self.add_contents(dest, contents)
    def add_py_file(self, src, dest=None):
        """This is a special case of :py:meth:`add_file` that helps for adding
        a ``py`` when a ``pyc`` may be present as well. So for example, if
        ``__file__`` is ``foo.pyc`` and you do:
        .. code-block:: python
          archive.add_py_file(__file__)
        then this method will add ``foo.py`` instead if it exists, and raise
        ``IOError`` if it doesn't.
        """
        src = src[:-1] if src.endswith('.pyc') else src
        self.add_file(src, dest)
    def add_contents(self, dest, contents):
        """Add file contents to the archive under ``dest``.
        If ``dest`` is a path, it will be added compressed and world-readable
        (user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
        custom behavior.
        """
        assert not self._closed, "Archive closed"
        dest = self.create_zinfo(dest)
        self._zip_file.writestr(dest, contents)
    def close(self):
        """Close the zip file.
        Note underlying tempfile is removed when archive is garbage collected.
        """
        self._closed = True
        self._zip_file.close()
        log.debug(
            "Created custodian serverless archive size: %0.2fmb",
            (os.path.getsize(self._temp_archive_file.name) / (1024.0 * 1024.0)))
        return self
    def remove(self):
        """Dispose of the temp file for garbage collection."""
        # NOTE(review): this only drops the reference; the on-disk file is
        # NOT unlinked here, and __del__ skips unlinking once the reference
        # is None — confirm this is the intended cleanup contract.
        if self._temp_archive_file:
            self._temp_archive_file = None
    def get_checksum(self, encoder=base64.b64encode, hasher=hashlib.sha256):
        """Return the b64 encoded sha256 checksum of the archive."""
        assert self._closed, "Archive not closed"
        with open(self._temp_archive_file.name, 'rb') as fh:
            return encoder(checksum(fh, hasher())).decode('ascii')
    def get_bytes(self):
        """Return the entire zip file as a byte string. """
        assert self._closed, "Archive not closed"
        return self.get_stream().read()
    def get_stream(self):
        """Return the entire zip file as a stream. """
        assert self._closed, "Archive not closed"
        return open(self._temp_archive_file.name, 'rb')
    def get_reader(self):
        """Return a read-only :py:class:`~zipfile.ZipFile`."""
        assert self._closed, "Archive not closed"
        buf = io.BytesIO(self.get_bytes())
        return zipfile.ZipFile(buf, mode='r')
    def get_filenames(self):
        """Return a list of filenames in the archive."""
        return [n.filename for n in self.get_reader().filelist]
def get_exec_options(options):
    """Preserve cli output options into the serverless environment.

    Only truthy values survive; local filesystem output paths are
    dropped since they are meaningless inside lambda.
    """
    preserved = {}
    for key in ('log_group', 'tracer', 'output_dir', 'metrics_enabled'):
        if options[key]:
            preserved[key] = options[key]
    # ignore local fs/dir output paths
    out_dir = preserved.get('output_dir')
    if out_dir is not None and '://' not in out_dir:
        del preserved['output_dir']
    return preserved
def checksum(fh, hasher, blocksize=65536):
    """Feed ``fh`` through ``hasher`` in ``blocksize`` chunks; return the digest."""
    while data := fh.read(blocksize):
        hasher.update(data)
    return hasher.digest()
def generate_requirements(packages, ignore=(), exclude=(), include_self=False):
    """Generate a frozen requirements listing for the given set of packages.

    When ``include_self`` is true the named packages themselves are
    pinned alongside their transitive dependencies.
    """
    if pkgmd is None:
        raise ImportError("importlib_metadata missing")
    if isinstance(packages, str):
        packages = [packages]
    deps = []
    for pkg in packages:
        _package_deps(pkg, deps, ignore=ignore)
    if include_self:
        deps = list(set(deps).union(packages))
    lines = []
    for dep in sorted(deps):
        if dep in exclude:
            continue
        try:
            version = pkgmd.distribution(dep).version
        except pkgmd.PackageNotFoundError:
            # not installed locally; can't pin it
            continue
        lines.append('%s==%s' % (dep, version))
    return '\n'.join(lines)
def _package_deps(package, deps=None, ignore=()):
    """Recursive gather package's named transitive dependencies"""
    # `deps` doubles as the shared accumulator and visited-set across the
    # recursion, so each dependency is processed at most once.
    if deps is None:
        deps = []
    try:
        pdeps = pkgmd.requires(package) or ()
    except pkgmd.PackageNotFoundError:
        # package isn't installed in this environment; nothing to record
        return deps
    for r in pdeps:
        # skip optional deps
        if ';' in r and 'extra' in r:
            continue
        # Extract the leading distribution name from the requirement string
        # by scanning up to the first character that can't appear in a name
        # (space, version operator, bracket, etc).
        for idx, c in enumerate(r):
            if not c.isalnum() and c not in ('-', '_', '.'):
                break
        # if the scan ran off the end, the whole string is a bare name
        if idx + 1 == len(r):
            idx += 1
        pkg_name = r[:idx]
        if pkg_name in ignore:
            continue
        if pkg_name not in deps:
            try:
                # recurse first so transitive deps precede this package
                _package_deps(pkg_name, deps, ignore)
            except pkgmd.PackageNotFoundError:
                continue
            deps.append(pkg_name)
    return deps
def custodian_archive(packages=None):
    """Build a lambda code archive for running custodian.

    The archive always bundles `c7n`; extra packages may be supplied
    either via this parameter or in the policy's mode block.

    Example policy that includes additional packages

    .. code-block:: yaml

        policy:
          name: lambda-archive-example
          resource: s3
          mode:
            packages:
              - botocore

    packages: List of additional packages to include in the lambda archive.
    """
    modules = {'c7n'}
    if packages:
        modules = {m for m in modules.union(packages) if m}
    return PythonPackageArchive(sorted(modules))
class LambdaManager:
    """ Provides CRUD operations around lambda functions
    """

    def __init__(self, session_factory, s3_asset_path=None):
        # s3_asset_path: optional s3 uri prefix used when publishing code via s3
        self.session_factory = session_factory
        self.client = self.session_factory().client('lambda')
        self.s3_asset_path = s3_asset_path

    def list_functions(self, prefix=None):
        """Yield lambda function descriptions, optionally filtered by name prefix."""
        p = self.client.get_paginator('list_functions')
        for rp in p.paginate():
            for f in rp.get('Functions', []):
                if not prefix:
                    yield f
                elif f['FunctionName'].startswith(prefix):
                    yield f

    def publish(self, func, alias=None, role=None, s3_uri=None):
        """Create or update ``func``, publish its alias and wire event sources.

        Returns the create/update api result for the function.
        """
        result, changed = self._create_or_update(
            func, role, s3_uri, qualifier=alias)
        func.arn = result['FunctionArn']
        if alias and changed:
            func.alias = self.publish_alias(result, alias)
        elif alias:
            # nothing changed; construct the qualified arn directly
            func.alias = "%s:%s" % (func.arn, alias)
        else:
            func.alias = func.arn
        for e in func.get_events(self.session_factory):
            if e.add(func):
                log.debug(
                    "Added event source: %s to function: %s",
                    e, func.alias)
        return result

    # publish also serves as the generic "add" operation
    add = publish

    def remove(self, func, alias=None):
        """Unwire event sources and delete the function (idempotent)."""
        for e in func.get_events(self.session_factory):
            e.remove(func)
        log.info("Removing lambda function %s", func.name)
        try:
            self.client.delete_function(FunctionName=func.name)
        except self.client.exceptions.ResourceNotFoundException:
            pass

    @staticmethod
    def delta_function(old_config, new_config):
        """Return the list of configuration keys that differ.

        ``old_config`` is the deployed configuration (describe output),
        ``new_config`` the locally rendered configuration.
        """
        changed = []
        for k in new_config:
            # Layers need special handling as they have extra info on describe.
            if k == 'Layers' and k in old_config and new_config[k]:
                if sorted(new_config[k]) != sorted([l['Arn'] for l in old_config[k]]):
                    changed.append(k)
            # Vpc needs special handling as a dict with lists
            elif k == 'VpcConfig' and k in old_config and new_config[k]:
                if set(old_config[k]['SubnetIds']) != set(
                        new_config[k]['SubnetIds']):
                    changed.append(k)
                elif set(old_config[k]['SecurityGroupIds']) != set(
                        new_config[k]['SecurityGroupIds']):
                    changed.append(k)
            elif k not in old_config:
                # absent-on-describe is not a diff when the new value is just
                # the service default for an unset option
                if k in LAMBDA_EMPTY_VALUES and LAMBDA_EMPTY_VALUES[k] == new_config[k]:
                    continue
                changed.append(k)
            # For role we allow name only configuration
            elif k == 'Role':
                if (new_config[k] != old_config[k] and
                        not old_config[k].split('/', 1)[1] == new_config[k]):
                    changed.append(k)
            elif new_config[k] != old_config[k]:
                changed.append(k)
        return changed

    @staticmethod
    def diff_tags(old_tags, new_tags):
        """Return (tags_to_set, tag_keys_to_remove) between two tag maps."""
        add = {}
        remove = set()
        for k, v in new_tags.items():
            if k not in old_tags or old_tags[k] != v:
                add[k] = v
        for k in old_tags:
            if k not in new_tags:
                remove.add(k)
        return add, list(remove)

    def _create_or_update(self, func, role=None, s3_uri=None, qualifier=None):
        # Returns (api_result, changed:bool).
        role = func.role or role
        assert role, "Lambda function role must be specified"
        archive = func.get_archive()
        existing = self.get(func.name, qualifier)

        if s3_uri:
            # TODO: support versioned buckets
            bucket, key = self._upload_func(s3_uri, func, archive)
            code_ref = {'S3Bucket': bucket, 'S3Key': key}
        else:
            code_ref = {'ZipFile': archive.get_bytes()}

        changed = False
        if existing:
            result = old_config = existing['Configuration']
            # code update only when the archive checksum differs
            if archive.get_checksum() != old_config['CodeSha256']:
                log.debug("Updating function %s code", func.name)
                params = dict(FunctionName=func.name, Publish=True)
                params.update(code_ref)
                result = self.client.update_function_code(**params)
                changed = True
            # TODO/Consider also set publish above to false, and publish
            # after configuration change?
            new_config = func.get_config()
            new_config['Role'] = role
            if self._update_tags(existing, new_config.pop('Tags', {})):
                changed = True
            config_changed = self.delta_function(old_config, new_config)
            if config_changed:
                log.debug("Updating function: %s config %s",
                          func.name, ", ".join(sorted(config_changed)))
                result = self.client.update_function_configuration(**new_config)
                changed = True
            if self._update_concurrency(existing, func):
                changed = True
        else:
            log.info('Publishing custodian policy lambda function %s', func.name)
            params = func.get_config()
            params.update({'Publish': True, 'Code': code_ref, 'Role': role})
            result = self.client.create_function(**params)
            self._update_concurrency(None, func)
            changed = True

        return result, changed

    def _update_concurrency(self, existing, func):
        # Reconcile reserved concurrency with the desired value.
        # NOTE(review): only the delete path returns True; the put path
        # falls through returning None, so callers never mark `changed`
        # for a concurrency update -- confirm whether that is intended.
        e_concurrency = None
        if existing:
            e_concurrency = existing.get('Concurrency', {}).get(
                'ReservedConcurrentExecutions')
        if e_concurrency == func.concurrency:
            return
        elif e_concurrency is not None and func.concurrency is None:
            log.debug("Removing function: %s concurrency", func.name)
            self.client.delete_function_concurrency(
                FunctionName=func.name)
            return True
        log.debug("Updating function: %s concurrency", func.name)
        self.client.put_function_concurrency(
            FunctionName=func.name,
            ReservedConcurrentExecutions=func.concurrency)

    def _update_tags(self, existing, new_tags):
        # tag dance
        base_arn = existing['Configuration']['FunctionArn']
        if base_arn.count(':') > 6:  # trim version/alias
            base_arn = base_arn.rsplit(':', 1)[0]
        tags_to_add, tags_to_remove = self.diff_tags(
            existing.get('Tags', {}), new_tags)
        changed = False
        if tags_to_add:
            log.debug("Updating function tags: %s" % base_arn)
            self.client.tag_resource(Resource=base_arn, Tags=tags_to_add)
            changed = True
        if tags_to_remove:
            log.debug("Removing function stale tags: %s" % base_arn)
            self.client.untag_resource(Resource=base_arn, TagKeys=tags_to_remove)
            changed = True
        return changed

    def _upload_func(self, s3_uri, func, archive):
        """Upload the archive under the s3 uri prefix; return (bucket, key)."""
        from boto3.s3.transfer import S3Transfer, TransferConfig
        _, bucket, key_prefix = parse_s3(s3_uri)
        key = "%s/%s" % (key_prefix, func.name)
        transfer = S3Transfer(
            self.session_factory().client('s3'),
            config=TransferConfig(
                multipart_threshold=1024 * 1024 * 4))
        transfer.upload_file(
            archive.path,
            bucket=bucket,
            key=key,
            extra_args={
                'ServerSideEncryption': 'AES256'})
        return bucket, key

    def publish_alias(self, func_data, alias):
        """Create or update an alias for the given function.
        """
        if not alias:
            return func_data['FunctionArn']
        func_name = func_data['FunctionName']
        func_version = func_data['Version']

        exists = resource_exists(
            self.client.get_alias, FunctionName=func_name, Name=alias)

        if not exists:
            log.debug("Publishing custodian lambda alias %s", alias)
            alias_result = self.client.create_alias(
                FunctionName=func_name,
                Name=alias,
                FunctionVersion=func_version)
        else:
            # no-op when the alias already points at this version
            if (exists['FunctionVersion'] == func_version and
                    exists['Name'] == alias):
                return exists['AliasArn']
            log.debug('Updating custodian lambda alias %s', alias)
            alias_result = self.client.update_alias(
                FunctionName=func_name,
                Name=alias,
                FunctionVersion=func_version)
        return alias_result['AliasArn']

    def get(self, func_name, qualifier=None):
        """Describe the function (optionally a qualifier), or False if absent."""
        params = {'FunctionName': func_name}
        if qualifier:
            params['Qualifier'] = qualifier
        return resource_exists(
            self.client.get_function, **params)
def resource_exists(op, NotFound="ResourceNotFoundException", *args, **kw):
    """Invoke ``op`` and return its result, or False on the given
    not-found error code; any other client error propagates.
    """
    try:
        return op(*args, **kw)
    except ClientError as e:
        if e.response['Error']['Code'] != NotFound:
            raise
        return False
class AbstractLambdaFunction(metaclass=abc.ABCMeta):
    """Abstract base class for lambda functions.

    Fix: the original assigned ``__metaclass__ = abc.ABCMeta``, a
    Python 2 idiom that is inert on Python 3, so none of the abstract
    members were actually enforced. Declaring the metaclass in the
    class statement restores enforcement. The deprecated
    ``abc.abstractproperty`` is replaced by the equivalent
    ``@property`` + ``@abc.abstractmethod`` stack.
    """
    # qualified arn/alias, assigned by LambdaManager.publish
    alias = None

    @property
    @abc.abstractmethod
    def name(self):
        """Name for the lambda function"""

    @property
    @abc.abstractmethod
    def runtime(self):
        """Lambda runtime identifier (e.g. python3.8)."""

    @property
    @abc.abstractmethod
    def description(self):
        """Human readable function description."""

    @property
    @abc.abstractmethod
    def handler(self):
        """module.function entry point string."""

    @property
    @abc.abstractmethod
    def memory_size(self):
        """Memory allocation in mb."""

    @property
    @abc.abstractmethod
    def timeout(self):
        """Execution timeout in seconds."""

    @property
    @abc.abstractmethod
    def role(self):
        """IAM execution role (arn or name)."""

    @property
    @abc.abstractmethod
    def subnets(self):
        """Vpc subnet ids, or None for no vpc config."""

    @property
    @abc.abstractmethod
    def security_groups(self):
        """Vpc security group ids, or None for no vpc config."""

    @property
    @abc.abstractmethod
    def dead_letter_config(self):
        """DLQ configuration mapping."""

    @property
    @abc.abstractmethod
    def environment(self):
        """Environment variables mapping ({'Variables': {...}})."""

    @property
    @abc.abstractmethod
    def kms_key_arn(self):
        """KMS key arn for env encryption, or empty string."""

    @property
    @abc.abstractmethod
    def tracing_config(self):
        """X-Ray tracing configuration mapping."""

    @property
    @abc.abstractmethod
    def tags(self):
        """Resource tags mapping."""

    @property
    @abc.abstractmethod
    def layers(self):
        """Sequence of layer arns."""

    @property
    @abc.abstractmethod
    def concurrency(self):
        """Reserved concurrency, or None."""

    @abc.abstractmethod
    def get_events(self, session_factory):
        """event sources that should be bound to this lambda."""

    @abc.abstractmethod
    def get_archive(self):
        """Return the lambda distribution archive object."""

    def get_config(self):
        """Render the create/update function configuration parameters."""
        conf = {
            'FunctionName': self.name,
            'MemorySize': self.memory_size,
            'Role': self.role,
            'Description': self.description,
            'Runtime': self.runtime,
            'Handler': self.handler,
            'Timeout': self.timeout,
            'TracingConfig': self.tracing_config,
            'KMSKeyArn': self.kms_key_arn,
            'DeadLetterConfig': self.dead_letter_config,
            'VpcConfig': LAMBDA_EMPTY_VALUES['VpcConfig'],
            'Tags': self.tags}
        # optional settings are only included when actually configured
        if self.layers:
            conf['Layers'] = self.layers
        if self.environment['Variables']:
            conf['Environment'] = self.environment
        if self.subnets and self.security_groups:
            conf['VpcConfig'] = {
                'SubnetIds': self.subnets,
                'SecurityGroupIds': self.security_groups}
        return conf
# Values the Lambda API reports for configuration options that were never
# set; delta_function consults this so a new config omitting an option does
# not register a spurious diff against the deployed defaults.
LAMBDA_EMPTY_VALUES = {
    'Environment': {'Variables': {}},
    'DeadLetterConfig': {},
    'TracingConfig': {'Mode': 'PassThrough'},
    'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []},
    'KMSKeyArn': '',
}
class LambdaFunction(AbstractLambdaFunction):
    """Concrete lambda function driven by a plain configuration mapping.

    ``func_data`` supplies the settings, ``archive`` supplies the code.
    """

    def __init__(self, func_data, archive):
        self.func_data = func_data
        required = {
            'name', 'handler', 'memory_size',
            'timeout', 'role', 'runtime',
            'description'}
        missing = {k for k in required if k not in func_data}
        if missing:
            raise ValueError("Missing required keys %s" % " ".join(missing))
        self.archive = archive

    @property
    def name(self):
        """Function name (required)."""
        return self.func_data['name']

    @property
    def description(self):
        """Function description (required)."""
        return self.func_data['description']

    @property
    def handler(self):
        """module.function entry point (required)."""
        return self.func_data['handler']

    @property
    def memory_size(self):
        """Memory allocation in mb (required)."""
        return self.func_data['memory_size']

    @property
    def timeout(self):
        """Execution timeout in seconds (required)."""
        return self.func_data['timeout']

    @property
    def runtime(self):
        """Lambda runtime identifier (required)."""
        return self.func_data['runtime']

    @property
    def role(self):
        """IAM execution role (required)."""
        return self.func_data['role']

    @property
    def layers(self):
        """Optional layer arns; empty tuple by default."""
        return self.func_data.get('layers', ())

    @property
    def concurrency(self):
        """Optional reserved concurrency."""
        return self.func_data.get('concurrency')

    @property
    def security_groups(self):
        """Optional vpc security groups."""
        return self.func_data.get('security_groups', None)

    @property
    def subnets(self):
        """Optional vpc subnets."""
        return self.func_data.get('subnets', None)

    @property
    def dead_letter_config(self):
        """Optional DLQ config, defaulting to the lambda empty value."""
        return self.func_data.get(
            'dead_letter_config', LAMBDA_EMPTY_VALUES['DeadLetterConfig'])

    @property
    def environment(self):
        """Optional environment variables, defaulting to empty."""
        return self.func_data.get(
            'environment', LAMBDA_EMPTY_VALUES['Environment'])

    @property
    def kms_key_arn(self):
        """Optional KMS key arn; empty string by default."""
        return self.func_data.get('kms_key_arn', '')

    @property
    def tracing_config(self):
        """Optional tracing config; service default (PassThrough) otherwise."""
        return self.func_data.get(
            'tracing_config', LAMBDA_EMPTY_VALUES['TracingConfig'])

    @property
    def tags(self):
        """Optional resource tags."""
        return self.func_data.get('tags', {})

    def get_events(self, session_factory):
        """Event sources come directly from the config mapping."""
        return self.func_data.get('events', ())

    def get_archive(self):
        """Return the archive handed in at construction."""
        return self.archive
PolicyHandlerTemplate = """\
from c7n import handler
def run(event, context):
return handler.dispatch_event(event, context)
"""
class PolicyLambda(AbstractLambdaFunction):
    """Wraps a custodian policy to turn it into a lambda function.
    """
    # fixed entry point; the module is rendered from PolicyHandlerTemplate
    handler = "custodian_policy.run"

    def __init__(self, policy):
        self.policy = policy
        # archive contents are finalized lazily in get_archive()
        self.archive = custodian_archive(packages=self.packages)

    @property
    def name(self):
        # policies may override the default 'custodian-' function name prefix
        prefix = self.policy.data['mode'].get('function-prefix', 'custodian-')
        return "%s%s" % (prefix, self.policy.name)

    @property
    def description(self):
        return self.policy.data.get(
            'description', 'cloud-custodian lambda policy')

    @property
    def role(self):
        return self.policy.data['mode'].get('role', '')

    @property
    def runtime(self):
        return self.policy.data['mode'].get('runtime', 'python3.8')

    @property
    def memory_size(self):
        return self.policy.data['mode'].get('memory', 512)

    @property
    def timeout(self):
        return self.policy.data['mode'].get('timeout', 900)

    @property
    def security_groups(self):
        return self.policy.data['mode'].get('security_groups', None)

    @property
    def subnets(self):
        return self.policy.data['mode'].get('subnets', None)

    @property
    def dead_letter_config(self):
        return self.policy.data['mode'].get(
            'dead_letter_config', LAMBDA_EMPTY_VALUES['DeadLetterConfig'])

    @property
    def environment(self):
        return self.policy.data['mode'].get(
            'environment', LAMBDA_EMPTY_VALUES['Environment'])

    @property
    def kms_key_arn(self):
        return self.policy.data['mode'].get('kms_key_arn', '')

    @property
    def tracing_config(self):
        # Default
        return self.policy.data['mode'].get(
            'tracing_config', {'Mode': 'PassThrough'})

    @property
    def tags(self):
        return self.policy.data['mode'].get('tags', {})

    @property
    def concurrency(self):
        return self.policy.data['mode'].get('concurrency')

    @property
    def layers(self):
        return self.policy.data['mode'].get('layers', ())

    @property
    def packages(self):
        return self.policy.data['mode'].get('packages')

    def get_events(self, session_factory):
        """Map the policy's execution mode type to its lambda event source."""
        events = []
        if self.policy.data['mode']['type'] in (
                'config-rule', 'config-poll-rule'):
            events.append(
                ConfigRule(self.policy.data['mode'], session_factory))
        elif self.policy.data['mode']['type'] == 'hub-action':
            events.append(
                SecurityHubAction(self.policy, session_factory))
        else:
            # all remaining mode types ride on cloud watch events
            events.append(
                CloudWatchEventSource(
                    self.policy.data['mode'], session_factory))
        return events

    def get_archive(self):
        """Finalize the archive with the policy config and handler module."""
        self.archive.add_contents(
            'config.json', json.dumps(
                {'execution-options': get_exec_options(self.policy.options),
                 'policies': [self.policy.data]}, indent=2))
        self.archive.add_contents('custodian_policy.py', PolicyHandlerTemplate)
        self.archive.close()
        return self.archive
def zinfo(fname):
    """Build a ZipInfo whose permissions survive lambda's exec environment.

    Amazon lambda exec environment setup can break itself if zip files
    aren't constructed a particular way: it respects file perm attributes
    from the zip, extracts code as one user and executes it as another.
    zipfile's default 0600 member perms leave the code unreadable to the
    executing user, so grant world-read here.
    http://unix.stackexchange.com/questions/14705/
    """
    member = zipfile.ZipInfo(fname)
    # upper 16 bits of external_attr hold the unix mode; 0644 = world readable
    member.external_attr = 0o644 << 16
    return member
class AWSEventBase:
    """Base class for AWS event sources using lazy client initialization.

    Primarily utilized by sources that support static rendering to
    IAAC templates (tools/ops/policylambda.py) so they can operate in an
    account agnostic fashion.
    """
    # subclasses name the boto service their client talks to
    client_service = None

    def __init__(self, data, session_factory):
        self.data = data
        self.session_factory = session_factory
        self._session = None
        self._client = None

    @property
    def session(self):
        """Boto session, constructed on first access."""
        if not self._session:
            self._session = self.session_factory()
        return self._session

    @property
    def client(self):
        """Service client, constructed on first access."""
        if not self._client:
            self._client = self.session.client(self.client_service)
        return self._client
class CloudWatchEventSource(AWSEventBase):
    """Subscribe a lambda to cloud watch events.

    Cloud watch events supports a number of different event
    sources, from periodic timers with cron syntax, to
    real time instance state notifications, cloud trail
    events, and realtime asg membership changes.

    Event Pattern for Instance State

    .. code-block:: json

       {
         "source": ["aws.ec2"],
         "detail-type": ["EC2 Instance State-change Notification"],
         "detail": { "state": ["pending"]}
       }

    Event Pattern for Cloud Trail API

    .. code-block:: json

       {
         "detail-type": ["AWS API Call via CloudTrail"],
         "detail": {
            "eventSource": ["s3.amazonaws.com"],
            "eventName": ["CreateBucket", "DeleteBucket"]
         }
       }
    """
    # shorthand asg event names -> cwe detail-type strings
    ASG_EVENT_MAPPING = {
        'launch-success': 'EC2 Instance Launch Successful',
        'launch-failure': 'EC2 Instance Launch Unsuccessful',
        'terminate-success': 'EC2 Instance Terminate Successful',
        'terminate-failure': 'EC2 Instance Terminate Unsuccessful'}

    client_service = 'events'

    def get(self, rule_name):
        """Describe the cwe rule, or False when it doesn't exist."""
        return resource_exists(self.client.describe_rule, Name=rule_name)

    @staticmethod
    def delta(src, tgt):
        """Given two cwe rules determine if the configuration is the same.

        Name is already implied.
        """
        for k in ['State', 'EventPattern', 'ScheduleExpression']:
            if src.get(k) != tgt.get(k):
                return True
        return False

    def __repr__(self):
        return "<CWEvent Type:%s Events:%s>" % (
            self.data.get('type'),
            ', '.join(map(str, self.data.get('events', []))))

    def resolve_cloudtrail_payload(self, payload):
        """Fill the cloudtrail detail clause from the configured events.

        Events may be shorthand names resolved via the CloudWatchEvents
        registry, or dicts with explicit 'event'/'source' keys.
        """
        sources = self.data.get('sources', [])
        events = []
        for e in self.data.get('events'):
            if not isinstance(e, dict):
                events.append(e)
                event_info = CloudWatchEvents.get(e)
                if event_info is None:
                    # unknown shorthand: keep the event name, no source info
                    continue
            else:
                event_info = e
                events.append(e['event'])
            sources.append(event_info['source'])

        payload['detail'] = {
            'eventSource': list(set(sources)),
            'eventName': events}

    def render_event_pattern(self):
        """Render the cwe event pattern json for this source's mode type.

        Returns None for purely scheduled (periodic) rules.
        """
        event_type = self.data.get('type')
        pattern = self.data.get('pattern')
        payload = {}
        if pattern:
            payload.update(pattern)
        if event_type == 'cloudtrail':
            payload['detail-type'] = ['AWS API Call via CloudTrail']
            self.resolve_cloudtrail_payload(payload)

        if event_type == 'cloudtrail':
            # console sign-in events use a distinct detail-type
            if 'signin.amazonaws.com' in payload['detail']['eventSource']:
                payload['detail-type'] = ['AWS Console Sign In via CloudTrail']
        elif event_type == 'guard-duty':
            payload['source'] = ['aws.guardduty']
            payload['detail-type'] = ['GuardDuty Finding']
            if 'resource-filter' in self.data:
                payload.update({
                    'detail': {'resource': {'resourceType': [self.data['resource-filter']]}}})
        elif event_type == "ec2-instance-state":
            payload['source'] = ['aws.ec2']
            payload['detail-type'] = [
                "EC2 Instance State-change Notification"]
            # Technically could let empty be all events, but likely misconfig
            payload['detail'] = {"state": self.data.get('events', [])}
        elif event_type == "asg-instance-state":
            payload['source'] = ['aws.autoscaling']
            events = []
            for e in self.data.get('events', []):
                events.append(self.ASG_EVENT_MAPPING.get(e, e))
            payload['detail-type'] = events
        elif event_type == 'phd':
            payload['source'] = ['aws.health']
            payload.setdefault('detail', {})
            if self.data.get('events'):
                payload['detail'].update({
                    'eventTypeCode': list(self.data['events'])
                })
            if self.data.get('categories', []):
                payload['detail']['eventTypeCategory'] = self.data['categories']
            if not payload['detail']:
                payload.pop('detail')
        elif event_type == 'hub-finding':
            payload['source'] = ['aws.securityhub']
            payload['detail-type'] = ['Security Hub Findings - Imported']
        elif event_type == 'hub-action':
            payload['source'] = ['aws.securityhub']
            payload['detail-type'] = [
                'Security Hub Findings - Custom Action',
                'Security Hub Insight Results']
        elif event_type == 'periodic':
            # schedule-only rules have no event pattern
            pass
        else:
            raise ValueError(
                "Unknown lambda event source type: %s" % event_type)
        if not payload:
            return None
        if self.data.get('pattern'):
            payload = merge_dict(payload, self.data['pattern'])
        return json.dumps(payload)

    def add(self, func):
        """Create/update the rule, grant invoke permission, attach the target.

        Returns True when the target had to be (re)created, None otherwise.
        """
        params = dict(
            Name=func.name, Description=func.description, State='ENABLED')

        pattern = self.render_event_pattern()
        if pattern:
            params['EventPattern'] = pattern
        schedule = self.data.get('schedule')
        if schedule:
            params['ScheduleExpression'] = schedule

        rule = self.get(func.name)

        if rule and self.delta(rule, params):
            log.debug("Updating cwe rule for %s" % func.name)
            response = self.client.put_rule(**params)
        elif not rule:
            log.debug("Creating cwe rule for %s" % (self))
            response = self.client.put_rule(**params)
        else:
            # unchanged; synthesize the put_rule-shaped response
            response = {'RuleArn': rule['Arn']}

        client = self.session.client('lambda')
        try:
            client.add_permission(
                FunctionName=func.name,
                StatementId=func.name,
                SourceArn=response['RuleArn'],
                Action='lambda:InvokeFunction',
                Principal='events.amazonaws.com')
            log.debug('Added lambda invoke cwe rule permission')
        except client.exceptions.ResourceConflictException:
            # permission already granted
            pass

        # Add Targets
        found = False
        response = RuleRetry(self.client.list_targets_by_rule, Rule=func.name)
        # CloudWatchE seems to be quite picky about function arns (no aliases/versions)
        func_arn = func.arn
        if func_arn.count(':') > 6:
            func_arn, version = func_arn.rsplit(':', 1)
        for t in response['Targets']:
            if func_arn == t['Arn']:
                found = True

        if found:
            return

        log.debug('Creating cwe rule target for %s on func:%s' % (
            self, func_arn))

        self.client.put_targets(
            Rule=func.name, Targets=[{"Id": func.name, "Arn": func_arn}])
        return True

    def update(self, func):
        """Alias of add; the put semantics are idempotent."""
        self.add(func)

    def pause(self, func):
        """Disable the rule, best effort."""
        try:
            self.client.disable_rule(Name=func.name)
        except ClientError:
            pass

    def resume(self, func):
        """Re-enable the rule, best effort."""
        try:
            self.client.enable_rule(Name=func.name)
        except ClientError:
            pass

    def remove(self, func):
        """Remove the rule's targets then the rule itself (idempotent)."""
        if self.get(func.name):
            log.info("Removing cwe targets and rule %s", func.name)
            try:
                targets = self.client.list_targets_by_rule(
                    Rule=func.name)['Targets']
                if targets:
                    self.client.remove_targets(
                        Rule=func.name,
                        Ids=[t['Id'] for t in targets])
            except ClientError as e:
                log.warning(
                    "Could not remove targets for rule %s error: %s",
                    func.name, e)
            self.client.delete_rule(Name=func.name)
class SecurityHubAction:
    """Bind a policy to a Security Hub custom action via a cwe rule."""

    def __init__(self, policy, session_factory):
        self.policy = policy
        self.session_factory = session_factory
        cwe_data = self.policy.data['mode']
        # NOTE: mutates the policy's mode block in place, scoping the cloud
        # watch event rule to this custom action's arn.
        cwe_data['pattern'] = {'resources': [self._get_arn()]}
        self.cwe = CloudWatchEventSource(
            cwe_data, session_factory)

    def __repr__(self):
        return "<SecurityHub Action %s>" % self.policy.name

    def _get_arn(self):
        # the custom action is named after the policy
        return 'arn:aws:securityhub:%s:%s:action/custom/%s' % (
            self.policy.options.region,
            self.policy.options.account_id,
            self.policy.name)

    def delta(self, src, tgt):
        """True when the action name or description differ."""
        for k in ('Name', 'Description'):
            if src[k] != tgt[k]:
                return True
        return False

    def get(self, name):
        """Return the existing cwe subscription and hub action (or None)."""
        client = local_session(self.session_factory).client('securityhub')
        subscriber = self.cwe.get(name)
        arn = self._get_arn()
        actions = client.describe_action_targets(
            ActionTargetArns=[arn]).get('ActionTargets', ())
        assert len(actions) in (0, 1), "Found duplicate action %s" % (
            actions,)
        action = actions and actions.pop() or None
        return {'event': subscriber, 'action': action}

    def add(self, func):
        """Create/update the cwe rule and hub custom action; return its arn."""
        self.cwe.add(func)
        client = local_session(self.session_factory).client('securityhub')
        action = self.get(func.name).get('action')
        arn = self._get_arn()
        params = {'Name': (
            self.policy.data.get('title') or (
                "%s %s" % (self.policy.resource_type.split('.')[-1].title(),
                           self.policy.name))),
            'Description': (
                self.policy.data.get('description') or
                self.policy.data.get('title') or
                self.policy.name),
            'Id': self.policy.name}
        # hub descriptions are limited to 500 characters
        params['Description'] = params['Description'].strip()[:500]
        if not action:
            log.debug('Creating SecurityHub Action %s' % arn)
            return client.create_action_target(
                **params).get('ActionTargetArn')
        params.pop('Id')
        if self.delta(action, params):
            log.debug('Updating SecurityHub Action %s' % arn)
            client.update_action_target(ActionTargetArn=arn, **params)
        return arn

    def update(self, func):
        """Refresh both the cwe rule and the action target."""
        self.cwe.update(func)
        self.add(func)

    def remove(self, func):
        """Remove both the cwe rule and the action target."""
        self.cwe.remove(func)
        client = local_session(self.session_factory).client('securityhub')
        client.delete_action_target(ActionTargetArn=self._get_arn())
class BucketLambdaNotification:
    """ Subscribe a lambda to bucket notifications directly. """

    def __init__(self, data, session_factory, bucket):
        self.data = data
        self.session_factory = session_factory
        self.session = session_factory()
        self.bucket = bucket

    def delta(self, src, tgt):
        """Return True when any notification-relevant key differs."""
        for k in ['Id', 'LambdaFunctionArn', 'Events', 'Filters']:
            if src.get(k) != tgt.get(k):
                return True
        return False

    def _get_notifies(self, s3, func):
        # Return (full notification config, our existing lambda entry or False).
        notifies = s3.get_bucket_notification_configuration(
            Bucket=self.bucket['Name'])
        found = False
        for f in notifies.get('LambdaFunctionConfigurations', []):
            if f['Id'] != func.name:
                continue
            found = f
        return notifies, found

    def add(self, func):
        """Install/refresh the bucket -> lambda notification.

        Returns True when the configuration was written, None if it was
        already present and current.
        """
        s3 = self.session.client('s3')
        notifies, found = self._get_notifies(s3, func)
        notifies.pop('ResponseMetadata', None)
        func_arn = func.arn
        # strip a trailing numeric version qualifier; s3 wants the bare arn
        if func_arn.rsplit(':', 1)[-1].isdigit():
            func_arn = func_arn.rsplit(':', 1)[0]
        n_params = {
            'Id': func.name,
            'LambdaFunctionArn': func_arn,
            'Events': self.data.get('events', ['s3:ObjectCreated:*'])}
        if self.data.get('filters'):
            # FIX: previously referenced undefined attribute ``self.filters``
            # (never assigned anywhere in this class), raising AttributeError
            # whenever filters were configured; the rules live in the data.
            # NOTE(review): the S3 API names this key 'Filter'; 'Filters'
            # matches delta() above but looks suspect -- confirm against API.
            n_params['Filters'] = {
                'Key': {'FilterRules': self.data['filters']}}

        if found:
            if self.delta(found, n_params):
                # stale entry; drop it and re-add below
                notifies['LambdaFunctionConfigurations'].remove(found)
            else:
                log.info("Bucket lambda notification present")
                return

        lambda_client = self.session.client('lambda')
        params = dict(
            FunctionName=func.name,
            StatementId=self.bucket['Name'],
            Action='lambda:InvokeFunction',
            Principal='s3.amazonaws.com')
        if self.data.get('account_s3'):
            params['SourceAccount'] = self.data['account_s3']
            params['SourceArn'] = 'arn:aws:s3:::*'
        else:
            params['SourceArn'] = 'arn:aws:s3:::%s' % self.bucket['Name']
        try:
            lambda_client.add_permission(**params)
        except lambda_client.exceptions.ResourceConflictException:
            # permission already granted
            pass

        notifies.setdefault('LambdaFunctionConfigurations', []).append(n_params)
        s3.put_bucket_notification_configuration(
            Bucket=self.bucket['Name'], NotificationConfiguration=notifies)
        return True

    def remove(self, func):
        """Remove the notification entry and the invoke permission."""
        s3 = self.session.client('s3')
        notifies, found = self._get_notifies(s3, func)
        if not found:
            return
        lambda_client = self.session.client('lambda')
        try:
            # NOTE(review): other event sources use func.name here; this
            # indexes func like a mapping -- confirm what callers pass in.
            response = lambda_client.remove_permission(
                FunctionName=func['FunctionName'],
                StatementId=self.bucket['Name'])
            log.debug("Removed lambda permission result: %s" % response)
        except lambda_client.exceptions.ResourceNotFoundException:
            pass
        notifies['LambdaFunctionConfigurations'].remove(found)
        s3.put_bucket_notification_configuration(
            Bucket=self.bucket['Name'],
            NotificationConfiguration=notifies)
class CloudWatchLogSubscription:
    """ Subscribe a lambda to a log group[s]
    """
    # seconds to wait for iam permission propagation
    iam_delay = 1.5

    def __init__(self, session_factory, log_groups, filter_pattern):
        # log_groups: describe_log_groups()-style dicts (arn, logGroupName)
        self.log_groups = log_groups
        self.filter_pattern = filter_pattern
        self.session_factory = session_factory
        self.session = session_factory()
        self.client = self.session.client('logs')

    def add(self, func):
        """Grant logs-service invoke permission and put subscription filters."""
        lambda_client = self.session.client('lambda')
        for group in self.log_groups:
            log.info(
                "Creating subscription filter for %s" % group['logGroupName'])
            region = group['arn'].split(':', 4)[3]
            try:
                lambda_client.add_permission(
                    FunctionName=func.name,
                    StatementId=group['logGroupName'][1:].replace('/', '-'),
                    SourceArn=group['arn'],
                    Action='lambda:InvokeFunction',
                    Principal='logs.%s.amazonaws.com' % region)
                # FIX: message was garbled ("Added lambda ipo nvoke ...")
                log.debug("Added lambda invoke log group permission")
                # iam eventual consistency and propagation
                time.sleep(self.iam_delay)
            except lambda_client.exceptions.ResourceConflictException:
                pass
            # Consistent put semantics / ie no op if extant
            self.client.put_subscription_filter(
                logGroupName=group['logGroupName'],
                filterName=func.name,
                filterPattern=self.filter_pattern,
                destinationArn=func.alias or func.arn)

    def remove(self, func):
        """Remove the invoke permission and subscription filter per group."""
        lambda_client = self.session.client('lambda')
        for group in self.log_groups:
            try:
                response = lambda_client.remove_permission(
                    FunctionName=func.name,
                    StatementId=group['logGroupName'][1:].replace('/', '-'))
                log.debug("Removed lambda permission result: %s" % response)
            except lambda_client.exceptions.ResourceNotFoundException:
                pass
            try:
                response = self.client.delete_subscription_filter(
                    logGroupName=group['logGroupName'], filterName=func.name)
                log.debug("Removed subscription filter from: %s",
                          group['logGroupName'])
            # FIX: previously caught the *lambda* client's
            # ResourceNotFoundException around this *logs* call; botocore
            # error classes are generated per service, so a missing logs
            # filter was never actually caught.
            except self.client.exceptions.ResourceNotFoundException:
                pass
class SQSSubscription:
    """ Subscribe a lambda to one or more SQS queues.
    """

    def __init__(self, session_factory, queue_arns, batch_size=10):
        self.queue_arns = queue_arns
        self.session_factory = session_factory
        self.batch_size = batch_size

    def add(self, func):
        """Create or enable an event source mapping per queue.

        Returns True when any mapping was created or updated.
        """
        client = local_session(self.session_factory).client('lambda')
        event_mappings = {
            m['EventSourceArn']: m for m in client.list_event_source_mappings(
                FunctionName=func.name).get('EventSourceMappings', ())}
        modified = False
        for queue_arn in self.queue_arns:
            mapping = None
            if queue_arn in event_mappings:
                mapping = event_mappings[queue_arn]
                # FIX: the skip condition was inverted
                # ("== 'Enabled' or != batch_size"), which skipped updating
                # any enabled mapping with a stale batch size. Only skip when
                # the mapping is already enabled with the desired batch size.
                if (mapping['State'] == 'Enabled' and
                        mapping['BatchSize'] == self.batch_size):
                    continue
                modified = True
            else:
                modified = True

            if not modified:
                return modified

            if mapping is not None:
                log.info(
                    "Updating subscription %s on %s", func.name, queue_arn)
                client.update_event_source_mapping(
                    UUID=mapping['UUID'],
                    Enabled=True,
                    BatchSize=self.batch_size)
            else:
                log.info("Subscribing %s to %s", func.name, queue_arn)
                client.create_event_source_mapping(
                    FunctionName=func.name,
                    EventSourceArn=queue_arn,
                    BatchSize=self.batch_size)
        return modified

    def remove(self, func):
        """Delete the event source mapping for each subscribed queue."""
        client = local_session(self.session_factory).client('lambda')
        event_mappings = {
            m['EventSourceArn']: m for m in client.list_event_source_mappings(
                FunctionName=func.name).get('EventSourceMappings', ())}
        for queue_arn in self.queue_arns:
            if queue_arn not in event_mappings:
                continue
            client.delete_event_source_mapping(
                UUID=event_mappings[queue_arn]['UUID'])
class SNSSubscription:
    """ Subscribe a lambda to one or more SNS topics.
    """
    # seconds to wait for iam permission propagation
    iam_delay = 1.5

    def __init__(self, session_factory, topic_arns):
        self.topic_arns = topic_arns
        self.session_factory = session_factory

    @staticmethod
    def _parse_arn(arn):
        # arn:aws:sns:<region>:<account>:<topic> -> (region, topic, stmt id)
        parts = arn.split(':')
        region, topic_name = parts[3], parts[5]
        statement_id = 'sns-topic-' + topic_name
        return region, topic_name, statement_id

    def add(self, func):
        """Grant sns invoke permission and subscribe the lambda to each topic."""
        session = local_session(self.session_factory)
        lambda_client = session.client('lambda')
        for arn in self.topic_arns:
            region, topic_name, statement_id = self._parse_arn(arn)
            log.info("Subscribing %s to %s" % (func.name, topic_name))

            # Add permission to lambda for sns invocation.
            try:
                lambda_client.add_permission(
                    FunctionName=func.name,
                    StatementId='sns-topic-' + topic_name,
                    SourceArn=arn,
                    Action='lambda:InvokeFunction',
                    Principal='sns.amazonaws.com')
                log.debug("Added permission for sns to invoke lambda")
                # iam eventual consistency and propagation
                time.sleep(self.iam_delay)
            except lambda_client.exceptions.ResourceConflictException:
                pass

            # Subscribe the lambda to the topic, idempotent
            sns_client = session.client('sns')
            sns_client.subscribe(
                TopicArn=arn, Protocol='lambda', Endpoint=func.arn)

    def remove(self, func):
        """Remove the invoke permission and unsubscribe the lambda."""
        session = local_session(self.session_factory)
        lambda_client = session.client('lambda')
        sns_client = session.client('sns')
        for topic_arn in self.topic_arns:
            region, topic_name, statement_id = self._parse_arn(topic_arn)
            try:
                response = lambda_client.remove_permission(
                    FunctionName=func.name,
                    StatementId=statement_id)
                log.debug("Removed lambda permission result: %s" % response)
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceNotFoundException':
                    raise

            paginator = sns_client.get_paginator('list_subscriptions_by_topic')

            # local sentinel exception used to break out of the nested
            # pagination loops once our subscription has been removed
            class Done(Exception):
                pass

            try:
                for page in paginator.paginate(TopicArn=topic_arn):
                    for subscription in page['Subscriptions']:
                        if subscription['Endpoint'] != func.arn:
                            continue
                        try:
                            response = sns_client.unsubscribe(
                                SubscriptionArn=subscription['SubscriptionArn'])
                            log.debug("Unsubscribed %s from %s" %
                                      (func.name, topic_name))
                        except sns_client.exceptions.NotFoundException:
                            pass
                        raise Done  # break out of both for loops
            except Done:
                pass
class BucketSNSNotification(SNSSubscription):
    """ Subscribe a lambda to bucket notifications via SNS. """
    def __init__(self, session_factory, bucket, topic=None):
        # NB: We are overwriting __init__ vs. extending.
        self.session_factory = session_factory
        self.session = session_factory()
        # When no topic is given, find (or create) a topic already wired to
        # the bucket's ObjectCreated notifications.
        self.topic_arns = self.get_topic(bucket) if topic is None else [topic]
        self.client = self.session.client('sns')
    def get_topic(self, bucket):
        """Return topic arns receiving s3:ObjectCreated:* events for bucket.

        If the bucket has no such topic configured, create one named after
        the bucket, allow s3 to publish to it, and attach it to the bucket's
        notification configuration.
        """
        session = local_session(self.session_factory)
        sns = session.client('sns')
        s3 = session.client('s3')
        # NOTE(review): assumes the bucket resource dict was annotated with a
        # 'Notification' key (the bucket's notification configuration) by an
        # earlier step - confirm against the caller.
        notifies = bucket['Notification']
        if 'TopicConfigurations' not in notifies:
            notifies['TopicConfigurations'] = []
        all_topics = notifies['TopicConfigurations']
        topic_arns = [t['TopicArn'] for t in all_topics
                      if 's3:ObjectCreated:*' in t['Events']]
        if not topic_arns:
            # No suitable existing topic. Create one.
            topic_arn = sns.create_topic(Name=bucket['Name'])['TopicArn']
            policy = {
                'Statement': [{
                    'Action': 'SNS:Publish',
                    'Effect': 'Allow',
                    'Resource': topic_arn,
                    'Principal': {'Service': 's3.amazonaws.com'}}]}
            sns.set_topic_attributes(
                TopicArn=topic_arn,
                AttributeName='Policy',
                AttributeValue=json.dumps(policy))
            notifies['TopicConfigurations'].append({
                'TopicArn': topic_arn,
                'Events': ['s3:ObjectCreated:*']})
            s3.put_bucket_notification_configuration(Bucket=bucket['Name'],
                NotificationConfiguration=notifies)
            topic_arns = [topic_arn]
        return topic_arns
class ConfigRule(AWSEventBase):
    """Use a lambda as a custom config rule.
    """
    client_service = 'config'

    def __repr__(self):
        return "<ConfigRule>"

    def get_rule_params(self, func):
        """Build the PutConfigRule parameters for func.

        For PolicyLambda functions the scoped resource type comes from the
        policy's resource model; otherwise it comes from 'resource-types'
        in self.data. A 'schedule' in self.data switches the rule to
        periodic (ScheduledNotification) evaluation.
        """
        # config does not support versions/aliases on lambda funcs
        func_arn = func.arn
        if isinstance(func_arn, str) and func_arn.count(':') > 6:
            func_arn, version = func_arn.rsplit(':', 1)
        params = dict(
            ConfigRuleName=func.name,
            Description=func.description,
            Source={
                'Owner': 'CUSTOM_LAMBDA',
                'SourceIdentifier': func_arn,
                'SourceDetails': [{
                    'EventSource': 'aws.config',
                    'MessageType': 'ConfigurationItemChangeNotification'}]
            }
        )
        if isinstance(func, PolicyLambda):
            manager = func.policy.load_resource_manager()
            resource_model = manager.get_model()
            if resource_model.config_type:
                config_type = resource_model.config_type
            elif resource_model.cfn_type and 'schedule' in self.data:
                config_type = resource_model.cfn_type
            else:
                raise Exception("You may have attempted to deploy a config "
                                "based lambda function with an unsupported config type. "
                                "The most recent AWS config types are here: http://docs.aws"
                                ".amazon.com/config/latest/developerguide/resource"
                                "-config-reference.html.")
            params['Scope'] = {
                'ComplianceResourceTypes': [config_type]}
        else:
            # Bug fix: params has no 'Scope' key yet on this branch, so the
            # previous item assignment into params['Scope'] raised KeyError.
            # Create the Scope mapping instead.
            params['Scope'] = {
                'ComplianceResourceTypes': self.data.get('resource-types', ())}
        if self.data.get('schedule'):
            # Periodic evaluation instead of change-triggered evaluation.
            params['Source']['SourceDetails'] = [{
                'EventSource': 'aws.config',
                'MessageType': 'ScheduledNotification'
            }]
            params['MaximumExecutionFrequency'] = self.data['schedule']
        return params

    def get(self, rule_name):
        """Return the deployed config rule named rule_name, or a falsy value."""
        rules = resource_exists(
            self.client.describe_config_rules,
            ConfigRuleNames=[rule_name],
            NotFound="NoSuchConfigRuleException")
        if not rules:
            return rules
        return rules['ConfigRules'][0]

    @staticmethod
    def delta(rule, params):
        """Return True when the deployed rule differs from params."""
        # doesn't seem like we have anything mutable at the moment,
        # since we restrict params, maybe reusing the same policy name
        # with a different resource type.
        if rule['Scope'] != params['Scope']:
            return True
        if rule['Source'] != params['Source']:
            return True
        if ('MaximumExecutionFrequency' in params and
                rule['MaximumExecutionFrequency'] != params['MaximumExecutionFrequency']):
            return True
        if rule.get('Description', '') != params.get('Description', ''):
            return True
        return False

    def add(self, func):
        """Create or update the config rule that invokes func."""
        rule = self.get(func.name)
        params = self.get_rule_params(func)
        if rule and self.delta(rule, params):
            log.debug("Updating config rule for %s" % self)
            rule.update(params)
            return LambdaRetry(self.client.put_config_rule, ConfigRule=rule)
        elif rule:
            log.debug("Config rule up to date")
            return
        client = self.session.client('lambda')
        try:
            # Allow the config service to invoke the function before wiring
            # up the rule; conflicts mean the permission already exists.
            client.add_permission(
                FunctionName=func.name,
                StatementId=func.name,
                SourceAccount=func.arn.split(':')[4],
                Action='lambda:InvokeFunction',
                Principal='config.amazonaws.com')
        except client.exceptions.ResourceConflictException:
            pass
        log.debug("Adding config rule for %s" % func.name)
        return LambdaRetry(self.client.put_config_rule, ConfigRule=params)

    def remove(self, func):
        """Delete the config rule for func, if it exists."""
        rule = self.get(func.name)
        if not rule:
            return
        log.info("Removing config rule for %s", func.name)
        try:
            self.client.delete_config_rule(
                ConfigRuleName=func.name)
        except self.client.exceptions.NoSuchConfigRuleException:
            # Already gone; deletion is best-effort.
            pass
| |
from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
# Settings whose names match this pattern (case-insensitive substring) have
# their values replaced with CLEANSED_SUBSTITUTE in debug output.
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
# Placeholder shown instead of any cleansed (sensitive) value.
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
    """
    Wrap a callable appearing in settings so the debug page neither calls
    it while rendering (#21345) nor breaks when the callable forbids
    setting attributes (#23070).
    """
    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        return '{!r}'.format(self._wrapped)
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    Dictionary values are cleansed recursively, keyed on their own keys.
    Callables are wrapped so the debug page cannot invoke them.
    """
    try:
        is_sensitive = bool(HIDDEN_SETTINGS.search(key))
    except TypeError:
        # The key isn't regex-able (e.g. not a string); keep value as-is.
        cleansed = value
    else:
        if is_sensitive:
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
        else:
            cleansed = value
    if callable(cleansed):
        # For fixing #21345 and #23070
        cleansed = CallableSettingWrapper(cleansed)
    return cleansed
def get_safe_settings():
    """Return a dict of the settings module with sensitive values blurred out."""
    return {
        key: cleanse_setting(key, getattr(settings, key))
        for key in dir(settings)
        if key.isupper()
    }
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.

    AJAX requests get a plain-text traceback; everything else gets HTML.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        body, mime = reporter.get_traceback_text(), 'text/plain'
    else:
        body, mime = reporter.get_traceback_html(), 'text/html'
    return HttpResponse(body, status=status_code, content_type=mime)
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
    """Instantiate the default reporter filter once and memoize it."""
    filter_cls = import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
    return filter_cls()
def get_exception_reporter_filter(request):
    """Return the filter attached to the request, or the cached default."""
    # The default is built eagerly so it is instantiated (and cached) even
    # when the request carries its own filter, matching prior behavior.
    fallback = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', fallback)
class ExceptionReporterFilter(object):
    """
    Base class for exception reporter filters. Every overridable hook
    defaults to lenient, pass-through behavior.
    """
    def get_post_parameters(self, request):
        """Return the request's POST parameters, unfiltered."""
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Return the frame's locals as a list of (name, value) pairs."""
        return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False
    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replaces the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Copy before mutating so the caller's dict stays intact.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = CLEANSED_SUBSTITUTE
        return multivaluedict
    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                # Work on a copy; request.POST itself is never mutated.
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST
    def cleanse_special_types(self, request, value):
        # Cleanse MultiValueDict values; any error raised while inspecting
        # the value is reported as a descriptive string instead of crashing
        # the debug page.
        try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. isinstance checks that lazy
            # MultiValueDicts will have a return value.
            is_multivalue_dict = isinstance(value, MultiValueDict)
        except Exception as e:
            return '{!r} while evaluating {!r}'.format(e, value)
        if is_multivalue_dict:
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value
    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                    and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back
        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)
        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
        return cleansed.items()
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.

    Collects the exception chain, traceback frames, request data and
    cleansed settings, and renders them as the HTML or plain-text
    technical 500 page.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email
        self.template_info = getattr(self.exc_value, 'template_debug', None)
        self.template_does_not_exist = False
        self.postmortem = None
        # Handle deprecated string exceptions
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)

    def get_traceback_data(self):
        """Return a dictionary containing traceback information."""
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            self.template_does_not_exist = True
            self.postmortem = self.exc_value.chain or [self.exc_value]
        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                frame_vars = []
                for k, v in frame['vars']:
                    v = pprint(v)
                    # The force_escape filter assume unicode, make sure that works
                    if isinstance(v, six.binary_type):
                        v = v.decode('utf-8', 'replace')  # don't choke on non-utf-8 input
                    # Trim large blobs of data
                    if len(v) > 4096:
                        v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
                    frame_vars.append((k, force_escape(v)))
                frame['vars'] = frame_vars
            frames[i] = frame
        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(
                    unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
                    'ascii', errors='replace'
                )
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': timezone.now(),
            'django_version_info': get_version(),
            'sys_path': sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'postmortem': self.postmortem,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c

    def get_traceback_html(self):
        "Return HTML version of debug 500 HTTP error page."
        t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
        c = Context(self.get_traceback_data(), use_l10n=False)
        return t.render(c)

    def get_traceback_text(self):
        "Return plain text version of debug 500 HTTP error page."
        t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
        c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
        return t.render(c)

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        if loader is not None and hasattr(loader, "get_source"):
            try:
                source = loader.get_source(module_name)
            except ImportError:
                pass
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.read().splitlines()
            except (OSError, IOError):
                pass
        if source is None:
            return None, [], None, []
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        # Bug fix: guard against an empty source file, where source[0]
        # raised IndexError and broke the debug page.
        if source and isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1:upper_bound]
        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        def explicit_or_implicit_cause(exc_value):
            explicit = getattr(exc_value, '__cause__', None)
            implicit = getattr(exc_value, '__context__', None)
            return explicit or implicit

        # Get the exception and all its causes
        exceptions = []
        exc_value = self.exc_value
        while exc_value:
            exceptions.append(exc_value)
            exc_value = explicit_or_implicit_cause(exc_value)
        frames = []
        # No exceptions were supplied to ExceptionReporter
        if not exceptions:
            return frames
        # In case there's just one exception (always in Python 2,
        # sometimes in Python 3), take the traceback from self.tb (Python 2
        # doesn't have a __traceback__ attribute on Exception)
        exc_value = exceptions.pop()
        tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
                filename, lineno, 7, loader, module_name,
            )
            if pre_context_lineno is not None:
                frames.append({
                    'exc_cause': explicit_or_implicit_cause(exc_value),
                    'exc_cause_explicit': getattr(exc_value, '__cause__', True),
                    'tb': tb,
                    'type': 'django' if module_name.startswith('django.') else 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            # If the traceback for current exception is consumed, try the
            # other exception.
            if six.PY2:
                tb = tb.tb_next
            elif not tb.tb_next and exceptions:
                exc_value = exceptions.pop()
                tb = exc_value.__traceback__
            else:
                tb = tb.tb_next
        return frames

    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        # Renamed from 'list' to avoid shadowing the builtin.
        lines = ['Traceback (most recent call last):\n']
        lines += traceback.format_list(tb)
        lines += traceback.format_exception_only(self.exc_type, self.exc_value)
        return lines
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    # The Http404 raised by the resolver carries a dict with 'path' and
    # 'tried'; fall back gracefully when the args have another shape.
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash
    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # Show the friendly welcome page instead when the project has an
        # empty URLconf or only the default admin URLconf.
        if (not tried  # empty URLconf
            or (request.path == '/'
                and len(tried) == 1  # default URLconf
                and len(tried[0]) == 1
                and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
            return default_urlconf(request)
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__
    # Resolve the path again to name the view that raised the 404, if any.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        obj = resolver_match.func
        if hasattr(obj, '__name__'):
            caller = obj.__name__
        elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
            caller = obj.__class__.__name__
        if hasattr(obj, '__module__'):
            module = obj.__module__
            caller = '%s.%s' % (module, caller)
    t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    """Render the "It worked!" page shown for an empty URLconf."""
    template = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
    context = Context({
        "title": _("Welcome to Django"),
        "heading": _("It worked!"),
        "subheading": _("Congratulations on your first Django-powered page."),
        "instructions": _("Of course, you haven't actually done any work yet. "
                          "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
        "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
                         "Django settings file and you haven't configured any URLs. Get to work!"),
    })
    return HttpResponse(template.render(context), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.get_raw_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
</ul>
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% if template_info.bottom != template_info.total %} cut-bottom{% endif %}">
{% for source_line in template_info.source_lines %}
{% if source_line.0 == template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endif %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public website">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
{% if request.user %}
<h3 id="user-info">USER</h3>
<p>{{ request.user }}</p>
{% endif %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
# Plain-text rendering of the technical 500 debug report. Used for non-HTML
# clients and for the text alternative of error e-mails; mirrors the HTML
# template above but with no markup. The odd string-literal concatenation
# keeps individual source lines short without inserting newlines into the
# rendered output.
TECHNICAL_500_TEXT_TEMPLATE = (""""""
"""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
{% if request.user %}USER: {{ request.user }}{% endif %}
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""")
# HTML page served for 404s while DEBUG is on: shows the request details,
# the URL patterns Django tried (in order), and which view raised Http404.
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
# "It worked!" welcome page shown when DEBUG is on and the project's root
# URLconf is still empty; heading/subheading/instructions/explanation are
# supplied by the view so they can be translated.
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
| |
# -*- coding: utf-8 -*-
"""
oauthlib.common
~~~~~~~~~~~~~~
This module provides data structures and utilities common
to all implementations of OAuth.
"""
from __future__ import absolute_import, unicode_literals
import collections
import datetime
import logging
import random
import re
import sys
import time
try:
from urllib import quote as _quote
from urllib import unquote as _unquote
from urllib import urlencode as _urlencode
except ImportError:
from urllib.parse import quote as _quote
from urllib.parse import unquote as _unquote
from urllib.parse import urlencode as _urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
# Default alphabet for generate_token(): ASCII letters and digits.
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
                               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                               '0123456789')
# Printable ASCII charset permitted for client_id values
# (RFC 6749 appendix A); used as the generate_client_id() default.
CLIENT_ID_CHARACTER_SET = (r' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMN'
                           'OPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}')
# Used by Request.__repr__ to redact password values before logging.
PASSWORD_PATTERN = re.compile(r'password=[^&]+')
# Matches a '%' escape not followed by exactly two hex digits; used by
# urldecode() to reject malformed percent-encoding.
INVALID_HEX_PATTERN = re.compile(r'%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]')
# Characters that never need percent-encoding in a query component.
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
log = logging.getLogger('oauthlib')
# Python 2/3 compatibility aliases for the text and byte string types.
PY3 = sys.version_info[0] == 3
if PY3:
    unicode_type = str
    bytes_type = bytes
else:
    unicode_type = unicode
    bytes_type = str
# 'safe' must be bytes (Python 2.6 requires bytes, other versions allow either)
def quote(s, safe=b'/'):
    """Percent-encode *s*, always returning unicode.

    Text input is UTF-8 encoded before quoting; any bytes result
    (possible on Python 2) is decoded back to unicode.
    """
    if isinstance(s, unicode_type):
        s = s.encode('utf-8')
    quoted = _quote(s, safe)
    # PY3 always returns unicode. PY2 may return either, depending on
    # whether it had to modify the string.
    if isinstance(quoted, bytes_type):
        quoted = quoted.decode('utf-8')
    return quoted
def unquote(s):
    """Percent-decode *s*, always returning unicode.

    PY3 always returns unicode; PY2 tends to return what it was given,
    so a bytes result is normalised to unicode before returning.
    """
    decoded = _unquote(s)
    if isinstance(decoded, bytes_type):
        return decoded.decode('utf-8')
    return decoded
def urlencode(params):
    """Form-encode a sequence of 2-tuples, returning unicode."""
    encoded = _urlencode(encode_params_utf8(params))
    # PY3 hands back unicode already; PY2 gives bytes that need decoding.
    if not isinstance(encoded, unicode_type):
        encoded = encoded.decode("utf-8")
    return encoded
def encode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are encoded to
    bytestrings using UTF-8
    """
    def _as_bytes(value):
        # Only text needs encoding; bytes (and anything else) pass through.
        if isinstance(value, unicode_type):
            return value.encode('utf-8')
        return value
    return [(_as_bytes(k), _as_bytes(v)) for k, v in params]
def decode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are decoded to
    unicode using UTF-8.
    """
    def _as_text(value):
        # Only bytes need decoding; text (and anything else) pass through.
        if isinstance(value, bytes_type):
            return value.decode('utf-8')
        return value
    return [(_as_text(k), _as_text(v)) for k, v in params]
# Full set of characters that may legally appear in an x-www-form-urlencoded
# string; urldecode() below rejects input containing anything else.
urlencoded = set(always_safe) | set('=&;%+~,*@!')
def urldecode(query):
    """Decode an x-www-form-urlencoded query string into 2-tuples.

    Unlike urlparse.parse_qsl(..., strict_parsing=True), this validates the
    string's characters and percent-escapes up front and raises ValueError on
    malformed input; urllib.parse_qsl only complains when a name-value pair
    omits the equals sign.
    """
    # Reject any character that can never occur in a urlencoded string.
    if query and not set(query) <= urlencoded:
        error = ("Error trying to decode a non urlencoded string. "
                 "Found invalid characters: %s "
                 "in the string: '%s'. "
                 "Please ensure the request/response body is "
                 "x-www-form-urlencoded.")
        raise ValueError(error % (set(query) - urlencoded, query))
    # Every escape must be '%' followed by exactly two hex digits:
    # correct = %00, %A0, %0A, %FF; invalid = %G0, %5H, %PO.
    if INVALID_HEX_PATTERN.search(query):
        raise ValueError('Invalid hex encoding in query string.')
    # parse_qsl treats unicode input differently between Python 2 and 3
    # (PY2 leaves percent-escapes as latin-1-ish text, PY3 decodes them),
    # so on PY2 we hand it UTF-8 bytes and decode the results afterwards.
    if not PY3 and isinstance(query, unicode_type):
        query = query.encode('utf-8')
    # keep_blank_values permits bare keys such as "c2", which the
    # strict_parsing flag of parse_qsl would refuse.
    pairs = urlparse.parse_qsl(query, keep_blank_values=True)
    # unicode all the things
    return decode_params_utf8(pairs)
def extract_params(raw):
    """Extract parameters and return them as a list of 2-tuples.

    Will successfully extract parameters from urlencoded query strings,
    dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
    empty list of parameters. Any other input will result in a return
    value of None.
    """
    if isinstance(raw, (bytes_type, unicode_type)):
        try:
            return urldecode(raw)
        except ValueError:
            return None
    if hasattr(raw, '__iter__'):
        # Probe that the input really is a pair sequence / mapping.
        try:
            dict(raw)
        except (ValueError, TypeError):
            return None
        pairs = list(raw.items() if isinstance(raw, dict) else raw)
        return decode_params_utf8(pairs)
    return None
def generate_nonce():
    """Generate pseudorandom nonce that is unlikely to repeat.

    Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
    Per `section 3.2.1`_ of the MAC Access Authentication spec.

    A random 64-bit number is appended to the epoch timestamp for both
    randomness and to decrease the likelihood of collisions.

    .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    random_part = unicode_type(random.getrandbits(64))
    return unicode_type(random_part + generate_timestamp())
def generate_timestamp():
    """Get seconds since epoch (UTC) as a unicode string.

    Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
    Per `section 3.2.1`_ of the MAC Access Authentication spec.

    .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    seconds = int(time.time())
    return unicode_type(seconds)
def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):
    """Generates a non-guessable OAuth token

    OAuth (1 and 2) does not specify the format of tokens except that they
    should be strings of random characters. Tokens should not be guessable
    and entropy when generating the random characters is important. Which is
    why SystemRandom is used instead of the default random.choice method.
    """
    pick = random.SystemRandom().choice
    return ''.join(pick(chars) for _ in range(length))
def generate_signed_token(private_pem, request):
    """Build an RS256-signed JWT from the request's scope and expiry.

    Claims supplied via ``request.claims`` override the defaults.
    """
    import jwt
    issued_at = datetime.datetime.utcnow()
    claims = {
        'scope': request.scope,
        'exp': issued_at + datetime.timedelta(seconds=request.expires_in),
    }
    # Caller-provided claims win over the defaults above.
    claims.update(request.claims)
    signed = jwt.encode(claims, private_pem, 'RS256')
    return to_unicode(signed, "UTF-8")
def verify_signed_token(public_pem, token):
    """Decode and verify an RS256-signed JWT against ``public_pem``.

    Propagates PyJWT's exceptions on an invalid signature or claims.
    """
    import jwt
    return jwt.decode(token, public_pem, algorithms=['RS256'])
def generate_client_id(length=30, chars=CLIENT_ID_CHARACTER_SET):
    """Generates an OAuth client_id
    OAuth 2 specify the format of client_id in
    http://tools.ietf.org/html/rfc6749#appendix-A.
    """
    # Same mechanism as token generation, just with the appendix-A charset.
    return generate_token(length, chars)
def add_params_to_qs(query, params):
    """Extend a query string with a list of two-tuples (or a dict)."""
    if isinstance(params, dict):
        params = params.items()
    # Preserve existing pairs (including blank values), then append.
    existing = urlparse.parse_qsl(query, keep_blank_values=True)
    existing.extend(params)
    return urlencode(existing)
def add_params_to_uri(uri, params, fragment=False):
    """Add a list of two-tuples to the uri query (or fragment) component."""
    parsed = urlparse.urlparse(uri)
    query, fra = parsed.query, parsed.fragment
    if fragment:
        fra = add_params_to_qs(fra, params)
    else:
        query = add_params_to_qs(query, params)
    return urlparse.urlunparse((parsed.scheme, parsed.netloc, parsed.path,
                                parsed.params, query, fra))
def safe_string_equals(a, b):
    """Near-constant time string comparison.

    Used in order to avoid timing attacks on sensitive information such
    as secret keys during request verification (`rootLabs`_).

    .. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
    """
    if len(a) != len(b):
        return False
    # Accumulate every per-character difference instead of returning at
    # the first mismatch, so the comparison time does not leak position.
    mismatch = 0
    for ch_a, ch_b in zip(a, b):
        mismatch |= ord(ch_a) ^ ord(ch_b)
    return mismatch == 0
def to_unicode(data, encoding='UTF-8'):
    """Convert a number of different types of objects to unicode.

    Text is returned as-is, bytes are decoded, mappings and pair
    sequences are converted recursively into dicts, one-dimensional
    iterables become generators of converted items, and anything else
    is returned unchanged.
    """
    if isinstance(data, unicode_type):
        return data
    if isinstance(data, bytes_type):
        return unicode_type(data, encoding=encoding)
    if hasattr(data, '__iter__'):
        # Probe whether the iterable is dict-like (pairs) or flat.
        try:
            dict(data)
        except TypeError:
            # Not convertible to a dict at all (e.g. iterable of
            # non-pairs); fall through and return the object unchanged.
            pass
        except ValueError:
            # Assume it's a one dimensional data structure
            return (to_unicode(i, encoding) for i in data)
        else:
            # dict(data) succeeded: convert keys and values recursively.
            # We support 2.6 which lacks dict comprehensions
            if hasattr(data, 'items'):
                data = data.items()
            return dict(((to_unicode(k, encoding), to_unicode(v, encoding)) for k, v in data))
    return data
class CaseInsensitiveDict(dict):
    """A dict whose string keys compare case-insensitively.

    Values are stored under their original-case keys; a ``proxy``
    mapping from lowercased key to original key provides the
    case-insensitive lookup.
    """
    proxy = {}

    def __init__(self, data):
        self.proxy = {}
        for key in data:
            self.proxy[key.lower()] = key
        for key in data:
            self[key] = data[key]

    def __contains__(self, k):
        return k.lower() in self.proxy

    def __getitem__(self, k):
        actual_key = self.proxy[k.lower()]
        return super(CaseInsensitiveDict, self).__getitem__(actual_key)

    def __setitem__(self, k, v):
        super(CaseInsensitiveDict, self).__setitem__(k, v)
        self.proxy[k.lower()] = k

    def __delitem__(self, k):
        lowered = k.lower()
        actual_key = self.proxy[lowered]
        super(CaseInsensitiveDict, self).__delitem__(actual_key)
        del self.proxy[lowered]

    def get(self, k, default=None):
        if k in self:
            return self[k]
        return default
class Request(object):
    """A malleable representation of a signable HTTP request.

    Body argument may contain any data, but parameters will only be decoded if
    they are one of:

    * urlencoded query string
    * dict
    * list of 2-tuples

    Anything else will be treated as raw body data to be passed through
    unmolested.
    """
    def __init__(self, uri, http_method='GET', body=None, headers=None,
                 encoding='utf-8'):
        """Store the request pieces, normalising everything to unicode."""
        # Convert to unicode using encoding if given, else assume unicode
        encode = lambda x: to_unicode(x, encoding) if encoding else x
        self.uri = encode(uri)
        self.http_method = encode(http_method)
        self.headers = CaseInsensitiveDict(encode(headers or {}))
        self.body = encode(body)
        # None unless the body was a query string, dict or pair list.
        self.decoded_body = extract_params(self.body)
        self.oauth_params = []
        # Known request parameters, surfaced as attributes by __getattr__
        # below. Query-string, body and header values (updated in that
        # order) override these None defaults.
        self._params = {
            "access_token": None,
            "client": None,
            "client_id": None,
            "client_secret": None,
            "code": None,
            "extra_credentials": None,
            "grant_type": None,
            "redirect_uri": None,
            "refresh_token": None,
            "response_type": None,
            "scope": None,
            "scopes": None,
            "state": None,
            "token": None,
            "user": None,
            "token_type_hint": None,
        }
        self._params.update(dict(urldecode(self.uri_query)))
        self._params.update(dict(self.decoded_body or []))
        self._params.update(self.headers)
    def __getattr__(self, name):
        # Called only for attributes not found the normal way; expose the
        # collected request parameters as plain attributes.
        if name in self._params:
            return self._params[name]
        else:
            raise AttributeError(name)
    def __repr__(self):
        # Redact password values so reprs are safe to log.
        body = self.body
        if body and 'password=' in body:
            body = PASSWORD_PATTERN.sub('password=***', body)
        return '<oauthlib.Request url="%s", http_method="%s", headers="%s", body="%s">' % (
            self.uri, self.http_method, self.headers, body)
    @property
    def uri_query(self):
        """The query-string portion of the request URI."""
        return urlparse.urlparse(self.uri).query
    @property
    def uri_query_params(self):
        """The query string parsed into a list of (name, value) tuples."""
        if not self.uri_query:
            return []
        return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
                                  strict_parsing=True)
    @property
    def duplicate_params(self):
        """Parameter names occurring more than once across body and query."""
        seen_keys = collections.defaultdict(int)
        all_keys = (p[0]
                    for p in (self.decoded_body or []) + self.uri_query_params)
        for k in all_keys:
            seen_keys[k] += 1
        return [k for k, c in seen_keys.items() if c > 1]
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future.utils import native
import flask_login
from flask_login import login_required, current_user, logout_user
from flask import flash
from wtforms import (
Form, PasswordField, StringField)
from wtforms.validators import InputRequired
from ldap3 import Server, Connection, Tls, LEVEL, SUBTREE, BASE
import ssl
from flask import url_for, redirect
from airflow import models
from airflow import configuration
from airflow.configuration import AirflowConfigException
from airflow.utils.db import provide_session
import traceback
import re
from airflow.utils.log.logging_mixin import LoggingMixin
# Flask-Login manager; unauthenticated users are redirected to the
# 'airflow.login' view. No flash message is shown on redirect.
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login'  # Calls login() below
login_manager.login_message = None
log = LoggingMixin().log
class AuthenticationError(Exception):
    """Raised when an LDAP bind or user lookup fails."""
    pass
class LdapException(Exception):
    """Raised for LDAP configuration or query problems."""
    pass
def get_ldap_connection(dn=None, password=None):
    """Open and bind an LDAP connection for ``dn``.

    TLS with certificate validation is enabled only when the [ldap]
    ``cacert`` option is configured; otherwise the connection falls back
    to plain (non-SSL) LDAP.

    :param dn: distinguished name to bind as (None for anonymous bind).
    :param password: password for ``dn``.
    :raises AuthenticationError: if the server rejects the bind.
    """
    tls_configuration = None
    use_ssl = False
    # Fix: the original bare ``except:`` swallowed every exception
    # (including KeyboardInterrupt/SystemExit); only a missing config
    # option should trigger the non-SSL fallback.
    try:
        cacert = configuration.get("ldap", "cacert")
        tls_configuration = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=cacert)
        use_ssl = True
    except AirflowConfigException:
        # No cacert configured: proceed without SSL rather than failing.
        pass
    server = Server(configuration.get("ldap", "uri"), use_ssl, tls_configuration)
    conn = Connection(server, native(dn), native(password))
    if not conn.bind():
        log.error("Cannot bind to ldap server: %s ", conn.last_error)
        raise AuthenticationError("Cannot bind to ldap server")
    return conn
def group_contains_user(conn, search_base, group_filter, user_name_attr, username):
    """Return True if ``username`` is listed by the group matching ``group_filter``."""
    search_filter = '(&({0}))'.format(group_filter)
    found = conn.search(native(search_base), native(search_filter),
                        attributes=[native(user_name_attr)])
    if not found:
        log.warning("Unable to find group for %s %s", search_base, search_filter)
        return False
    for entry in conn.entries:
        if username in getattr(entry, user_name_attr).values:
            return True
    return False
def groups_user(conn, search_base, user_filter, user_name_att, username):
    """Return the list of group CNs (strings) the given user belongs to.

    The user entry is located via ``user_filter`` + ``user_name_att``; group
    membership is read from the configured ``group_member_attr`` (defaulting
    to the Active-Directory-style ``memberOf``).

    :raises AuthenticationError: if the user cannot be found at all
    """
    search_filter = "(&({0})({1}={2}))".format(user_filter, user_name_att, username)
    try:
        memberof_attr = configuration.get("ldap", "group_member_attr")
    except AirflowConfigException:
        # Option not configured -- use the common default.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        memberof_attr = "memberOf"
    res = conn.search(native(search_base), native(search_filter),
                      attributes=[native(memberof_attr)])
    if not res:
        log.info("Cannot find user %s", username)
        raise AuthenticationError("Invalid username or password")
    if conn.response and memberof_attr not in conn.response[0]["attributes"]:
        log.warning("""Missing attribute "%s" when looked-up in Ldap database.
        The user does not seem to be a member of a group and therefore won't see any dag
        if the option filter_by_owner=True and owner_mode=ldapgroup are set""",
                    memberof_attr)
        return []
    user_groups = conn.response[0]["attributes"][memberof_attr]
    regex = re.compile("cn=([^,]*).*", re.IGNORECASE)
    groups_list = []
    try:
        # regex.search() returns None when an entry has no "cn=" component,
        # which raises AttributeError on .group(1) -- catch it alongside
        # IndexError instead of letting it propagate.
        groups_list = [regex.search(i).group(1) for i in user_groups]
    except (IndexError, AttributeError):
        log.warning("Parsing error when retrieving the user's group(s)."
                    " Check if the user belongs to at least one group"
                    " or if the user's groups name do not contain special characters")
    return groups_list
class LdapUser(models.User):
    """Airflow user wrapper whose privileges and groups are resolved via LDAP.

    On construction the superuser / data-profiler flags and the user's LDAP
    groups are looked up once and cached on the instance.
    """

    def __init__(self, user):
        # Underlying Airflow ORM user row.
        self.user = user
        self.ldap_groups = []
        # Load and cache superuser and data_profiler settings.
        conn = get_ldap_connection(configuration.get("ldap", "bind_user"),
                                   configuration.get("ldap", "bind_password"))
        superuser_filter = None
        data_profiler_filter = None
        try:
            superuser_filter = configuration.get("ldap", "superuser_filter")
        except AirflowConfigException:
            pass
        if not superuser_filter:
            # No filter configured => everyone is treated as superuser.
            self.superuser = True
            log.debug("Missing configuration for superuser settings or empty. Skipping.")
        else:
            self.superuser = group_contains_user(conn,
                                                 configuration.get("ldap", "basedn"),
                                                 superuser_filter,
                                                 configuration.get("ldap",
                                                                   "user_name_attr"),
                                                 user.username)
        try:
            data_profiler_filter = configuration.get("ldap", "data_profiler_filter")
        except AirflowConfigException:
            pass
        if not data_profiler_filter:
            # Same open-by-default behavior as the superuser flag above.
            self.data_profiler = True
            log.debug("Missing configuration for data profiler settings or empty. "
                      "Skipping.")
        else:
            self.data_profiler = group_contains_user(conn,
                                                     configuration.get("ldap", "basedn"),
                                                     data_profiler_filter,
                                                     configuration.get("ldap",
                                                                       "user_name_attr"),
                                                     user.username)
        # Load the ldap group(s) a user belongs to
        try:
            self.ldap_groups = groups_user(conn,
                                           configuration.get("ldap", "basedn"),
                                           configuration.get("ldap", "user_filter"),
                                           configuration.get("ldap", "user_name_attr"),
                                           user.username)
        except AirflowConfigException:
            log.debug("Missing configuration for ldap settings. Skipping")

    @staticmethod
    def try_login(username, password):
        """Verify *username*/*password* against LDAP.

        Success is signalled by returning normally (the return value is None);
        failure raises AuthenticationError (unknown user or bad password) or
        LdapException (directory structure could not be parsed).
        """
        conn = get_ldap_connection(configuration.get("ldap", "bind_user"),
                                   configuration.get("ldap", "bind_password"))
        search_filter = "(&({0})({1}={2}))".format(
            configuration.get("ldap", "user_filter"),
            configuration.get("ldap", "user_name_attr"),
            username
        )
        # NOTE(review): this mapping is currently unused -- search_scope below
        # is chosen directly from the config string.
        search_scopes = {
            "LEVEL": LEVEL,
            "SUBTREE": SUBTREE,
            "BASE": BASE
        }
        search_scope = LEVEL
        if configuration.has_option("ldap", "search_scope"):
            search_scope = SUBTREE if configuration.get("ldap", "search_scope") == "SUBTREE" else LEVEL
        # todo: BASE or ONELEVEL?
        res = conn.search(native(configuration.get("ldap", "basedn")),
                          native(search_filter),
                          search_scope=native(search_scope))
        # todo: use list or result?
        if not res:
            log.info("Cannot find user %s", username)
            raise AuthenticationError("Invalid username or password")
        entry = conn.response[0]
        conn.unbind()
        if 'dn' not in entry:
            # The search filter for the user did not return any values, so an
            # invalid user was used for credentials.
            raise AuthenticationError("Invalid username or password")
        try:
            # Re-bind as the found user; a failed bind raises inside
            # get_ldap_connection.
            conn = get_ldap_connection(entry['dn'], password)
        except KeyError as e:
            log.error("""
            Unable to parse LDAP structure. If you're using Active Directory and not specifying an OU, you must set search_scope=SUBTREE in airflow.cfg.
            %s
            """ % traceback.format_exc())
            raise LdapException("Could not parse LDAP structure. Try setting search_scope in airflow.cfg, or check logs")
        if not conn:
            log.info("Password incorrect for user %s", username)
            raise AuthenticationError("Invalid username or password")

    def is_active(self):
        '''Required by flask_login'''
        return True

    def is_authenticated(self):
        '''Required by flask_login'''
        return True

    def is_anonymous(self):
        '''Required by flask_login'''
        return False

    def get_id(self):
        '''Returns the current user id as required by flask_login'''
        return self.user.get_id()

    def data_profiling(self):
        '''Provides access to data profiling tools'''
        return self.data_profiler

    def is_superuser(self):
        '''Access all the things'''
        return self.superuser
@login_manager.user_loader
@provide_session
def load_user(userid, session=None):
    """Flask-Login user loader: rebuild an LdapUser from the session's user id.

    Returns None for missing/"None" ids so Flask-Login treats the visitor as
    anonymous.
    """
    log.debug("Loading user %s", userid)
    if not userid or userid == 'None':
        return None
    user = session.query(models.User).filter(models.User.id == int(userid)).first()
    return LdapUser(user)
@provide_session
def login(self, request, session=None):
    """Handle GET/POST of the login view for the LDAP auth backend.

    ``self`` is the Flask-Admin view this function is bound to. On a valid
    POST the credentials are checked via LdapUser.try_login; a matching
    Airflow user row is created on first login, then logged in with
    flask_login. On failure the login form is re-rendered with a flash.
    """
    if current_user.is_authenticated():
        flash("You are already logged in")
        return redirect(url_for('admin.index'))
    username = None
    password = None
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        username = request.form.get("username")
        password = request.form.get("password")
    # GET request, invalid form, or empty credentials: just show the form.
    if not username or not password:
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
    try:
        # Raises on failure; success means the credentials are good.
        LdapUser.try_login(username, password)
        log.info("User %s successfully authenticated", username)
        user = session.query(models.User).filter(
            models.User.username == username).first()
        if not user:
            # First login: lazily create the Airflow user record.
            user = models.User(
                username=username,
                is_superuser=False)
        session.merge(user)
        session.commit()
        flask_login.login_user(LdapUser(user))
        session.commit()
        return redirect(request.args.get("next") or url_for("admin.index"))
    except (LdapException, AuthenticationError) as e:
        if type(e) == LdapException:
            # Configuration/parsing problem: surface the real error.
            flash(e, "error")
        else:
            flash("Incorrect login details")
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
class LoginForm(Form):
    """WTForms form backing the login page; both fields are required."""
    username = StringField('Username', [InputRequired()])
    password = PasswordField('Password', [InputRequired()])
| |
# -*- coding: utf-8 -*-
"""
mostly functional tests of gateways.
"""
import os
import socket
import subprocess
import sys
from textwrap import dedent
import execnet
import py
import pytest
from execnet import gateway_base
from execnet import gateway_io
from test_serializer import _find_version
# Shared timeout for waitclose()/receive() calls throughout this module.
TESTTIMEOUT = 10.0  # seconds
# Skip marker for platforms without os.dup (needed by the stdout/stderr tests).
needs_osdup = py.test.mark.skipif("not hasattr(os, 'dup')")
# xfail marker for tests that are known to be timing-sensitive.
flakytest = pytest.mark.xfail(
    reason="on some systems this test fails due to timing problems"
)
skip_win_pypy = pytest.mark.xfail(
    condition=hasattr(sys, "pypy_version_info") and sys.platform.startswith("win"),
    reason="failing on Windows on PyPy (#63)",
)
def fails(*args, **kwargs):
    """Stub callable that raises ZeroDivisionError regardless of arguments.

    Used via monkeypatch to replace constructors (socket.socket,
    subprocess.Popen) so no real resources are created.
    """
    _ = 0 / 0
def test_deprecation(recwarn, monkeypatch):
    """The legacy *Gateway constructors must emit DeprecationWarning.

    socket.socket and subprocess.Popen are stubbed with ``fails`` so no real
    connection/process is attempted; we only care about the warning that is
    emitted before the construction blows up.
    """
    execnet.PopenGateway().exit()
    assert recwarn.pop(DeprecationWarning)
    monkeypatch.setattr(socket, "socket", fails)
    py.test.raises(Exception, execnet.SocketGateway, "localhost", 8811)
    assert recwarn.pop(DeprecationWarning)
    monkeypatch.setattr(subprocess, "Popen", fails)
    py.test.raises(Exception, execnet.SshGateway, "not-existing")
    assert recwarn.pop(DeprecationWarning)
class TestBasicGateway:
    """Behaviour common to every gateway flavour provided by the ``gw`` fixture."""

    def test_correct_setup(self, gw):
        assert gw.hasreceiver()
        assert gw in gw._group
        assert gw.id in gw._group
        assert gw.spec

    def test_repr_doesnt_crash(self, gw):
        assert isinstance(repr(gw), str)

    def test_attribute__name__(self, gw):
        channel = gw.remote_exec("channel.send(__name__)")
        name = channel.receive()
        assert name == "__channelexec__"

    def test_gateway_status_simple(self, gw):
        status = gw.remote_status()
        assert status.numexecuting == 0

    def test_exc_info_is_clear_after_gateway_startup(self, gw):
        # The bootstrap must not leave a dangling sys.exc_info() on the
        # remote side; the remote sends back 0 when the state is clean.
        ch = gw.remote_exec(
            """
                import traceback, sys
                excinfo = sys.exc_info()
                if excinfo != (None, None, None):
                    r = traceback.format_exception(*excinfo)
                else:
                    r = 0
                channel.send(r)
        """
        )
        res = ch.receive()
        if res != 0:
            pytest.fail("remote raised\n%s" % res)

    def test_gateway_status_no_real_channel(self, gw):
        numchan = gw._channelfactory.channels()
        gw.remote_status()
        numchan2 = gw._channelfactory.channels()
        # note that on CPython this can not really
        # fail because refcounting leads to immediate
        # closure of temporary channels
        assert numchan2 == numchan

    @flakytest
    def test_gateway_status_busy(self, gw):
        numchannels = gw.remote_status().numchannels
        ch1 = gw.remote_exec("channel.send(1); channel.receive()")
        ch2 = gw.remote_exec("channel.receive()")
        ch1.receive()
        status = gw.remote_status()
        assert status.numexecuting == 2  # number of active execution threads
        assert status.numchannels == numchannels + 2
        ch1.send(None)
        ch2.send(None)
        ch1.waitclose()
        ch2.waitclose()
        # The remote side needs a moment to wind down its execution threads,
        # so poll the status a few times before failing.
        for i in range(10):
            status = gw.remote_status()
            if status.numexecuting == 0:
                break
        else:
            pytest.fail("did not get correct remote status")
        # race condition
        assert status.numchannels <= numchannels

    def test_remote_exec_module(self, tmpdir, gw):
        # remote_exec accepts a module object; re-executing after rewriting
        # the file must pick up the new source.
        p = tmpdir.join("remotetest.py")
        p.write("channel.send(1)")
        mod = type(os)("remotetest")
        mod.__file__ = str(p)
        channel = gw.remote_exec(mod)
        name = channel.receive()
        assert name == 1
        p.write("channel.send(2)")
        channel = gw.remote_exec(mod)
        name = channel.receive()
        assert name == 2

    def test_remote_exec_module_is_removed(self, gw, tmpdir, monkeypatch):
        remotetest = tmpdir.join("remote.py")
        remotetest.write(
            dedent(
                """
                def remote():
                    return True

                if __name__ == '__channelexec__':
                    for item in channel:  # noqa
                        channel.send(eval(item))  # noqa

                """
            )
        )
        monkeypatch.syspath_prepend(tmpdir)
        import remote

        ch = gw.remote_exec(remote)
        # simulate sending the code to a remote location that does not have
        # access to the source
        tmpdir.remove()
        ch.send("remote()")
        try:
            result = ch.receive()
        finally:
            ch.close()
        assert result is True

    def test_remote_exec_module_with_traceback(self, gw, tmpdir, monkeypatch):
        # Remote tracebacks should reference the original module file/line.
        remotetest = tmpdir.join("remotetest.py")
        remotetest.write(
            dedent(
                """
                def run_me(channel=None):
                    raise ValueError('me')

                if __name__ == '__channelexec__':
                    run_me()
                """
            )
        )
        monkeypatch.syspath_prepend(tmpdir)
        import remotetest

        ch = gw.remote_exec(remotetest)
        try:
            ch.receive()
        except execnet.gateway_base.RemoteError as e:
            assert 'remotetest.py", line 3, in run_me' in str(e)
            assert "ValueError: me" in str(e)
        finally:
            ch.close()
        ch = gw.remote_exec(remotetest.run_me)
        try:
            ch.receive()
        except execnet.gateway_base.RemoteError as e:
            assert 'remotetest.py", line 3, in run_me' in str(e)
            assert "ValueError: me" in str(e)
        finally:
            ch.close()

    def test_correct_setup_no_py(self, gw):
        channel = gw.remote_exec(
            """
            import sys
            channel.send(list(sys.modules))
        """
        )
        remotemodules = channel.receive()
        assert "py" not in remotemodules, "py should not be imported on remote side"

    def test_remote_exec_waitclose(self, gw):
        channel = gw.remote_exec("pass")
        channel.waitclose(TESTTIMEOUT)

    def test_remote_exec_waitclose_2(self, gw):
        channel = gw.remote_exec("def gccycle(): pass")
        channel.waitclose(TESTTIMEOUT)

    def test_remote_exec_waitclose_noarg(self, gw):
        channel = gw.remote_exec("pass")
        channel.waitclose()

    def test_remote_exec_error_after_close(self, gw):
        channel = gw.remote_exec("pass")
        channel.waitclose(TESTTIMEOUT)
        py.test.raises(IOError, channel.send, 0)

    def test_remote_exec_no_explicit_close(self, gw):
        channel = gw.remote_exec("channel.close()")
        with pytest.raises(channel.RemoteError) as excinfo:
            channel.waitclose(TESTTIMEOUT)
        assert "explicit" in excinfo.value.formatted

    def test_remote_exec_channel_anonymous(self, gw):
        channel = gw.remote_exec(
            """
                    obj = channel.receive()
                    channel.send(obj)
            """
        )
        channel.send(42)
        result = channel.receive()
        assert result == 42

    @needs_osdup
    def test_confusion_from_os_write_stdout(self, gw):
        # Raw writes to fd 1 on the remote side must not corrupt the
        # gateway's message stream.
        channel = gw.remote_exec(
            """
            import os
            os.write(1, 'confusion!'.encode('ascii'))
            channel.send(channel.receive() * 6)
            channel.send(channel.receive() * 6)
            """
        )
        channel.send(3)
        res = channel.receive()
        assert res == 18
        channel.send(7)
        res = channel.receive()
        assert res == 42

    @needs_osdup
    def test_confusion_from_os_write_stderr(self, gw):
        # Same as above, but for fd 2 (stderr).
        channel = gw.remote_exec(
            """
            import os
            os.write(2, 'test'.encode('ascii'))
            channel.send(channel.receive() * 6)
            channel.send(channel.receive() * 6)
            """
        )
        channel.send(3)
        res = channel.receive()
        assert res == 18
        channel.send(7)
        res = channel.receive()
        assert res == 42

    def test__rinfo(self, gw):
        # _rinfo() is cached; only _rinfo(update=True) re-queries the remote.
        rinfo = gw._rinfo()
        assert rinfo.executable
        assert rinfo.cwd
        assert rinfo.version_info
        assert repr(rinfo)
        old = gw.remote_exec(
            """
            import os.path
            cwd = os.getcwd()
            channel.send(os.path.basename(cwd))
            os.chdir('..')
        """
        ).receive()
        try:
            rinfo2 = gw._rinfo()
            assert rinfo2.cwd == rinfo.cwd
            rinfo3 = gw._rinfo(update=True)
            assert rinfo3.cwd != rinfo2.cwd
        finally:
            # Restore the remote cwd and the cached rinfo for later tests.
            gw._cache_rinfo = rinfo
            gw.remote_exec("import os ; os.chdir(%r)" % old).waitclose()
class TestPopenGateway:
    """Tests specific to the popen (subprocess) gateway flavour."""

    gwtype = "popen"

    def test_chdir_separation(self, tmpdir, makegateway):
        # The child process inherits the cwd at spawn time and must not be
        # affected by the parent chdir'ing back afterwards.
        old = tmpdir.chdir()
        try:
            gw = makegateway("popen")
        finally:
            waschangedir = old.chdir()
        c = gw.remote_exec("import os ; channel.send(os.getcwd())")
        x = c.receive()
        assert x.lower() == str(waschangedir).lower()

    def test_remoteerror_readable_traceback(self, gw):
        with pytest.raises(gateway_base.RemoteError) as e:
            gw.remote_exec("x y").waitclose()
        assert "gateway_base" in e.value.formatted

    def test_many_popen(self, makegateway):
        num = 4
        l = []
        for i in range(num):
            l.append(makegateway("popen"))
        channels = []
        for gw in l:
            channel = gw.remote_exec("""channel.send(42)""")
            channels.append(channel)
        while channels:
            channel = channels.pop()
            ret = channel.receive()
            assert ret == 42

    def test_rinfo_popen(self, gw):
        # For a local popen gateway the remote info mirrors this interpreter.
        rinfo = gw._rinfo()
        assert rinfo.executable == sys.executable
        assert rinfo.cwd == os.getcwd()
        assert rinfo.version_info == sys.version_info

    def test_waitclose_on_remote_killed(self, makegateway):
        # Killing the child process must surface as EOFError/IOError on the
        # channel rather than hanging.
        gw = makegateway("popen")
        channel = gw.remote_exec(
            """
            import os
            import time
            channel.send(os.getpid())
            time.sleep(100)
        """
        )
        remotepid = channel.receive()
        py.process.kill(remotepid)
        with pytest.raises(EOFError):
            channel.waitclose(TESTTIMEOUT)
        with pytest.raises(IOError):
            channel.send(None)
        with pytest.raises(EOFError):
            channel.receive()

    def test_receive_on_remote_sysexit(self, gw):
        channel = gw.remote_exec(
            """
            raise SystemExit()
        """
        )
        py.test.raises(channel.RemoteError, channel.receive)

    def test_dont_write_bytecode(self, makegateway):
        # The dont_write_bytecode spec flag toggles sys.dont_write_bytecode
        # in the child interpreter.
        check_sys_dont_write_bytecode = """
            import sys
            channel.send(sys.dont_write_bytecode)
        """
        gw = makegateway("popen")
        channel = gw.remote_exec(check_sys_dont_write_bytecode)
        ret = channel.receive()
        assert not ret
        gw = makegateway("popen//dont_write_bytecode")
        channel = gw.remote_exec(check_sys_dont_write_bytecode)
        ret = channel.receive()
        assert ret
@py.test.mark.skipif("config.option.broken_isp")
def test_socket_gw_host_not_found(gw, makegateway):
    """A socket gateway to a non-resolvable host must raise HostNotFound.

    Skipped when --broken-isp is set (some ISPs resolve any hostname).
    """
    py.test.raises(execnet.HostNotFound, lambda: makegateway("socket=qwepoipqwe:9000"))
class TestSshPopenGateway:
    """Tests for the ssh gateway flavour (spec type "ssh")."""

    gwtype = "ssh"

    def test_sshconfig_config_parsing(self, monkeypatch, makegateway):
        # Capture the argv that would be passed to the ssh subprocess and
        # verify the ssh_config spec option is forwarded as "-F <file>".
        l = []
        monkeypatch.setattr(
            gateway_io, "Popen2IOMaster", lambda *args, **kwargs: l.append(args[0])
        )
        py.test.raises(AttributeError, lambda: makegateway("ssh=xyz//ssh_config=qwe"))
        assert len(l) == 1
        popen_args = l[0]
        i = popen_args.index("-F")
        assert popen_args[i + 1] == "qwe"

    def test_sshaddress(self, gw, specssh):
        assert gw.remoteaddress == specssh.ssh

    def test_host_not_found(self, gw, makegateway):
        py.test.raises(
            execnet.HostNotFound, lambda: makegateway("ssh=nowhere.codespeak.net")
        )
class TestThreads:
    """Tests for multi-threaded remote execution (remote_init_threads)."""

    def test_threads(self, makegateway):
        # With multiple remote threads, channels can be serviced out of the
        # order they were created in (c2 is answered before c1).
        gw = makegateway("popen")
        gw.remote_init_threads(3)
        c1 = gw.remote_exec("channel.send(channel.receive())")
        c2 = gw.remote_exec("channel.send(channel.receive())")
        c2.send(1)
        res = c2.receive()
        assert res == 1
        c1.send(42)
        res = c1.receive()
        assert res == 42

    def test_threads_race_sending(self, makegateway):
        # multiple threads sending data in parallel
        gw = makegateway("popen")
        num = 5
        gw.remote_init_threads(num)
        print("remote_init_threads(%d)" % num)
        channels = []
        for x in range(num):
            ch = gw.remote_exec(
                """
                for x in range(10):
                    channel.send(''*1000)
                channel.receive()
            """
            )
            channels.append(ch)
        for ch in channels:
            for x in range(10):
                ch.receive(TESTTIMEOUT)
            ch.send(1)
        for ch in channels:
            ch.waitclose(TESTTIMEOUT)

    @flakytest
    def test_status_with_threads(self, makegateway):
        gw = makegateway("popen")
        c1 = gw.remote_exec("channel.send(1) ; channel.receive()")
        c2 = gw.remote_exec("channel.send(2) ; channel.receive()")
        c1.receive()
        c2.receive()
        rstatus = gw.remote_status()
        assert rstatus.numexecuting == 2
        c1.send(1)
        c2.send(1)
        c1.waitclose()
        c2.waitclose()
        # there is a slight chance that an execution thread
        # is still active although it's accompanying channel
        # is already closed.
        for i in range(10):
            rstatus = gw.remote_status()
            if rstatus.numexecuting == 0:
                return
        assert 0, "numexecuting didn't drop to zero"
class TestTracing:
    """Tests for the EXECNET_DEBUG tracing facility."""

    def test_popen_filetracing(self, testdir, monkeypatch, makegateway):
        # EXECNET_DEBUG=1 writes trace output to a file in the temp dir.
        tmpdir = testdir.tmpdir
        monkeypatch.setenv("TMP", str(tmpdir))
        monkeypatch.setenv("TEMP", str(tmpdir))  # windows
        monkeypatch.setenv("EXECNET_DEBUG", "1")
        gw = makegateway("popen")
        # hack out the debuffilename
        fn = gw.remote_exec(
            "import execnet;channel.send(execnet.gateway_base.fn)"
        ).receive()
        workerfile = py.path.local(fn)
        assert workerfile.check()
        worker_line = "creating workergateway"
        for line in workerfile.readlines():
            if worker_line in line:
                break
        else:
            py.test.fail("did not find {!r} in tracefile".format(worker_line))
        gw.exit()

    @skip_win_pypy
    @flakytest
    def test_popen_stderr_tracing(self, capfd, monkeypatch, makegateway):
        # EXECNET_DEBUG=2 routes trace output to stderr instead of a file.
        monkeypatch.setenv("EXECNET_DEBUG", "2")
        gw = makegateway("popen")
        pid = gw.remote_exec("import os ; channel.send(os.getpid())").receive()
        out, err = capfd.readouterr()
        worker_line = "[%s] creating workergateway" % pid
        assert worker_line in err
        gw.exit()

    def test_no_tracing_by_default(self):
        assert (
            gateway_base.trace == gateway_base.notrace
        ), "trace does not to default to empty tracing"
class TestStringCoerce:
    """Tests for cross-version string coercion between py2 and py3 peers.

    Each test only runs on the interpreter version named first in its method
    name (see the skipif guards); ``unicode`` in test_2to3 is the Python 2
    builtin and is valid there.
    """

    @py.test.mark.skipif('sys.version>="3.0"')
    def test_2to3(self, makegateway):
        python = _find_version("3")
        gw = makegateway("popen//python=%s" % python)
        ch = gw.remote_exec("channel.send(channel.receive())")
        ch.send("a")
        res = ch.receive()
        assert isinstance(res, unicode)
        # With py3str_as_py2str the py3 side's str comes back as a py2 str.
        gw.reconfigure(py3str_as_py2str=True)
        ch = gw.remote_exec("channel.send(channel.receive())")
        ch.send("a")
        res = ch.receive()
        assert isinstance(res, str)
        gw.exit()

    @py.test.mark.skipif('sys.version<"3.0"')
    def test_3to2(self, makegateway):
        python = _find_version("2")
        gw = makegateway("popen//python=%s" % python)
        ch = gw.remote_exec("channel.send(channel.receive())")
        ch.send(bytes("a", "ascii"))
        res = ch.receive()
        assert isinstance(res, str)
        # Disable py2str->py3str promotion: the echoed str arrives as bytes.
        gw.reconfigure(py3str_as_py2str=True, py2str_as_py3str=False)
        ch = gw.remote_exec("channel.send(channel.receive())")
        ch.send("a")
        res = ch.receive()
        assert isinstance(res, bytes)
        gw.exit()
@pytest.mark.parametrize(
    "spec, expected_args",
    [
        ("popen//python=python", ["python"]),
        ("popen//python=sudo -u test python", ["sudo", "-u", "test", "python"]),
        pytest.param(
            r"popen//python=/hans\ alt/bin/python",
            ["/hans alt/bin/python"],
            marks=pytest.mark.skipif(
                sys.platform.startswith("win"), reason="invalid spec on Windows"
            ),
        ),
        ('popen//python="/u/test me/python" -e', ["/u/test me/python", "-e"]),
    ],
)
def test_popen_args(spec, expected_args):
    """popen_args must shlex-split the python= spec option (quotes, escapes)."""
    expected_args = expected_args + ["-u", "-c", gateway_io.popen_bootstrapline]
    args = gateway_io.popen_args(execnet.XSpec(spec))
    assert args == expected_args
| |
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, CertifiedAwardFinancialFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns, populate_publish_status
from dataactcore.models.lookups import PUBLISH_STATUS_DICT
# Identifier of the SQL validation rule under test, resolved by the helpers.
_FILE = 'c27_award_financial'
def test_column_headers(database):
    """Rule c27 should report exactly this set of columns.

    Note: the original literal listed 'uniqueid_ProgramActivityCode' twice;
    the duplicate element is removed here (a set ignores duplicates, so the
    expected value is unchanged).
    """
    expected_subset = {'row_number', 'tas', 'disaster_emergency_fund_code', 'program_activity_code',
                       'program_activity_name', 'object_class', 'by_direct_reimbursable_fun', 'fain', 'uri', 'piid',
                       'parent_award_id', 'gross_outlay_amount_by_awa_cpe', 'uniqueid_TAS',
                       'uniqueid_DisasterEmergencyFundCode', 'uniqueid_ProgramActivityCode',
                       'uniqueid_ProgramActivityName', 'uniqueid_ObjectClass',
                       'uniqueid_ByDirectReimbursableFundingSource', 'uniqueid_FAIN', 'uniqueid_URI', 'uniqueid_PIID',
                       'uniqueid_ParentAwardId'}
    actual = set(query_columns(_FILE, database))
    assert expected_subset == actual
def test_success(database):
"""
Test File C GrossOutlayAmountByAward_CPE balance for a TAS, DEFC, program activity code + name, object class
code, direct/reimbursable flag, and Award ID combination should continue to be reported in subsequent periods
during the FY, once it has been submitted to DATA Act, unless the most recently reported outlay balance for this
award breakdown was zero. This only applies to File C outlays, not TOA.
"""
populate_publish_status(database)
# Base submission
sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=5)
caf_uri = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri='eFgH',
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=5)
caf_piid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri=None,
piid='iJkL', parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=5)
caf_paid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_TAS', fain=None, uri=None,
piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='N',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=5)
caf_zero = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xYz', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=0)
caf_null = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xyZ', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None,
object_class=None, by_direct_reimbursable_fun=None,
gross_outlay_amount_by_awa_cpe=None)
caf_tas = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='different_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
caf_all_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD',
uri='eFgH', piid='mNoP', parent_award_id='qRsT',
disaster_emergency_fund_code='9', program_activity_code='c',
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
caf_pac = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code='c',
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
caf_pan = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name='n', object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
caf_obj = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class='c',
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
caf_dr = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun='r', gross_outlay_amount_by_awa_cpe=5)
database.session.add_all([sub_1, caf_fain, caf_uri, caf_piid, caf_paid, caf_zero, caf_null, caf_tas, caf_all_9,
caf_pac, caf_pan, caf_obj, caf_dr])
database.session.commit()
# quarterly submission with each of the previous values (one of them is 0 now)
sub_q = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
frec_code=None, is_quarter_format=True, d2_submission=False)
af_fain = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=0)
af_uri = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_piid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=7)
af_paid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
parent_award_id='qrst', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_zero = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=6)
af_null = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_tas = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='different_tas', fain='hijk', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_9_match = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
af_pac = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code='c',
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=1)
af_pan = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name='n', object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_obj = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class='c',
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_dr = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun='r', gross_outlay_amount_by_awa_cpe=4)
# Additional line doesn't mess anything up
af_bonus = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='something_different')
errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
af_9_match, af_pac, af_pan, af_obj, af_dr, af_bonus],
submission=sub_q)
assert errors == 0
# period submission with each of the previous values
sub_p = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
frec_code=None, is_quarter_format=True, d2_submission=False)
af_fain = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=9)
af_uri = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_piid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=7)
af_paid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
parent_award_id='qrst', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_zero = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=6)
af_null = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_tas = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='different_tas', fain='hijk', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
# matches the DEFC of 9 with a different DEFC
af_9_match = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
af_pac = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code='c',
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=1)
af_pan = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name='n', object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_obj = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class='c',
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_dr = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun='r', gross_outlay_amount_by_awa_cpe=4)
# Additional line doesn't mess anything up
af_bonus = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='something_different')
errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
af_9_match, af_pac, af_pan, af_obj, af_dr, af_bonus],
submission=sub_p)
assert errors == 0
# submission missing the values that were 0 and NULL the previous quarter does not throw errors
sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
frec_code=None, is_quarter_format=True, d2_submission=False)
af_fain = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=9)
af_uri = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_piid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=7)
af_paid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
parent_award_id='qrst', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_tas = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='different_tas', fain='hijk', uri=None,
piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_9_match = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
program_activity_code=None, program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
af_pac = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code='c',
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=1)
af_pan = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name='n', object_class=None,
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=2)
af_obj = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class='c',
by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=3)
af_dr = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='hiJK',
uri=None, piid=None, parent_award_id=None,
disaster_emergency_fund_code='n', program_activity_code=None,
program_activity_name=None, object_class=None,
by_direct_reimbursable_fun='r', gross_outlay_amount_by_awa_cpe=4)
errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_tas, af_pac, af_pan,
af_obj, af_dr, af_9_match],
submission=sub_4)
assert errors == 0
# submission that doesn't have a "previous period"
sub_5 = SubmissionFactory(submission_id=5, reporting_fiscal_year=2020, reporting_fiscal_period=5, cgac_code='test',
frec_code=None, is_quarter_format=True)
errors = number_of_errors(_FILE, database, models=[], submission=sub_5)
assert errors == 0
def test_failure(database):
    """
    Test fail File C GrossOutlayAmountByAward_CPE balance for a TAS, DEFC, program activity code + name, object
    class code, direct/reimbursable flag, and Award ID combination should continue to be reported in subsequent
    periods during the FY, once it has been submitted to DATA Act, unless the most recently reported outlay balance
    for this award breakdown was zero. This only applies to File C outlays, not TOA.
    """
    populate_publish_status(database)

    # Base submission: a published period-3 submission with three certified rows
    # (DEFC 'N', 'O', and '9'), each carrying a nonzero outlay balance, so each
    # combination must reappear in the following period.
    sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
                              frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
    caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
                                              program_activity_code=None, program_activity_name=None,
                                              object_class=None, by_direct_reimbursable_fun=None,
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_defc = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd', uri=None,
                                              piid=None, parent_award_id='testingHere',
                                              disaster_emergency_fund_code='O', program_activity_code=None,
                                              program_activity_name=None, object_class=None,
                                              by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
    # NOTE(review): by_direct_reimbursable_fun is omitted here (factory default
    # applies) unlike the sibling rows — confirm that is intentional.
    caf_defc_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd',
                                                uri=None, piid=None, parent_award_id='testingHere',
                                                disaster_emergency_fund_code='9', program_activity_code=None,
                                                program_activity_name=None, object_class=None,
                                                gross_outlay_amount_by_awa_cpe=5)
    database.session.add_all([sub_1, caf_fain, caf_defc, caf_defc_9])
    database.session.commit()

    # submission missing previous period value, missing value of 9 still registers an error
    sub_2 = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)

    # All three certified combinations are absent -> three errors.
    errors = number_of_errors(_FILE, database, models=[], submission=sub_2)
    assert errors == 3

    # submission with a row that has similar but not exact values (has a uri when the original didn't)
    sub_3 = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_other = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri='efgh',
                                     piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                     program_activity_code=None, program_activity_name=None, object_class=None,
                                     by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
    af_defc = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri=None,
                                    piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='O',
                                    program_activity_code=None, program_activity_name=None, object_class=None,
                                    by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='9',
                                      program_activity_code=None, program_activity_name=None, object_class=None,
                                      by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)

    # The uri-bearing row does not match the certified 'N' row, so that
    # combination (plus the DEFC '9'/'O' mismatch pattern) leaves two errors.
    errors = number_of_errors(_FILE, database, models=[af_other, af_defc, af_defc_9], submission=sub_3)
    assert errors == 2

    # submission with a row that matches but has gross outlay of NULL
    sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_null = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                    piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                    program_activity_code=None, program_activity_name=None, object_class=None,
                                    by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=None)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='n',
                                      program_activity_code=None, program_activity_name=None, object_class=None,
                                      by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)
    af_defc = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                    piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='o',
                                    program_activity_code=None, program_activity_name=None, object_class=None,
                                    by_direct_reimbursable_fun=None, gross_outlay_amount_by_awa_cpe=5)

    # A NULL gross outlay does not satisfy the reporting requirement.
    errors = number_of_errors(_FILE, database, models=[af_null, af_defc, af_defc_9], submission=sub_4)
    assert errors == 2
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads Summaries from and writes Summaries to event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import threading
import time
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
class SummaryWriter(object):
  """Writes `Summary` protocol buffers to event files.

  The `SummaryWriter` class provides a mechanism to create an event file in a
  given directory and add summaries and events to it. The class updates the
  file contents asynchronously. This allows a training program to call methods
  to add data to the file directly from the training loop, without slowing down
  training.

  @@__init__

  @@add_summary
  @@add_event
  @@add_graph
  @@flush
  @@close
  """

  def __init__(self, logdir, graph_def=None, max_queue=10, flush_secs=120):
    """Creates a `SummaryWriter` and an event file.

    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers constructed when you
    call one of the following functions: `add_summary()`, `add_event()`, or
    `add_graph()`.

    If you pass a `graph_def` protocol buffer to the constructor it is added to
    the event file. (This is equivalent to calling `add_graph()` later).

    TensorBoard will pick the graph from the file and display it graphically so
    you can interactively explore the graph you built. You will usually pass
    the graph from the session in which you launched it:

    ```python
    ...create a graph...
    # Launch the graph in a session.
    sess = tf.Session()
    # Create a summary writer, add the 'graph_def' to the event file.
    writer = tf.train.SummaryWriter(<some-directory>, sess.graph_def)
    ```

    The other arguments to the constructor control the asynchronous writes to
    the event file:

    *  `flush_secs`: How often, in seconds, to flush the added summaries
       and events to disk.
    *  `max_queue`: Maximum number of summaries or events pending to be
       written to disk before one of the 'add' calls block.

    Args:
      logdir: A string. Directory where event file will be written.
      graph_def: A `GraphDef` protocol buffer.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
    """
    self._logdir = logdir
    if not gfile.IsDirectory(self._logdir):
      gfile.MakeDirs(self._logdir)
    # Bounded queue: `add_*` calls block once `max_queue` events are pending.
    self._event_queue = six.moves.queue.Queue(max_queue)
    self._ev_writer = pywrap_tensorflow.EventsWriter(
        compat.as_bytes(os.path.join(self._logdir, "events")))
    # Background daemon thread drains the queue into the events writer.
    self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                      flush_secs)
    self._worker.start()
    if graph_def is not None:
      self.add_graph(graph_def)

  def add_summary(self, summary, global_step=None):
    """Adds a `Summary` protocol buffer to the event file.

    This method wraps the provided summary in an `Event` protocol buffer
    and adds it to the event file.

    You can pass the result of evaluating any summary op, using
    [`Session.run()`](client.md#Session.run] or
    [`Tensor.eval()`](framework.md#Tensor.eval), to this
    function. Alternatively, you can pass a `tf.Summary` protocol
    buffer that you populate with your own data. The latter is
    commonly done to report evaluation results in event files.

    Args:
      summary: A `Summary` protocol buffer, optionally serialized as a string.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    # Accept a serialized proto and parse it into a Summary message.
    if isinstance(summary, bytes):
      summ = summary_pb2.Summary()
      summ.ParseFromString(summary)
      summary = summ
    event = event_pb2.Event(wall_time=time.time(), summary=summary)
    if global_step is not None:
      event.step = int(global_step)
    self.add_event(event)

  def add_event(self, event):
    """Adds an event to the event file.

    Args:
      event: An `Event` protocol buffer.
    """
    # May block if the queue is full; the worker thread drains it.
    self._event_queue.put(event)

  def add_graph(self, graph_def, global_step=None):
    """Adds a `GraphDef` protocol buffer to the event file.

    The graph described by the protocol buffer will be displayed by
    TensorBoard. Most users pass a graph in the constructor instead.

    Args:
      graph_def: A `GraphDef` protocol buffer.
      global_step: Number. Optional global step counter to record with the
        graph.
    """
    event = event_pb2.Event(wall_time=time.time(), graph_def=graph_def)
    if global_step is not None:
      event.step = int(global_step)
    self._event_queue.put(event)

  def flush(self):
    """Flushes the event file to disk.

    Call this method to make sure that all pending events have been written to
    disk.
    """
    # Wait for the worker to finish every queued event, then flush the writer.
    self._event_queue.join()
    self._ev_writer.Flush()

  def close(self):
    """Flushes the event file to disk and close the file.

    Call this method when you do not need the summary writer anymore.
    """
    self.flush()
    self._ev_writer.Close()
class _EventLoggerThread(threading.Thread):
"""Thread that logs events."""
def __init__(self, queue, ev_writer, flush_secs):
"""Creates an _EventLoggerThread.
Args:
queue: A Queue from which to dequeue events.
ev_writer: An event writer. Used to log brain events for
the visualizer.
flush_secs: How often, in seconds, to flush the
pending file to disk.
"""
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue
self._ev_writer = ev_writer
self._flush_secs = flush_secs
# The first event will be flushed immediately.
self._next_event_flush_time = 0
def run(self):
while True:
event = self._queue.get()
try:
self._ev_writer.WriteEvent(event)
# Flush the event writer every so often.
now = time.time()
if now > self._next_event_flush_time:
self._ev_writer.Flush()
# Do it again in two minutes.
self._next_event_flush_time = now + self._flush_secs
finally:
self._queue.task_done()
def summary_iterator(path):
  """An iterator for reading `Event` protocol buffers from an event file.

  You can use this function to read events written to an event file. It returns
  a Python iterator that yields `Event` protocol buffers.

  Example: Print the contents of an events file.

  ```python
  for e in tf.summary_iterator(path to events file):
      print(e)
  ```

  Example: Print selected summary values.

  ```python
  # This example supposes that the events file contains summaries with a
  # summary value tag 'loss'. These could have been added by calling
  # `add_summary()`, passing the output of a scalar summary op created with
  # with: `tf.scalar_summary(['loss'], loss_tensor)`.
  for e in tf.summary_iterator(path to events file):
      for v in e.summary.value:
          if v.tag == 'loss':
              print(v.simple_value)
  ```

  See the protocol buffer definitions of
  [Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
  and
  [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  for more information about their attributes.

  Args:
    path: The path to an event file created by a `SummaryWriter`.

  Yields:
    `Event` protocol buffers.
  """
  # Lazily decode each serialized record into an Event message.
  records = tf_record.tf_record_iterator(path)
  for serialized in records:
    yield event_pb2.Event.FromString(serialized)
| |
"""Allow to set up simple automation rules via the config file."""
import asyncio
from functools import partial
import importlib
import logging
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.core import CoreState, Context
from homeassistant.loader import bind_hass
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF,
SERVICE_TOGGLE, SERVICE_RELOAD, EVENT_HOMEASSISTANT_START, CONF_ID,
EVENT_AUTOMATION_TRIGGERED, ATTR_NAME)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import extract_domain_configs, script, condition
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util.dt import utcnow
import homeassistant.helpers.config_validation as cv
# Integration domain and entity naming.
DOMAIN = 'automation'
DEPENDENCIES = ['group']
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_AUTOMATIONS = 'all automations'

# Configuration keys accepted inside an automation block.
CONF_ALIAS = 'alias'
CONF_HIDE_ENTITY = 'hide_entity'
CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONF_INITIAL_STATE = 'initial_state'

# Condition combinator values.
CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'

# Defaults applied when the corresponding option is omitted.
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True

# State attributes and service data fields.
ATTR_LAST_TRIGGERED = 'last_triggered'
ATTR_VARIABLES = 'variables'
SERVICE_TRIGGER = 'trigger'

_LOGGER = logging.getLogger(__name__)
def _platform_validator(config):
    """Validate that the config names an importable trigger platform."""
    module_path = 'homeassistant.components.automation.{}'.format(
        config[CONF_PLATFORM])
    try:
        platform = importlib.import_module(module_path)
    except ImportError:
        raise vol.Invalid('Invalid platform specified') from None

    # Delegate the rest of the validation to the platform itself.
    return platform.TRIGGER_SCHEMA(config)
# Accept one trigger dict or a list of them; each must at least name a
# platform and is then fully validated by that platform's own TRIGGER_SCHEMA.
_TRIGGER_SCHEMA = vol.All(
    cv.ensure_list,
    [
        vol.All(
            vol.Schema({
                vol.Required(CONF_PLATFORM): str
            }, extra=vol.ALLOW_EXTRA),
            _platform_validator
        ),
    ]
)

_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])

# Schema for one automation block in the configuration.
PLATFORM_SCHEMA = vol.Schema({
    # str on purpose
    CONF_ID: str,
    CONF_ALIAS: cv.string,
    vol.Optional(CONF_INITIAL_STATE): cv.boolean,
    vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
    vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
    vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
    vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
})

# Service payload schemas: generic on/off/toggle, explicit trigger, reload.
SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})

TRIGGER_SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
    vol.Optional(ATTR_VARIABLES, default={}): dict,
})

RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
    """Return True when the given automation entity is currently on.

    Async friendly.
    """
    currently_on = hass.states.is_state(entity_id, STATE_ON)
    return currently_on
async def async_setup(hass, config):
    """Set up the automation."""
    component = EntityComponent(_LOGGER, DOMAIN, hass,
                                group_name=GROUP_NAME_ALL_AUTOMATIONS)

    # Create all automation entities from the validated config.
    await _async_process_config(hass, config, component)

    async def trigger_service_handler(service_call):
        """Handle automation triggers."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            # skip_condition=True: an explicit trigger service call bypasses
            # the automation's own condition block.
            tasks.append(entity.async_trigger(
                service_call.data.get(ATTR_VARIABLES),
                skip_condition=True,
                context=service_call.context))

        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def turn_onoff_service_handler(service_call):
        """Handle automation turn on/off service calls."""
        tasks = []
        # Dispatch to async_turn_on / async_turn_off based on the service name.
        method = 'async_{}'.format(service_call.service)
        for entity in await component.async_extract_from_service(service_call):
            tasks.append(getattr(entity, method)())

        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def toggle_service_handler(service_call):
        """Handle automation toggle service calls."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            if entity.is_on:
                tasks.append(entity.async_turn_off())
            else:
                tasks.append(entity.async_turn_on())

        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def reload_service_handler(service_call):
        """Remove all automations and load new ones from config."""
        conf = await component.async_prepare_reload()
        if conf is None:
            return
        await _async_process_config(hass, conf, component)

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service_handler,
        schema=TRIGGER_SERVICE_SCHEMA)

    hass.services.async_register(
        DOMAIN, SERVICE_RELOAD, reload_service_handler,
        schema=RELOAD_SERVICE_SCHEMA)

    hass.services.async_register(
        DOMAIN, SERVICE_TOGGLE, toggle_service_handler,
        schema=SERVICE_SCHEMA)

    # turn_on and turn_off share one handler; it dispatches by service name.
    for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
        hass.services.async_register(
            DOMAIN, service, turn_onoff_service_handler,
            schema=SERVICE_SCHEMA)

    return True
class AutomationEntity(ToggleEntity, RestoreEntity):
    """Entity to show status of entity."""

    def __init__(self, automation_id, name, async_attach_triggers, cond_func,
                 async_action, hidden, initial_state):
        """Initialize an automation entity."""
        self._id = automation_id
        self._name = name
        # Coroutine function that wires up the configured triggers and returns
        # a detach callable.
        self._async_attach_triggers = async_attach_triggers
        # Non-None while enabled; doubles as the "is_on" marker (see is_on).
        self._async_detach_triggers = None
        self._cond_func = cond_func
        self._async_action = async_action
        self._last_triggered = None
        self._hidden = hidden
        # None means "no explicit config"; fall back to recorder / default.
        self._initial_state = initial_state

    @property
    def name(self):
        """Name of the automation."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed for automation entities."""
        return False

    @property
    def state_attributes(self):
        """Return the entity state attributes."""
        return {
            ATTR_LAST_TRIGGERED: self._last_triggered
        }

    @property
    def hidden(self) -> bool:
        """Return True if the automation entity should be hidden from UIs."""
        return self._hidden

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        return self._async_detach_triggers is not None

    async def async_added_to_hass(self) -> None:
        """Startup with initial state or previous state."""
        await super().async_added_to_hass()
        # Precedence: explicit config > recorder-restored state > default.
        if self._initial_state is not None:
            enable_automation = self._initial_state
            _LOGGER.debug("Automation %s initial state %s from config "
                          "initial_state", self.entity_id, enable_automation)
        else:
            state = await self.async_get_last_state()
            if state:
                enable_automation = state.state == STATE_ON
                self._last_triggered = state.attributes.get('last_triggered')
                _LOGGER.debug("Automation %s initial state %s from recorder "
                              "last state %s", self.entity_id,
                              enable_automation, state)
            else:
                enable_automation = DEFAULT_INITIAL_STATE
                _LOGGER.debug("Automation %s initial state %s from default "
                              "initial state", self.entity_id,
                              enable_automation)

        if not enable_automation:
            return

        # HomeAssistant is starting up
        if self.hass.state == CoreState.not_running:
            async def async_enable_automation(event):
                """Start automation on startup."""
                await self.async_enable()

            # Defer trigger attachment until startup completes.
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_START, async_enable_automation)

        # HomeAssistant is running
        else:
            await self.async_enable()

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the entity on and update the state."""
        if self.is_on:
            return

        await self.async_enable()

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        if not self.is_on:
            return

        # Detach triggers first, then clear the marker so is_on flips.
        self._async_detach_triggers()
        self._async_detach_triggers = None
        await self.async_update_ha_state()

    async def async_trigger(self, variables, skip_condition=False,
                            context=None):
        """Trigger automation.

        This method is a coroutine.
        """
        if not skip_condition and not self._cond_func(variables):
            return

        # Create a new context referring to the old context.
        parent_id = None if context is None else context.id
        trigger_context = Context(parent_id=parent_id)

        self.async_set_context(trigger_context)
        self.hass.bus.async_fire(EVENT_AUTOMATION_TRIGGERED, {
            ATTR_NAME: self._name,
            ATTR_ENTITY_ID: self.entity_id,
        }, context=trigger_context)
        await self._async_action(self.entity_id, variables, trigger_context)
        self._last_triggered = utcnow()
        await self.async_update_ha_state()

    async def async_will_remove_from_hass(self):
        """Remove listeners when removing automation from HASS."""
        await super().async_will_remove_from_hass()
        await self.async_turn_off()

    async def async_enable(self):
        """Enable this automation entity.

        This method is a coroutine.
        """
        if self.is_on:
            return

        # The detach callable returned here marks the entity as "on".
        self._async_detach_triggers = await self._async_attach_triggers(
            self.async_trigger)
        await self.async_update_ha_state()

    @property
    def device_state_attributes(self):
        """Return automation attributes."""
        if self._id is None:
            return None

        return {
            CONF_ID: self._id
        }
async def _async_process_config(hass, config, component):
    """Process config and add automations.

    This method is a coroutine.
    """
    entities = []

    for config_key in extract_domain_configs(config, DOMAIN):
        conf = config[config_key]

        for list_no, config_block in enumerate(conf):
            automation_id = config_block.get(CONF_ID)
            # Fall back to "<config_key> <index>" when no alias is given.
            name = config_block.get(CONF_ALIAS) or "{} {}".format(config_key,
                                                                  list_no)

            hidden = config_block[CONF_HIDE_ENTITY]
            initial_state = config_block.get(CONF_INITIAL_STATE)

            action = _async_get_action(hass, config_block.get(CONF_ACTION, {}),
                                       name)

            if CONF_CONDITION in config_block:
                cond_func = _async_process_if(hass, config, config_block)

                # Invalid condition -> skip this automation entirely.
                if cond_func is None:
                    continue
            else:
                # No condition block: always pass. Safe despite being defined
                # in a loop, because it is passed to the entity immediately.
                def cond_func(variables):
                    """Condition will always pass."""
                    return True

            async_attach_triggers = partial(
                _async_process_trigger, hass, config,
                config_block.get(CONF_TRIGGER, []), name
            )
            entity = AutomationEntity(
                automation_id, name, async_attach_triggers, cond_func, action,
                hidden, initial_state)

            entities.append(entity)

    if entities:
        await component.async_add_entities(entities)
def _async_get_action(hass, config, name):
    """Return an async callable that executes the configured action script."""
    sequence = script.Script(hass, config, name)

    async def action(entity_id, variables, context):
        """Execute an action."""
        _LOGGER.info('Executing %s', name)

        try:
            await sequence.async_run(variables, context)
        except Exception as err:  # pylint: disable=broad-except
            # Log via the script helper so the failing step is identified.
            sequence.async_log_exception(
                _LOGGER,
                'Error while executing automation {}'.format(entity_id), err)

    return action
def _async_process_if(hass, config, p_config):
    """Compile the condition section into a single callable.

    Returns None (after logging a warning) when any condition is invalid.
    """
    condition_configs = p_config.get(CONF_CONDITION)

    compiled = []
    for cond_conf in condition_configs:
        try:
            compiled.append(condition.async_from_config(cond_conf, False))
        except HomeAssistantError as err:
            _LOGGER.warning('Invalid condition: %s', err)
            return None

    def if_action(variables=None):
        """AND all conditions."""
        return all(check(hass, variables) for check in compiled)

    return if_action
async def _async_process_trigger(hass, config, trigger_configs, name, action):
    """Set up the triggers.

    This method is a coroutine.
    """
    removes = []
    info = {
        'name': name
    }

    for conf in trigger_configs:
        platform = await async_prepare_setup_platform(
            hass, config, DOMAIN, conf.get(CONF_PLATFORM))

        # A missing platform aborts the whole setup; already-attached
        # triggers in `removes` are abandoned, not detached.
        if platform is None:
            return None

        remove = await platform.async_trigger(hass, conf, action, info)

        if not remove:
            _LOGGER.error("Error setting up trigger %s", name)
            continue

        _LOGGER.info("Initialized trigger %s", name)
        removes.append(remove)

    if not removes:
        return None

    def remove_triggers():
        """Remove attached triggers."""
        for remove in removes:
            remove()

    return remove_triggers
| |
"""
Useful form fields for use with SQLAlchemy ORM.
"""
import operator
from wtforms import widgets
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
from .tools import get_primary_key
from flask.ext.admin._compat import text_type, string_types
from flask.ext.admin.model.fields import InlineFieldList, InlineModelFormField
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
__all__ = (
'QuerySelectField', 'QuerySelectMultipleField',
)
class QuerySelectField(SelectFieldBase):
    """
    Will display a select drop-down field to choose between ORM results in a
    sqlalchemy `Query`. The `data` property actually will store/keep an ORM
    model instance, not the ID. Submitting a choice which is not in the query
    will result in a validation error.

    This field only works for queries on models whose primary key column(s)
    have a consistent string representation. This means it mostly only works
    for those composed of string, unicode, and integer types. For the most
    part, the primary keys will be auto-detected from the model, alternately
    pass a one-argument callable to `get_pk` which can return a unique
    comparable key.

    The `query` property on the field can be set from within a view to assign
    a query per-instance to the field. If the property is not set, the
    `query_factory` callable passed to the field constructor will be called to
    obtain a query.

    Specify `get_label` to customize the label associated with each option. If
    a string, this is the name of an attribute on the model object to use as
    the label text. If a one-argument callable, this callable will be passed
    model instance and expected to return the label text. Otherwise, the model
    object's `__str__` or `__unicode__` will be used.

    If `allow_blank` is set to `True`, then a blank choice will be added to the
    top of the list. Selecting this choice will result in the `data` property
    being `None`. The label for this blank choice can be set by specifying the
    `blank_text` parameter.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, query_factory=None,
                 get_pk=None, get_label=None, allow_blank=False,
                 blank_text=u'', **kwargs):
        super(QuerySelectField, self).__init__(label, validators, **kwargs)
        self.query_factory = query_factory

        # Without an explicit get_pk, derive keys via SQLAlchemy's
        # identity_key (requires it to have imported successfully).
        if get_pk is None:
            if not has_identity_key:
                raise Exception(u'The sqlalchemy identity_key function could not be imported.')
            self.get_pk = get_pk_from_identity
        else:
            self.get_pk = get_pk

        # get_label: None -> object itself, str -> attribute name,
        # callable -> used as-is.
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self.query = None
        # Cached list of (pk-string, object) pairs; built lazily once.
        self._object_list = None

    def _get_data(self):
        # Lazily resolve the submitted pk string back to an ORM object.
        if self._formdata is not None:
            for pk, obj in self._get_object_list():
                if pk == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        # Direct assignment supersedes any pending form data.
        self._formdata = None

    data = property(_get_data, _set_data)

    def _get_object_list(self):
        if self._object_list is None:
            # Per-instance query wins over the constructor's query_factory.
            query = self.query or self.query_factory()
            get_pk = self.get_pk
            self._object_list = [(text_type(get_pk(obj)), obj) for obj in query]

        return self._object_list

    def iter_choices(self):
        if self.allow_blank:
            yield (u'__None', self.blank_text, self.data is None)

        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj == self.data)

    def process_formdata(self, valuelist):
        if valuelist:
            if self.allow_blank and valuelist[0] == u'__None':
                self.data = None
            else:
                # Defer resolution: store the raw pk and resolve in _get_data.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        # A blank value is valid only when allow_blank is set; otherwise the
        # submitted object must appear in the query results.
        if not self.allow_blank or self.data is not None:
            for pk, obj in self._get_object_list():
                if self.data == obj:
                    break
            else:
                raise ValidationError(self.gettext(u'Not a valid choice'))
class QuerySelectMultipleField(QuerySelectField):
    """
    Multiple-select variant of `QuerySelectField`.

    The `data` property holds a list of ORM model instances and is an empty
    list when nothing is selected. If any item in the data list or in the
    submitted form data cannot be found in the query, validation fails.
    """
    widget = widgets.Select(multiple=True)
    def __init__(self, label=None, validators=None, default=None, **kwargs):
        super(QuerySelectMultipleField, self).__init__(
            label, validators,
            default=[] if default is None else default,
            **kwargs)
        self._invalid_formdata = False
    def _get_data(self):
        pending = self._formdata
        if pending is not None:
            selected = []
            # Walk the query results, matching submitted pk strings.
            for pk, obj in self._get_object_list():
                if not pending:
                    break
                if pk in pending:
                    pending.remove(pk)
                    selected.append(obj)
            if pending:
                # Some submitted pks matched nothing in the query.
                self._invalid_formdata = True
            self._set_data(selected)
        return self._data
    def _set_data(self, data):
        self._data = data
        self._formdata = None
    data = property(_get_data, _set_data)
    def iter_choices(self):
        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj in self.data)
    def process_formdata(self, valuelist):
        self._formdata = set(valuelist)
    def pre_validate(self, form):
        if self._invalid_formdata:
            raise ValidationError(self.gettext(u'Not a valid choice'))
        if self.data:
            valid_objects = [pair[1] for pair in self._get_object_list()]
            for item in self.data:
                if item not in valid_objects:
                    raise ValidationError(self.gettext(u'Not a valid choice'))
class InlineModelFormList(InlineFieldList):
    """
    List of inline model forms for editing a one-to-many relation.

    Each entry is an `InlineModelFormField` over `form`, keyed by the
    related model's primary key so submitted rows can be matched back to
    existing instances.
    """
    def __init__(self, form, session, model, prop, **kwargs):
        """
        Default constructor.
        :param form:
            Form for the related model
        :param session:
            SQLAlchemy session
        :param model:
            Related model
        :param prop:
            Related property name
        """
        self.form = form
        self.session = session
        self.model = model
        self.prop = prop
        # Primary key attribute name; used in populate_obj() to match
        # submitted rows against existing related instances.
        self._pk = get_primary_key(model)
        super(InlineModelFormList, self).__init__(InlineModelFormField(form, self._pk), **kwargs)
    def display_row_controls(self, field):
        # Only rows that already exist in the database (i.e. have a pk)
        # get row controls such as the delete checkbox.
        return field.get_pk() is not None
    def populate_obj(self, obj, name):
        # Sync the submitted inline forms with the parent's related
        # collection: update matched rows, delete flagged ones, and append
        # new instances for rows without a known pk.
        values = getattr(obj, name, None)
        if values is None:
            return
        # Create primary key map
        pk_map = dict((str(getattr(v, self._pk)), v) for v in values)
        # Handle request data
        for field in self.entries:
            field_id = field.get_pk()
            if field_id in pk_map:
                model = pk_map[field_id]
                if self.should_delete(field):
                    # Flagged for deletion: remove from the session and skip
                    # repopulating it.
                    self.session.delete(model)
                    continue
            else:
                # No matching pk -> new row; append to the ORM collection so
                # the relationship cascade persists it.
                model = self.model()
                values.append(model)
            field.populate_obj(model, None)
def get_pk_from_identity(obj):
    # TODO: Remove me
    # Derive a colon-joined string key from SQLAlchemy's identity key.
    cls, key = identity_key(instance=obj)
    return u':'.join(map(text_type, key))
| |
"""Support for APCUPSd sensors."""
from __future__ import annotations
import logging
from apcaccess.status import ALL_UNITS
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_RESOURCES,
DEVICE_CLASS_TEMPERATURE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
TEMP_CELSIUS,
TIME_MINUTES,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
SENSOR_PREFIX = "UPS "
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="alarmdel",
name="Alarm Delay",
icon="mdi:alarm",
),
SensorEntityDescription(
key="ambtemp",
name="Ambient Temperature",
icon="mdi:thermometer",
),
SensorEntityDescription(
key="apc",
name="Status Data",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="apcmodel",
name="Model",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="badbatts",
name="Bad Batteries",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="battdate",
name="Battery Replaced",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="battstat",
name="Battery Status",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="battv",
name="Battery Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="bcharge",
name="Battery",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:battery",
),
SensorEntityDescription(
key="cable",
name="Cable Type",
icon="mdi:ethernet-cable",
),
SensorEntityDescription(
key="cumonbatt",
name="Total Time on Battery",
icon="mdi:timer-outline",
),
SensorEntityDescription(
key="date",
name="Status Date",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="dipsw",
name="Dip Switch Settings",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="dlowbatt",
name="Low Battery Signal",
icon="mdi:clock-alert",
),
SensorEntityDescription(
key="driver",
name="Driver",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="dshutd",
name="Shutdown Delay",
icon="mdi:timer-outline",
),
SensorEntityDescription(
key="dwake",
name="Wake Delay",
icon="mdi:timer-outline",
),
SensorEntityDescription(
key="end apc",
name="Date and Time",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="extbatts",
name="External Batteries",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="firmware",
name="Firmware Version",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="hitrans",
name="Transfer High",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="hostname",
name="Hostname",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="humidity",
name="Ambient Humidity",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:water-percent",
),
SensorEntityDescription(
key="itemp",
name="Internal Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
),
SensorEntityDescription(
key="lastxfer",
name="Last Transfer",
icon="mdi:transfer",
),
SensorEntityDescription(
key="linefail",
name="Input Voltage Status",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="linefreq",
name="Line Frequency",
native_unit_of_measurement=FREQUENCY_HERTZ,
icon="mdi:information-outline",
),
SensorEntityDescription(
key="linev",
name="Input Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="loadpct",
name="Load",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:gauge",
),
SensorEntityDescription(
key="loadapnt",
name="Load Apparent Power",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:gauge",
),
SensorEntityDescription(
key="lotrans",
name="Transfer Low",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="mandate",
name="Manufacture Date",
icon="mdi:calendar",
),
SensorEntityDescription(
key="masterupd",
name="Master Update",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="maxlinev",
name="Input Voltage High",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="maxtime",
name="Battery Timeout",
icon="mdi:timer-off-outline",
),
SensorEntityDescription(
key="mbattchg",
name="Battery Shutdown",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:battery-alert",
),
SensorEntityDescription(
key="minlinev",
name="Input Voltage Low",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="mintimel",
name="Shutdown Time",
icon="mdi:timer-outline",
),
SensorEntityDescription(
key="model",
name="Model",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="nombattv",
name="Battery Nominal Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="nominv",
name="Nominal Input Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="nomoutv",
name="Nominal Output Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="nompower",
name="Nominal Output Power",
native_unit_of_measurement=POWER_WATT,
icon="mdi:flash",
),
SensorEntityDescription(
key="nomapnt",
name="Nominal Apparent Power",
native_unit_of_measurement=POWER_VOLT_AMPERE,
icon="mdi:flash",
),
SensorEntityDescription(
key="numxfers",
name="Transfer Count",
icon="mdi:counter",
),
SensorEntityDescription(
key="outcurnt",
name="Output Current",
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
icon="mdi:flash",
),
SensorEntityDescription(
key="outputv",
name="Output Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:flash",
),
SensorEntityDescription(
key="reg1",
name="Register 1 Fault",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="reg2",
name="Register 2 Fault",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="reg3",
name="Register 3 Fault",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="retpct",
name="Restore Requirement",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:battery-alert",
),
SensorEntityDescription(
key="selftest",
name="Last Self Test",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="sense",
name="Sensitivity",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="serialno",
name="Serial Number",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="starttime",
name="Startup Time",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="statflag",
name="Status Flag",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="status",
name="Status",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="stesti",
name="Self Test Interval",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="timeleft",
name="Time Left",
icon="mdi:clock-alert",
),
SensorEntityDescription(
key="tonbatt",
name="Time on Battery",
icon="mdi:timer-outline",
),
SensorEntityDescription(
key="upsmode",
name="Mode",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="upsname",
name="Name",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="version",
name="Daemon Info",
icon="mdi:information-outline",
),
SensorEntityDescription(
key="xoffbat",
name="Transfer from Battery",
icon="mdi:transfer",
),
SensorEntityDescription(
key="xoffbatt",
name="Transfer from Battery",
icon="mdi:transfer",
),
SensorEntityDescription(
key="xonbatt",
name="Transfer to Battery",
icon="mdi:transfer",
),
)
# All sensor keys this platform supports; used to validate configuration.
SENSOR_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES]
# Units for keys whose status text carries no recognizable suffix.
SPECIFIC_UNITS = {"ITEMP": TEMP_CELSIUS}
# Map raw APCUPSd unit suffixes to Home Assistant units. Keys keep their
# leading space so they match only a whole trailing unit word.
INFERRED_UNITS = {
    " Minutes": TIME_MINUTES,
    " Seconds": TIME_SECONDS,
    " Percent": PERCENTAGE,
    " Volts": ELECTRIC_POTENTIAL_VOLT,
    " Ampere": ELECTRIC_CURRENT_AMPERE,
    " Volt-Ampere": POWER_VOLT_AMPERE,
    " Watts": POWER_WATT,
    " Hz": FREQUENCY_HERTZ,
    " C": TEMP_CELSIUS,
    " Percent Load Capacity": PERCENTAGE,
}
# Restrict the `resources` config option to known sensor keys.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_RESOURCES, default=[]): vol.All(
            cv.ensure_list, [vol.In(SENSOR_KEYS)]
        )
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the APCUPSd sensors."""
    data = hass.data[DOMAIN]
    requested = config[CONF_RESOURCES]
    # Warn about configured resources the UPS does not actually report.
    for res in requested:
        if res.upper() not in data.status:
            _LOGGER.warning(
                "Sensor type: %s does not appear in the APCUPSd status output",
                res,
            )
    add_entities(
        [
            APCUPSdSensor(data, desc)
            for desc in SENSOR_TYPES
            if desc.key in requested
        ],
        True,
    )
def infer_unit(value):
    """Split a trailing unit off *value*.

    Returns ``(bare_value, unit)`` when *value* ends with one of ALL_UNITS,
    mapping the raw suffix through INFERRED_UNITS; otherwise returns the
    original value with ``None`` as the unit.
    """
    matched = next((u for u in ALL_UNITS if value.endswith(u)), None)
    if matched is None:
        return value, None
    return value[: -len(matched)], INFERRED_UNITS.get(matched, matched.strip())
class APCUPSdSensor(SensorEntity):
    """Representation of a sensor entity for APCUPSd status values."""
    def __init__(self, data, description: SensorEntityDescription):
        """Initialize the sensor."""
        self.entity_description = description
        self._data = data
        self._attr_name = SENSOR_PREFIX + str(description.name)
    def update(self):
        """Get the latest status and use it to update our sensor state."""
        key = self.entity_description.key.upper()
        if key not in self._data.status:
            self._attr_native_value = None
            return
        value, inferred_unit = infer_unit(self._data.status[key])
        self._attr_native_value = value
        # Only fall back to the inferred unit when no unit was declared
        # on the entity description.
        if not self.native_unit_of_measurement:
            self._attr_native_unit_of_measurement = inferred_unit
| |
#!/usr/bin/env python3
__author__ = 'cjm'
import argparse
import logging
import re
import yaml
import json
import uuid
import csv
import itertools
import sys
import warnings
from collections import Counter
gcif = None
synmap = {}
titlemode = False
def main():
    """Expand a DOSDP YAML pattern over a table of bindings into OWL.

    Reads the pattern (-p), an optional prefix map (-P) and synonym map
    (-S), plus a bindings table (-i, or the cross-product of -x files),
    then prints a Manchester-syntax ontology to stdout. GCI axioms that
    OMN cannot express are written to the file given by -G.
    """
    parser = argparse.ArgumentParser(description='DOSDB'
                                     'fooo',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-t', '--to', type=str, required=False,
                        help='Renderer')
    parser.add_argument('-n', '--name', type=str, required=False, default='auto',
                        help='Ontology name')
    parser.add_argument('-b', '--base', type=str, required=False, default='http://purl.obolibrary.org/obo/',
                        help='URI base prefix')
    parser.add_argument('-p', '--pattern', type=str, required=False,
                        help='YAML Pattern file')
    parser.add_argument('-P', '--prefixes', type=str, required=False,
                        help='Prefix map file')
    parser.add_argument('-i', '--input', type=str, required=False,
                        help='Input file for values to be filled in template')
    parser.add_argument('-x', '--xpfiles', nargs='+', required=False,
                        help='Input file for values to be filled in template')
    # NOTE(review): argparse `type=bool` turns ANY non-empty string into
    # True; kept for CLI compatibility, but `action='store_true'` is what
    # is almost certainly meant here -- confirm before changing.
    parser.add_argument('-a', '--annotate', type=bool, required=False,
                        help='Annotate each generated class with template values')
    parser.add_argument('-U', '--titlemode', type=bool, required=False,
                        help='Auto-uppercasify (e.g. HPO)')
    parser.add_argument('-G', '--gci', type=str, required=False,
                        help='Output file for GCI axioms not expressable in OMN')
    parser.add_argument('-s', '--suppress', nargs='+', required=False, default=[],
                        help='Suppress annotations')
    parser.add_argument('-S', '--synonym', type=str, required=False, default=[],
                        help='json synonym files')
    args = parser.parse_args()

    global titlemode
    titlemode = args.titlemode

    global synmap
    if args.synonym:
        # `with` guarantees the handle is closed even if parsing fails.
        with open(args.synonym, 'r', encoding="ISO-8859-1") as f:
            synmap = json.load(f)

    prefixmap = {}
    if args.prefixes:
        # BUG FIX: yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; these files are plain data, so
        # safe_load is sufficient and correct.
        with open(args.prefixes, 'r') as f:
            prefixmap = yaml.safe_load(f)

    pattern_name = args.pattern
    with open(args.pattern, 'r') as f:
        tobj = yaml.safe_load(f)
    if 'pattern_name' not in tobj:
        tobj['pattern_name'] = pattern_name

    ontology_iri = args.base + args.name

    # REPAIR
    # See https://github.com/dosumis/dead_simple_owl_design_patterns/issues/26
    # historically we have used 'property', but the schema now defined as 'annotationProperty'
    if 'annotations' in tobj:
        for aobj in tobj['annotations']:
            if 'property' in aobj:
                warnings.warn("Updating deprecated key: property -> annotationProperty")
                aobj['annotationProperty'] = aobj['property']

    global gcif
    if args.gci:
        gcif = open(args.gci, 'w')
        gcif.write('Prefix(:=<%s>)\n' % args.base)
        for (k, v) in prefixmap.items():
            gcif.write('Prefix(%s:=<%s>)\n' % (k, v))
        gcif.write('Ontology(<%s-gci>\n' % ontology_iri)

    bindings_list = []
    if args.input:
        bindings_list = parse_bindings_list(args.input)
    if args.xpfiles:
        bindings_list = parse_xp_files(args.xpfiles)

    # Header: prefix declarations.
    print('Prefix: : <%s>' % args.base)
    print('Prefix: IAO: <http://purl.obolibrary.org/obo/IAO_>')
    print('Prefix: DOSDP: <http://geneontology.org/foo/>')
    print('Prefix: oio: <http://www.geneontology.org/formats/oboInOwl#>')
    for (pfx, uri) in prefixmap.items():
        print('Prefix: %s: <%s>' % (pfx, uri))
    print()
    print(" ## Auto-generated")
    print()
    print('Ontology: <%s>' % ontology_iri)
    if 'imports' in tobj:
        for uri in tobj['imports']:
            print(' Import: <%s>' % uri)

    # Declare the annotation properties used by generated axioms.
    print('AnnotationProperty: IAO:0000115')
    print('AnnotationProperty: oio:hasDbXref')
    for v in tobj['vars']:
        print('AnnotationProperty: %s' % make_internal_annotation_property(tobj, v))
    print('AnnotationProperty: %s' % get_applies_pattern_property())
    print('AnnotationProperty: oio:hasRelatedSynonym')
    if 'annotations' in tobj:
        for aobj in tobj['annotations']:
            print('AnnotationProperty: %s' % aobj['annotationProperty'])

    p = tobj
    # Build map of quoted entity replacements and declare the entities.
    qm = {}
    for k in p['classes']:
        iri = p['classes'][k]
        qm[k] = iri
        print('Class: %s ## %s' % (iri, k))
    for k in p['relations']:
        iri = p['relations'][k]
        qm[k] = iri
        print('ObjectProperty: %s ## %s' % (iri, k))

    print('## Auto-generated classes')
    for bindings in bindings_list:
        # Class IRI precedence: explicit 'iri' binding > 'defined_class'
        # binding > 'class_iri' template > random UUID.
        cls_iri = uuid_iri()
        if 'class_iri' in tobj:
            cls_iri = apply_template(tobj['class_iri'], bindings)
        if 'iri' in bindings:
            cls_iri = bindings['iri']
        elif 'defined_class' in bindings:
            cls_iri = bindings['defined_class']
        apply_pattern(tobj, qm, bindings, cls_iri, args)

    if gcif:
        gcif.write(')')
        # BUG FIX: was `gcif.close` -- a bare attribute access that never
        # called the method, so the GCI file was never flushed/closed.
        gcif.close()
def uuid_iri():
    """Return a fresh, random urn:uuid IRI."""
    return 'urn:uuid:' + str(uuid.uuid4())
def render_iri(iri):
    """Wrap full IRIs in angle brackets; leave CURIEs/short names as-is."""
    return "<%s>" % iri if iri.startswith(("urn:", "http")) else iri
def parse_bindings_list(fn):
    """Read a TSV (or CSV when the filename ends in 'csv') into dict rows."""
    sep = ',' if fn.endswith("csv") else '\t'
    reader = csv.DictReader(open(fn), delimiter=sep)
    return list(reader)
## Reads tabular files and applies a cross-product over their rows.
def parse_xp_files(fns):
    """Return the pairwise cross-product of rows from two tabular files.

    Each result row is the union of one row from the first file and one
    from the second (the second file's columns win on key collision).
    TODO: recurse for len(fns) > 2.
    """
    tables = []
    for fn in fns:
        sep = ',' if fn.endswith("csv") else '\t'
        tables.append(list(csv.DictReader(open(fn), delimiter=sep)))
    combined = []
    # assume pairwise: TODO: recurse for len>2
    for left in tables[0]:
        for right in tables[1]:
            row = left.copy()
            row.update(right)
            combined.append(row)
    return combined
## For each template variable, collect its label plus all synonyms, then
## instantiate the name template with every combination of those options.
def get_synonym_combos(tobj, bindings, synmap, label):
    """Build related-synonym annotations for one generated class.

    Returns a list of ('oio:hasRelatedSynonym', text) tuples, one per
    combination of per-variable label/synonym choices, skipping any text
    equal to the primary *label*.
    """
    per_var = []
    for var in tobj['vars']:
        options = []
        entity = bindings[var]
        label_key = var + " label"
        if label_key in bindings:
            options.append(bindings[label_key])
        for entry in synmap.get(entity, []):
            options.append(entry['synonym'])
        per_var.append(options)
    template = tobj['text']
    results = []
    for combo in itertools.product(*per_var):
        candidate = template % combo
        if candidate != label:
            results.append(('oio:hasRelatedSynonym', candidate))
    return results
def get_values(tobj, bindings, isLabel=False):
    """Resolve each template variable to its substitution value.

    With *isLabel*, prefer the '<var> label' binding, falling back to the
    raw binding when absent or None; otherwise render the bound IRI.
    """
    resolved = []
    for var in tobj['vars']:
        if isLabel:
            key = var + " label"
            value = bindings[key] if key in bindings else bindings[var]
            if value is None:
                value = bindings[var]
        else:
            value = render_iri(bindings[var])
        resolved.append(value)
    return resolved
def apply_template(tobj, bindings, isLabel=False):
    """Fill tobj['text'] (a %-style template) with the resolved values."""
    return tobj['text'] % tuple(get_values(tobj, bindings, isLabel))
def apply_pattern(p, qm, bindings, cls_iri, args):
    """Emit the OMN stanza for one generated class to stdout.

    :param p: parsed pattern object (vars/name/def/annotations/axioms...)
    :param qm: quoted-entity label -> IRI map
    :param bindings: one row of template values
    :param cls_iri: IRI for the generated class
    :param args: parsed CLI args (suppress/annotate are read here)

    Side effects: prints OMN to stdout; GCI axioms are appended to the
    global `gcif` file; reads globals `synmap` and (via write_annotation)
    `titlemode`.
    """
    print("")
    var_bindings = {}
    # Declare each variable filler as a class, labelled when possible.
    for v in p['vars']:
        if v not in bindings:
            sys.stderr.write("variable "+v+" is specified in vars: but is not in bindings:\n")
        iri = bindings[v]
        var_bindings[v] = iri
        vl = v + " label"
        lbl = bindings[vl] if vl in bindings else ''
        if not lbl:
            lbl = iri
        print('Class: %s ## %s' % (iri,lbl))
    print("## "+str(json.dumps(var_bindings)))
    print('Class: %s' % render_iri(cls_iri))
    label = ""
    if 'name' in p:
        tobj = p['name']
        text = apply_template(tobj, bindings, True)
        if 'label' not in args.suppress:
            ##TODO
            # Prefer an explicitly supplied label over the templated one.
            if 'iri label' in bindings and bindings['iri label']:
                label = bindings['iri label']
            elif 'defined_class label' in bindings and bindings['defined_class label']:
                label = bindings['defined_class label']
            else:
                label = text
            write_annotation('rdfs:label', label, bindings)
    if 'def' in p:
        # Textual definition (IAO:0000115), xref'd back to the pattern.
        tobj = p['def']
        text = apply_template(tobj, bindings, True)
        # todo: protect against special characters
        write_annotation('IAO:0000115', text, bindings, [('oio:hasDbXref', get_pattern_xref(args))])
    if 'annotations' in p:
        tanns = p['annotations']
        for tobj in tanns:
            ap = tobj['annotationProperty']
            text = apply_template(tobj, bindings, True)
            # todo: protect against special characters
            write_annotation(ap, text, bindings, [('oio:hasDbXref', get_pattern_xref(args))])
    if 'equivalentTo' in p:
        tobj = p['equivalentTo']
        expr_text = apply_template(tobj, bindings)
        expr_cmt = apply_template(tobj, bindings,True).replace("\n", "")
        expr_text = replace_quoted_entities(qm, expr_text)
        print(' EquivalentTo: %s ## %s' % (expr_text,expr_cmt))
    if 'subClassOf' in p:
        tobj = p['subClassOf']
        expr_text = apply_template(tobj, bindings)
        expr_cmt = apply_template(tobj, bindings,True).replace("\n", "")
        expr_text = replace_quoted_entities(qm, expr_text)
        print(' SubClassOf: %s ## %s' % (expr_text,expr_cmt))
    if 'axioms' in p:
        # General axioms cannot be expressed in OMN; they go to the
        # functional-syntax GCI side file instead of stdout.
        for tobj in p['axioms']:
            expr_text = apply_template(tobj, bindings)
            expr_cmt = apply_template(tobj, bindings,True).replace("\n", "")
            expr_text = replace_quoted_entities(qm, expr_text)
            gcif.write(' %s ## %s\n' % (expr_text,expr_cmt))
    if len(synmap.keys()) > 0:
        # Generate related synonyms from all label/synonym combinations.
        if 'name' in p:
            tobj = p['name']
            texts = get_synonym_combos(tobj, bindings, synmap, label)
            if len(texts) > 0:
                print(" ## Auto-syns\n")
                for (prop,text) in texts:
                    write_annotation(prop, text, bindings)
    if args.annotate:
        # Record which pattern and which variable fillers produced this
        # class, for provenance.
        pn = p['pattern_name']
        print(' Annotations: %s "%s"' % (get_applies_pattern_property(), pn))
        for (k,v) in var_bindings.items():
            print(' Annotations: %s %s' % (make_internal_annotation_property(p, k), v))
def get_applies_pattern_property():
    """Annotation property linking a generated class to its source pattern."""
    return 'DOSDP:applies-pattern'
def make_internal_annotation_property(p, s):
    """Derive the pattern-scoped annotation property name for variable *s*."""
    return "%s/%s" % (p['pattern_name'], s)
def write_annotation(ap, text, bindings=None, anns=None):
    """Print an OMN ``Annotations:`` line for property *ap* with value *text*.

    A binding whose key equals *ap* overrides *text* (unless it is the
    empty string). *anns* is a list of (property, value) axiom annotations
    prepended to the line. Reads the module-global `titlemode`.

    BUG FIX: the defaults were mutable (``{}`` / ``[]``); replaced with
    None-sentinels to avoid the shared-mutable-default pitfall.
    """
    if bindings is None:
        bindings = {}
    if anns is None:
        anns = []
    if titlemode:
        # Auto-capitalize the first token (e.g. HPO label conventions).
        toks = text.split(" ")
        toks[0] = toks[0].title()
        text = " ".join(toks)
    if ap in bindings:
        # override: explicit per-row value beats the templated one
        if bindings[ap] != '':
            text = bindings[ap]
    # todo: allow non-literal annotations
    axiom_anns_str = ""
    for (prop, val) in anns:
        axiom_anns_str += "Annotations: " if axiom_anns_str == "" else ", "
        axiom_anns_str += '{} {}'.format(prop, safe_quote(val))
    print(' Annotations: %s %s %s' % (axiom_anns_str, ap, safe_quote(text)))
def safe_quote(text):
    """Return *text* as a double-quoted OMN literal (newlines -> spaces)."""
    escaped = text.replace("\n", " ").replace('"', '\\"')
    return '"%s"' % escaped
# Stolen from DOS' code
def replace_quoted_entities(qm, text):
    """Replace each 'quoted entity' label in *text* with its IRI from *qm*.

    BUG FIX: the label is now passed through re.escape, so labels that
    contain regex metacharacters (e.g. '+', '(', ')') are matched
    literally instead of being misinterpreted as a pattern and silently
    failing to substitute.
    """
    for label, iri in qm.items():
        text = re.sub("'" + re.escape(label) + "'", iri, text)
    return text
def get_pattern_xref(args):
    """Build the xref CURIE 'ONT:patterns/<pattern>' from CLI arguments."""
    ont = args.name.replace('.owl', '')
    ont = ont.replace('http://purl.obolibrary.org/obo/', '')
    ont = ont.split('/')[0].upper()
    pattern = args.pattern.replace('.yaml', '')
    return '{}:patterns/{}'.format(ont, pattern)
if __name__ == "__main__":
main()
| |
from __future__ import division
from foundation.Message import Message
from foundation.Bid import Bid
from foundation.Agent import Agent
from foundation.Service import Service
from foundation.DecisionVariable import DecisionVariable
from ProviderAgentException import ProviderException
from foundation.FoundationException import FoundationException
from foundation.AgentServer import AgentServerHandler
from foundation.AgentType import AgentType
import foundation.agent_properties
import uuid
import logging
import logging.handlers
import time
import xml.dom.minidom
from math import fabs
import os
import threading
LOG_FILENAME = 'customer.log'

# Check if log exists and should therefore be rolled
needRoll = os.path.isfile(LOG_FILENAME)

logger = logging.getLogger('consumer_application')
fh = logging.handlers.RotatingFileHandler(LOG_FILENAME, backupCount=5)
fh.setLevel(logging.INFO)
# BUG FIX: the original format string had a stray ')' after threadName and
# was missing the '%' before (asctime)s, so the literal text "(asctime)s"
# appeared in every log line instead of the timestamp.
formatter = logging.Formatter('%(threadName)-10s - %(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)

# This is a stale log, so roll it
if needRoll:
    # Roll over on application start
    logger.handlers[0].doRollover()
class Consumer(Agent):
'''
The Consumer class defines methods to be used by the consumer
agent to purchase offers on the marketplace, initizalize the
agent, get the disutility function, and execute the buying
algorithm.
'''
def __init__(self, strID, Id, serviceId, customer_seed):
try:
self.lock = threading.RLock()
agent_type = AgentType(AgentType.CONSUMER_TYPE)
super(Consumer, self).__init__(strID, Id, agent_type, serviceId, customer_seed,' ',' ', ' ', ' ', self.lock)
logger.debug('Agent: %s - Consumer Created', self._list_vars['strId'])
except FoundationException as e:
raise ProviderException(e.__str__())
'''
Create the purchase message without indicating bid and quantity.
'''
def createPurchaseMessage(self):
messagePurchase = Message('')
messagePurchase.setMethod(Message.RECEIVE_PURCHASE)
uuidId = uuid.uuid1() # make a UUID based on the host ID and current time
idStr = str(uuidId)
messagePurchase.setParameter('Id', idStr)
messagePurchase.setParameter('Service', self._service.getId())
return messagePurchase
'''
The Purchase method assigns all the parameters and consumer ID
to the message to be send to the Marketplace.
In the end, the function sends the message to the marketplace
and checks if it was succesfully received.
'''
def purchase(self, messagePurchase, bid, quantity):
# Copy basic data from the purchase message given as parameter.
message = Message('')
message.setMethod(Message.RECEIVE_PURCHASE)
message.setParameter('Id', messagePurchase.getParameter('Id'))
message.setParameter('Service', messagePurchase.getParameter('Service'))
# Set the rest of the data from the bid and quantity given as parameters.
message.setParameter('Bid', bid.getId())
message.setParameter('Quantity', str(quantity))
for decisionVariable in (self._service)._decision_variables:
value = ((self._service)._decision_variables[decisionVariable]).getSample(DecisionVariable.PDST_VALUE)
message.setParameter(decisionVariable, str(value))
messageResult = self.sendMessageMarket(message)
# Check if message was succesfully received by the marketplace
if messageResult.isMessageStatusOk():
quantity = float(messageResult.getParameter("Quantity_Purchased"))
return quantity
else:
# logger.error('Agent: %s - Period: %s - Purchase not received! Communication failed - Message: %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), messageResult.__str__())
raise ProviderException('Purchase not received! Communication failed')
def initialize(self):
'''
The initialize function is responsible for initializing the
consumer agent and get the decision variables from the simulation
environment (demand server).
'''
logger.debug('Period: %s Agent: %s - Initilizing consumer', str(self._list_vars['Current_Period']), self._list_vars['strId'])
for decisionVariable in (self._service)._decision_variables:
((self._service)._decision_variables[decisionVariable]).executeSample(self._list_vars['Random'])
'''
The getDisutility function is responsible for assigning the consumer
agent a disutility function from the simulation environment (
demand server)
'''
    def getDisutility(self, bid):
        """Compute this consumer's disutility for *bid* (lower is better).

        For quality-modelled variables the disutility grows with the
        relative shortfall (maximize objective) or excess (minimize
        objective) of the offered value versus the consumer's sampled
        ideal, weighted by the sampled sensitivity. All other variables
        are treated as price-like and contribute a signed relative
        difference. Returns the sum of both components.
        """
        # logger.debug('Agent: %s - Period: %s - Initiating getDisutility - Bid: %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), bid.getId())
        disutility_quality = 0 # can be only non negative
        disutility_price = 0 # can be positive or negative
        for decisionVariable in (self._service)._decision_variables:
            # Obtains the sampled value
            valueSample = ((self._service)._decision_variables[decisionVariable]).getSample(DecisionVariable.PDST_VALUE)
            # logger.debug('Agent: %s - Decision Variable: %s - Value %s', self._list_vars['strId'], decisionVariable, str(valueSample))
            # Obtains the sampled sensitivity
            sensitivitySample = ((self._service)._decision_variables[decisionVariable]).getSample(DecisionVariable.PDST_SENSITIVITY)
            offered = bid.getDecisionVariable(decisionVariable)
            # logger.debug('Agent: %s - Decision Variable %s Sensitivity: %s - Offered %s', self._list_vars['strId'], decisionVariable, str(sensitivitySample), str(offered))
            if (((self._service)._decision_variables[decisionVariable]).getModeling() == DecisionVariable.MODEL_QUALITY):
                # Quality: penalize only deviation in the "wrong" direction,
                # relative to the sampled ideal and scaled by sensitivity.
                if (((self._service)._decision_variables[decisionVariable]).getOptimizationObjective() == DecisionVariable.OPT_MAXIMIZE):
                    if offered < valueSample:
                        disutility_quality = disutility_quality + (max(0, ((valueSample - offered)/ valueSample)) * sensitivitySample)
                elif (((self._service)._decision_variables[decisionVariable]).getOptimizationObjective() == DecisionVariable.OPT_MINIMIZE):
                    if offered > valueSample:
                        disutility_quality = disutility_quality + (max(0, ((offered - valueSample)/ valueSample)) * sensitivitySample)
            else:
                # Price-like variable: signed relative difference (an offer
                # below the sampled reference reduces total disutility).
                disutility_price = disutility_price + (((offered - valueSample) / valueSample) * sensitivitySample)
        # logger.debug('Agent: %s - Period: %s - Finishing getDisutility - Disutility price: %s - Disutility Quality %s', str(self._list_vars['strId']), str(self._list_vars['Current_Period']), str(disutility_price), str(disutility_quality))
        disutility = disutility_price + disutility_quality
        # logger.debug('Agent: %s - Period: %s - Finishing getDisutility - Disutility: %s', str(self._list_vars['strId']), str(self._list_vars['Current_Period']), str(disutility))
        return disutility
'''
The exec_algorithm function finds available offerings and chooses
the one that fits the consumer needs the best, based on the prior
signals received by the simulation environment (demand server).
'''
def exec_algorithm(self):
    """Find available offerings and purchase the mix that best fits the
    consumer's needs, based on prior signals from the demand server.

    Bids whose disutility is below the configured threshold are grouped
    by disutility value, then purchased in increasing-disutility order
    (random choice within equal-disutility groups) until the requested
    quantity is filled or no acceptable bids remain.
    """
    Threshold = foundation.agent_properties.threshold
    # logger.debug('Agent: %s - Period %s - Initiating exec_algorithm ', self._list_vars['strId'], str(self._list_vars['Current_Period']) )
    if (self._list_vars['State'] == AgentServerHandler.ACTIVATE):
        # Sends the request to the market place to find the best offerings
        serviceId = (self._service).getId()
        dic_return = self.createAskBids(serviceId)
        parameters = self._list_vars['Parameters']
        # Units still to be purchased; decremented as purchases succeed.
        quantity = parameters['quantity']
        # logger.debug('Agent: %s - Period: %s - Number of fronts: %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), str(len(dic_return)))
        purchased = False
        # Sorts the offerings based on the customer's needs
        # (fronts are visited from the highest key downwards).
        keys_sorted = sorted(dic_return,reverse=True)
        purchaseMessage = self.createPurchaseMessage()
        bidId = ' '
        # Maps disutility value -> list of bids with that disutility.
        evaluatedBids = {}
        numBids = 0
        # Get bids available for purchasing.
        for front in keys_sorted:
            bidList = dic_return[front]
            # logger.debug('Agent: %s - Period: %s - Front: %s - Nbr Bids: %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), str(front), str(len(bidList)))
            for bid in bidList:
                disutility = self.getDisutility(bid)
                # Only bids below the threshold are purchase candidates.
                if disutility < Threshold:
                    numBids = numBids + 1
                    if disutility in evaluatedBids:
                        evaluatedBids[disutility].append(bid)
                    else:
                        evaluatedBids[disutility] = [bid]
            disutilities_sorted = sorted(evaluatedBids)
            # Purchase quantities requested.
            for disutility in disutilities_sorted:
                # logger.debug('Agent: %s - Period: %s - Front: %s disutility: %s Nbr Bids: %s Threshold %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), str(front), str(disutility), str(len(evaluatedBids[disutility]) ), str(Threshold) )
                lenght = len(evaluatedBids[disutility])
                while (lenght > 0) and (quantity > 0) :
                    # Break ties randomly among equally-attractive bids.
                    index_rand = (self._list_vars['Random']).randrange(0, lenght)
                    # logger.debug('Agent: %s - Period: %s - Index: %d \n', self._list_vars['strId'], str(self._list_vars['Current_Period']),index_rand)
                    bid = evaluatedBids[disutility].pop(index_rand)
                    qtyPurchased = self.purchase(purchaseMessage, bid, quantity)
                    # Register the bid as purchased.
                    if qtyPurchased > 0:
                        bidId = bidId + ',' + bid.getId()
                    # update quantities.
                    if (qtyPurchased >= quantity):
                        quantity = 0
                        break
                    else:
                        quantity = quantity - qtyPurchased
                    lenght = len(evaluatedBids[disutility])
            # Stop scanning fronts once the demand is fully satisfied.
            if (quantity == 0):
                break
        qtyPurchased = parameters['quantity'] - quantity
        logger.debug('Agent: %s - :Period: %s - :AvailBids: %s :initial qty:%s :qty_purchased:%s :Purchase the bid: %s', self._list_vars['strId'], str(self._list_vars['Current_Period']), str(numBids), str(parameters['quantity']), str(qtyPurchased), bidId )
    else:
        logger.debug(' Agent: %s - Period: %s - could not puchase', self._list_vars['strId'], str(self._list_vars['Current_Period']))
    # logger.debug('Agent: %s - Period: %s - Ending exec_algorithm',self._list_vars['strId'], str(self._list_vars['Current_Period']))
'''
The run method activates the available consumer agents.
'''
def run(self):
    """Main loop of the consumer agent thread/process.

    Polls the agent's shared state: on TO_BE_ACTIVED it initializes,
    runs one round of exec_algorithm, then returns to IDLE; on
    TERMINATE it shuts the agent down and exits.
    """
    proc_name = self.name
    self.start_agent()
    while (True):
        if (self._list_vars['State'] == AgentServerHandler.TERMINATE):
            break
        else:
            if (self._list_vars['State'] == AgentServerHandler.TO_BE_ACTIVED):
                self.initialize()
                logger.debug('Agent: %s - Initialized' , self._list_vars['strId'])
                self._list_vars['State'] = AgentServerHandler.ACTIVATE
                logger.debug('Agent: %s - Now in state %s' , self._list_vars['strId'], self._list_vars['State'])
                self.exec_algorithm()
                # Transition back to IDLE under the lock so the server
                # handler sees a consistent state.
                self.lock.acquire()
                try:
                    self._list_vars['State'] = AgentServerHandler.IDLE
                finally:
                    self.lock.release()
            elif (self._list_vars['State'] == AgentServerHandler.IDLE):
                # Sleep briefly to avoid busy-waiting for activation.
                time.sleep(0.1)
    # logger.debug('Agent: %s - Shuting down', self._list_vars['strId'])
    # Close the sockets
    self.stop_agent()
    return
# End of Provider class
| |
#!/usr/bin/env python
"""
Since we do not want to store large binary data files in our Git repository,
we fetch them from a network resource (see fetch_data_all below).
The data we download is described in a json file. The file format is a dictionary
of dictionaries. The top level key is the file name. The returned dictionary
contains a sha512 checksum and possibly a url and boolean flag indicating
the file is part of an archive. The sha512 checksum is mandatory.
When the optional url is given, we attempt to download from that url, otherwise
we attempt to download from the list of servers returned by the
get_servers() function. Files that are contained in archives are
identified by the archive flag.
Example json file contents:
{
"SimpleITK.jpg": {
"sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46"
},
"POPI/meta/00-P.mhd": {
"url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar",
"archive": "true",
"sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc"
},
"CIRS057A_MR_CT_DICOM/readme.txt": {
"archive": "true",
"sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef"
}
}
Notes:
1. The file we download can be inside an archive. In this case, the sha512
checksum is that of the archive.
"""
import hashlib
import sys
import os
import json
import errno
import warnings
# http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
def url_download_report(bytes_so_far, url_download_size, total_size):
    """Progress hook for url_download_read: print download progress to stdout.

    Args:
        bytes_so_far: number of bytes received so far.
        url_download_size: chunk size used by the downloader; progress is
            only printed once more than one chunk has arrived.
        total_size: expected total size in bytes (from Content-Length).
    """
    if total_size:
        percent = float(bytes_so_far) / total_size
        percent = round(percent * 100, 2)
    else:
        # Guard against a zero/unknown Content-Length, which previously
        # raised ZeroDivisionError.
        percent = 0.0
    if bytes_so_far > url_download_size:
        # Note that the carriage return is at the beginning of the
        # string and not the end. This accommodates usage in
        # IPython notebooks. Otherwise the string is not
        # displayed in the output.
        sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)" %
                         (bytes_so_far, total_size, percent))
        sys.stdout.flush()
    if bytes_so_far >= total_size:
        # Final line: terminate with a newline so later output starts fresh.
        sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)\n" %
                         (bytes_so_far, total_size, percent))
        sys.stdout.flush()
def url_download_read(url, outputfile, url_download_size=8192 * 2, report_hook=None):
    """Download `url` to `outputfile` in chunks of `url_download_size` bytes.

    Args:
        url: source URL; http(s) and file schemes are supported.
        outputfile: local path to write the downloaded bytes to.
        url_download_size: read-chunk size in bytes.
        report_hook: optional callable(bytes_so_far, chunk_size, total_size)
            invoked after each chunk (see url_download_report).

    Returns:
        "Downloaded Successfully" on success, or a human-readable error
        string ("HTTP Error: ..." / "URL Error: ...") on failure.
    """
    # Use urllib to download the data. The Requests package, highly
    # recommended for this task, doesn't support the file scheme so we opted
    # for urllib which does.
    try:
        # Python 3: the error classes are documented in urllib.error, not
        # urllib.request (the old combined import relied on an undocumented
        # re-export). Also dropped an unused xml.dom.minidom import.
        from urllib.request import urlopen
        from urllib.error import URLError, HTTPError
    except ImportError:
        # Python 2 fallback.
        from urllib2 import urlopen, URLError, HTTPError
    # Open the url
    try:
        url_response = urlopen(url)
    except HTTPError as e:
        return "HTTP Error: {0} {1}\n".format(e.code, url)
    except URLError as e:
        return "URL Error: {0} {1}\n".format(e.reason, url)
    # We download all content types - the assumption is that the sha512 ensures
    # that what we received is the expected data.
    try:
        # Python 3
        content_length = url_response.info().get("Content-Length")
    except AttributeError:
        # Python 2
        content_length = url_response.info().getheader("Content-Length")
    total_size = int(content_length.strip())
    bytes_so_far = 0
    with open(outputfile, "wb") as local_file:
        while True:
            try:
                url_download = url_response.read(url_download_size)
                bytes_so_far += len(url_download)
                if not url_download:
                    break
                local_file.write(url_download)
            # handle errors
            except HTTPError as e:
                return "HTTP Error: {0} {1}\n".format(e.code, url)
            except URLError as e:
                return "URL Error: {0} {1}\n".format(e.reason, url)
            if report_hook:
                report_hook(bytes_so_far, url_download_size, total_size)
    return "Downloaded Successfully"
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python?rq=1
def mkdir_p(path):
    """Create `path` and any missing parents, like `mkdir -p`.

    Silently succeeds when the directory already exists; every other
    OS-level failure is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Only swallow the "already exists as a directory" case.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
#http://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
def deprecated(func):
    """Decorator marking `func` as deprecated.

    Every call of the wrapped function emits a DeprecationWarning before
    delegating to the original function. Name, docstring and __dict__ of
    the original are copied onto the wrapper.
    """
    def wrapper(*args, **kwargs):
        # Temporarily force the warning to be shown, then restore the filter.
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn(
            "Call to deprecated function {}.".format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)
        return func(*args, **kwargs)
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
def get_servers():
    """Return the list of URL templates to try when downloading a file.

    Every entry contains a "%(hash)" placeholder that callers substitute
    with the file's sha512 checksum. Local object stores listed in the
    semicolon-separated ExternalData_OBJECT_STORES environment variable
    are appended after the network servers.
    """
    import os
    servers = [
        # NIAID S3 data store
        "https://s3.amazonaws.com/simpleitk/public/notebooks/SHA512/%(hash)",
        # Girder server hosted by kitware
        "https://data.kitware.com/api/v1/file/hashsum/sha512/%(hash)/download",
    ]
    # Local file store(s) configured through the environment.
    if 'ExternalData_OBJECT_STORES' in os.environ:
        for local_object_store in os.environ['ExternalData_OBJECT_STORES'].split(";"):
            servers.append("file://{0}/SHA512/%(hash)".format(local_object_store))
    return servers
def output_hash_is_valid(known_sha512, output_file):
    """Return True iff `output_file` exists and its sha512 digest equals
    `known_sha512` (lower-case hex)."""
    if not os.path.exists(output_file):
        return False
    hasher = hashlib.sha512()
    chunk_size = 128 * hasher.block_size
    # Hash in fixed-size chunks so large files don't have to fit in memory.
    with open(output_file, 'rb') as fp:
        chunk = fp.read(chunk_size)
        while chunk:
            hasher.update(chunk)
            chunk = fp.read(chunk_size)
    return hasher.hexdigest() == known_sha512
def fetch_data_one(onefilename, output_directory, manifest_file, verify=True, force=False):
    """Download a single file described in `manifest_file` into
    `output_directory`, verifying its sha512 checksum.

    Tries each server from get_servers() (plus an optional per-file "url"
    manifest entry) until a download matches the expected hash. Archive
    files (tar/zip) are unpacked in place after download. Raises Exception
    when a downloaded file has a wrong hash or no source provides the file.
    Returns the absolute path of the (downloaded or pre-existing) file.
    """
    import tarfile, zipfile
    with open(manifest_file, 'r') as fp:
        manifest = json.load(fp)
    assert onefilename in manifest, "ERROR: {0} does not exist in {1}".format(onefilename, manifest_file)
    sys.stdout.write("Fetching {0}\n".format(onefilename))
    output_file = os.path.realpath(os.path.join(output_directory, onefilename))
    data_dictionary = manifest[onefilename]
    # The sha512 entry is mandatory in the manifest.
    sha512 = data_dictionary['sha512']
    # List of places where the file can be downloaded from
    all_urls = []
    for url_base in get_servers():
        all_urls.append(url_base.replace("%(hash)", sha512))
    if "url" in data_dictionary:
        all_urls.append(data_dictionary["url"])
    new_download = False
    for url in all_urls:
        # Only download if force is true or the file does not exist.
        if force or not os.path.exists(output_file):
            mkdir_p(os.path.dirname(output_file))
            url_download_read(url, output_file, report_hook=url_download_report)
            # Check if a file was downloaded and has the correct hash
            if output_hash_is_valid(sha512, output_file):
                new_download = True
                # Stop looking once found
                break
            # If the file exists this means the hash is invalid we have a problem.
            elif os.path.exists(output_file):
                error_msg = "File " + output_file
                error_msg += " has incorrect hash value, " + sha512 + " was expected."
                raise Exception(error_msg)
    # Did not find the file anywhere.
    if not os.path.exists(output_file):
        error_msg = "File " + "\'" + os.path.basename(output_file) + "\'"
        error_msg += " could not be found in any of the following locations:\n"
        error_msg += ", ".join(all_urls)
        raise Exception(error_msg)
    if not new_download and verify:
        # If the file was part of an archive then we don't verify it. These
        # files are only verified on download
        if ( not "archive" in data_dictionary) and ( not output_hash_is_valid(sha512, output_file) ):
            # Attempt to download if sha512 is incorrect.
            fetch_data_one(onefilename, output_directory, manifest_file, verify,
                           force=True)
    # If the file is in an archive, unpack it.
    if tarfile.is_tarfile(output_file) or zipfile.is_zipfile(output_file):
        # Rename the archive aside, extract next to it, then delete it.
        tmp_output_file = output_file + ".tmp"
        os.rename(output_file, tmp_output_file)
        if tarfile.is_tarfile(tmp_output_file):
            archive = tarfile.open(tmp_output_file)
        if zipfile.is_zipfile(tmp_output_file):
            archive = zipfile.ZipFile(tmp_output_file, 'r')
        archive.extractall(os.path.dirname(tmp_output_file))
        archive.close()
        os.remove(tmp_output_file)
    return output_file
def fetch_data_all(output_directory, manifest_file, verify=True):
    """Download every file listed in `manifest_file` into `output_directory`.

    Existing files are reused (force=False); see fetch_data_one for the
    per-file download/verification logic.
    """
    with open(manifest_file, 'r') as fp:
        manifest = json.load(fp)
    for entry_name in manifest:
        fetch_data_one(entry_name, output_directory, manifest_file, verify,
                       force=False)
def fetch_data(cache_file_name, verify=False, cache_directory_name="../Data"):
    """
    fetch_data is a simplified interface that requires
    relative pathing with a manifest.json file located in the
    same cache_directory_name name.
    By default the cache_directory_name is "Data" relative to the current
    python script. An absolute path can also be given.
    """
    # Resolve a relative cache directory against this script's location.
    if not os.path.isabs(cache_directory_name):
        script_directory = os.path.dirname(__file__)
        cache_directory_name = os.path.join(script_directory, cache_directory_name)
    cache_manifest_file = os.path.join(cache_directory_name, 'manifest.json')
    assert os.path.exists(cache_manifest_file), "ERROR, {0} does not exist".format(cache_manifest_file)
    return fetch_data_one(cache_file_name, cache_directory_name, cache_manifest_file, verify=verify)
if __name__ == '__main__':
    # CLI entry point: python <script> output_directory manifest.json
    if len(sys.argv) < 3:
        print('Usage: ' + sys.argv[0] + ' output_directory manifest.json')
        sys.exit(1)
    output_directory, manifest = sys.argv[1], sys.argv[2]
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    fetch_data_all(output_directory, manifest)
| |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import calendar
import logging
import queue
import threading
import time
import urllib.parse
import urllib.error
import urllib.request
import dcm.agent.plugins.loader as plugin_loader
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class JobStatus(object):
    """Enumeration of the lifecycle states of a long-running job."""
    WAITING = "WAITING"    # queued, not yet picked up by a JobRunner
    RUNNING = "RUNNING"    # currently executing
    ERROR = "ERROR"        # finished unsuccessfully
    COMPLETE = "COMPLETE"  # finished successfully
class NewLongJob(object):
    """Work item placed on the runner queue describing one long job.

    An instance with `quit` set to True acts as a shutdown sentinel that
    tells a JobRunner thread to exit its processing loop.
    """

    def __init__(self, items_map, name, arguments, job_id, request_id):
        self.job_id = job_id
        self.request_id = request_id
        # Plugin description map (includes "module_name").
        self.items_map = items_map
        self.name = name
        self.arguments = arguments
        # When True the consuming JobRunner thread shuts down.
        self.quit = False
class JobReply(object):
    """Mutable status record for one job, updated as the job progresses."""

    def __init__(self, job_id):
        self.job_id = job_id
        # Jobs report RUNNING from creation until completion or error.
        self.job_status = JobStatus.RUNNING
        # Timestamps are seconds since the epoch, UTC.
        self.start_date = calendar.timegm(time.gmtime())
        self.end_date = None
        self.reply_doc = None
        self.error = None
class JobRunner(threading.Thread):
    """Worker thread that executes long-running plugin jobs from a queue.

    Jobs arrive as NewLongJob items; progress is reported by scheduling
    the supplied callback (via dcm_events) with a JobReply object.
    """

    def __init__(self, conf, queue, job_update_callback):
        super(JobRunner, self).__init__()
        self._queue = queue
        self._exit = threading.Event()
        self._current_job = None
        # Callback invoked (through dcm_events) whenever job state changes.
        self._job_update_callback = job_update_callback
        self._conf = conf

    # It should be safe to call done without a lock
    def done(self):
        # Signal the run loop to stop and cancel any in-flight job.
        _g_logger.debug("done() called on worker %s .." % self.getName())
        self._exit.set()
        if self._current_job is not None:
            self._current_job.cancel()

    def run(self):
        _g_logger.info("Job runner %s thread starting." % self.getName())
        done = False
        while not done:
            try:
                # Blocking get; a NewLongJob with quit=True is the
                # shutdown sentinel inserted by LongRunner.shutdown().
                work = self._queue.get(True)
                if work.quit:
                    done = True
                    continue
                try:
                    _g_logger.debug("Running the long job %s:%s" %
                                    (work.name, work.request_id))
                    # NOTE(review): if this debug call or the JobReply
                    # constructor raised, job_reply would be unbound in the
                    # except/finally below -- confirm this cannot happen.
                    job_reply = JobReply(work.job_id)
                    # Announce the RUNNING state before executing the plugin.
                    dcm_events.register_callback(
                        self._job_update_callback, args=[job_reply])
                    plugin = plugin_loader.load_python_module(
                        work.items_map["module_name"],
                        self._conf,
                        work.request_id,
                        work.items_map,
                        work.name,
                        work.arguments)
                    reply_obj = plugin.run()
                    job_reply.reply_doc = reply_obj.get_reply_doc()
                except Exception as ex:
                    _g_logger.exception("An error occurred")
                    job_reply.error = str(ex)
                    job_reply.job_status = JobStatus.ERROR
                else:
                    # Derive the final status from the plugin's reply doc.
                    if job_reply.reply_doc is None:
                        job_reply.job_status = JobStatus.COMPLETE
                    elif job_reply.reply_doc["return_code"] == 0:
                        job_reply.job_status = JobStatus.COMPLETE
                    else:
                        job_reply.job_status = JobStatus.ERROR
                        job_reply.error = job_reply.reply_doc["message"]
                finally:
                    # Always stamp the end time and publish the final state.
                    job_reply.end_date = calendar.timegm(time.gmtime())
                    dcm_events.register_callback(
                        self._job_update_callback, args=[job_reply])
                    _g_logger.debug("Completed the long job %s:%s "
                                    "STATUS=%s" % (work.name, work.request_id,
                                                   job_reply.job_status))
            except queue.Empty:
                # NOTE(review): queue.get(True) blocks and should never raise
                # Empty, so this handler looks unreachable -- confirm.
                _g_logger.exception("The queue was empty. This shouldn't "
                                    "happen often")
            except Exception as ex:
                _g_logger.exception("Something went wrong processing the job")
            finally:
                self._current_job = None
        _g_logger.info("Job runner %s thread ending." % self.getName())
class LongRunner(object):
    """Owns the pool of JobRunner threads and the table of long jobs.

    Jobs are submitted with start_new_job(), tracked as DetachedJob
    entries, and removed from the table after a configurable retention
    period once they complete or fail.
    """

    def __init__(self, conf):
        # job_id -> DetachedJob; guarded by self._lock.
        self._job_table = {}
        self._job_id = 0
        self._lock = threading.RLock()
        self._conf = conf
        self._run_queue = queue.Queue()
        self._runner_list = []
        # Spin up the configured number of worker threads.
        for i in range(conf.workers_long_runner_threads):
            jr = JobRunner(conf, self._run_queue, self.job_update_callback)
            self._runner_list.append(jr)
            jr.start()

    def shutdown(self):
        # IF we want to make sure the queue is empty we must call
        # self._run_queue.join()
        # One quit sentinel per worker so every thread wakes and exits.
        for i in range(len(self._runner_list)):
            quit_job = NewLongJob(None, None, None, None, None)
            quit_job.quit = True
            self._run_queue.put(quit_job)
        for r in self._runner_list:
            _g_logger.debug("Stopping worker %s" % str(r))
            r.done()
            r.join()
            _g_logger.debug("Runner %s is done" % str(r))
        _g_logger.info("The dispatcher is closed.")

    def start_new_job(self, conf, request_id, items_map,
                      name, arguments):
        # Load the plugin up front so failures surface before queueing.
        module_name = items_map["module_name"]
        plugin = plugin_loader.load_python_module(
            module_name, conf, request_id, items_map, name, arguments)
        with self._lock:
            self._job_id += 1
            new_job = NewLongJob(
                items_map, name, arguments, self._job_id, request_id)
            detached_job = DetachedJob(self._conf, self._job_id,
                                       plugin, name, arguments)
            self._job_table[detached_job.get_job_id()] = detached_job
            _g_logger.debug("Starting new long job id=%s"
                            % str(detached_job.get_job_id()))
            self._run_queue.put(new_job)
            return detached_job

    def job_complete(self, job_id):
        # Keep finished jobs around for the retention window so clients can
        # still poll their final status; 0 means retain indefinitely.
        if self._conf.jobs_retain_job_time == 0:
            return
        dcm_events.register_callback(self._job_cleanup,
                                     args=[job_id],
                                     delay=self._conf.jobs_retain_job_time)

    def _job_cleanup(self, job_id):
        with self._lock:
            _g_logger.debug("Removing job %d from the table" % job_id)
            del self._job_table[job_id]

    def lookup_job(self, job_id):
        # Returns the DetachedJob, or None when unknown/expired.
        with self._lock:
            try:
                return self._job_table[job_id]
            except Exception:
                return None

    def job_update_callback(self, job_reply):
        # Invoked (via dcm_events) from JobRunner threads on state changes.
        with self._lock:
            try:
                _g_logger.debug("long runner poll has the lock, "
                                "updating %s" % str(job_reply.job_id))
                jd = self._job_table[job_reply.job_id]
                if job_reply.error:
                    jd.update(job_reply, message=str(job_reply.error))
                else:
                    jd.update(job_reply)
                # Schedule table cleanup once the job reaches a final state.
                if jd._job_status == JobStatus.ERROR or\
                   jd._job_status == JobStatus.COMPLETE:
                    self.job_complete(job_reply.job_id)
            except Exception:
                _g_logger.exception("Failed to update")
        return None
class DetachedJob(object):
    """Server-side record of a long job, queryable while the job runs.

    Instances mirror the latest JobReply (via update()) and render their
    state as a wire-format dict through get_message_payload().
    """

    def __init__(self, conf, job_id, plugin, command_name, arguments):
        self._customer_id = conf.customer_id
        self._description = command_name
        self._job_id = job_id
        self._job_status = JobStatus.WAITING
        self._start_date = 0
        self._end_date = 0
        self._message = None
        self._plugin = plugin
        self._command_name = command_name
        self._arguments = arguments
        self._error = None
        self._reply_doc = None

    def update(self, work_reply, message=None):
        """Copy status fields from `work_reply`; URL-quote any message.

        When no explicit message is given, the reply's error (if any) is
        used as the message instead.
        """
        self._job_status = work_reply.job_status
        self._start_date = work_reply.start_date
        self._end_date = work_reply.end_date
        self._reply_doc = work_reply.reply_doc
        self._error = work_reply.error
        if message:
            self._message = urllib.parse.quote(message)
        elif self._message is None and self._error is not None:
            self._message = urllib.parse.quote(str(self._error))

    def get_job_id(self):
        return self._job_id

    def get_message_payload(self):
        """Return the wire-format dict describing this job's state."""
        return {
            "customer_id": self._customer_id,
            "description": self._description,
            "job_id": self._job_id,
            "job_status": self._job_status,
            "message": self._message,
            "start_date": self._start_date,
            "end_date": self._end_date,
            "command_reply": self._reply_doc
        }
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Methods for decomposing StructuralTimeSeries models."""
import collections
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental import util as tfe_util
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.sts.internal import util as sts_util
def _split_covariance_into_marginals(covariance, block_sizes):
"""Split a covariance matrix into block-diagonal marginals of given sizes."""
start_dim = 0
marginals = []
for size in block_sizes:
end_dim = start_dim + size
marginals.append(covariance[..., start_dim:end_dim, start_dim:end_dim])
start_dim = end_dim
return marginals
def _decompose_from_posterior_marginals(
    model, posterior_means, posterior_covs, parameter_samples, initial_step=0):
  """Utility method to decompose a joint posterior into components.

  Args:
    model: `tfp.sts.Sum` instance defining an additive STS model.
    posterior_means: float `Tensor` of shape `concat(
      [[num_posterior_draws], batch_shape, num_timesteps, latent_size])`
      representing the posterior mean over latents in an
      `AdditiveStateSpaceModel`.
    posterior_covs: float `Tensor` of shape `concat(
      [[num_posterior_draws], batch_shape, num_timesteps,
      latent_size, latent_size])`
      representing the posterior marginal covariances over latents in an
      `AdditiveStateSpaceModel`.
    parameter_samples: Python `list` of `Tensors` representing posterior
      samples of model parameters, with shapes `[concat([
      [num_posterior_draws], param.prior.batch_shape,
      param.prior.event_shape]) for param in model.parameters]`. This may
      optionally also be a map (Python `dict`) of parameter names to
      `Tensor` values.
    initial_step: optional `int` specifying the initial timestep of the
      decomposition.

  Returns:
    component_dists: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of `model.components`)
      to `tfd.Distribution` instances representing the posterior marginal
      distributions on the process modeled by each component. Each distribution
      has batch shape matching that of `posterior_means`/`posterior_covs`, and
      event shape of `[num_timesteps]`.

  Raises:
    ValueError: if `model` has no `components` attribute, i.e., is not an
      additive (`tfp.sts.Sum`) model.
  """
  try:
    model.components
  except AttributeError:
    # Note the trailing spaces inside the literals: without them the
    # concatenated message previously read "...instance of`tfp.sts.Sum`...".
    raise ValueError('Model decomposed into components must be an instance of '
                     '`tfp.sts.Sum` (passed model {})'.format(model))

  with tf.name_scope('decompose_from_posterior_marginals'):

    # Extract the component means/covs from the joint latent posterior.
    latent_sizes = [component.latent_size for component in model.components]
    component_means = tf.split(posterior_means, latent_sizes, axis=-1)
    component_covs = _split_covariance_into_marginals(
        posterior_covs, latent_sizes)

    # Instantiate per-component state space models, and use them to push the
    # posterior means/covs through the observation model for each component.
    num_timesteps = dist_util.prefer_static_value(
        tf.shape(posterior_means))[-2]
    component_ssms = model.make_component_state_space_models(
        num_timesteps=num_timesteps,
        param_vals=parameter_samples, initial_step=initial_step)
    component_predictive_dists = collections.OrderedDict()
    for (component, component_ssm,
         component_mean, component_cov) in zip(model.components, component_ssms,
                                               component_means, component_covs):
      component_obs_mean, component_obs_cov = (
          component_ssm.latents_to_observations(
              latent_means=component_mean,
              latent_covs=component_cov))

      # Using the observation means and covs, build a mixture distribution
      # that integrates over the posterior draws.
      component_predictive_dists[component] = sts_util.mix_over_posterior_draws(
          means=component_obs_mean[..., 0],
          variances=component_obs_cov[..., 0, 0])
  return component_predictive_dists
def decompose_by_component(model, observed_time_series, parameter_samples):
  """Decompose an observed time series into contributions from each component.

  This method decomposes a time series according to the posterior
  representation of a structural time series model. In particular, it:

  - Computes the posterior marginal mean and covariances over the additive
    model's latent space.
  - Decomposes the latent posterior into the marginal blocks for each
    model component.
  - Maps the per-component latent posteriors back through each component's
    observation model, to generate the time series modeled by that component.

  Args:
    model: An instance of `tfp.sts.Sum` representing a structural time series
      model.
    observed_time_series: optional `float` `Tensor` of shape
      `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
      supported when `T > 1`), specifying an observed time series. Any `NaN`s
      are interpreted as missing observations; missingness may be also be
      explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
    parameter_samples: Python `list` of `Tensors` representing posterior
      samples of model parameters, with shapes `[concat([
      [num_posterior_draws], param.prior.batch_shape,
      param.prior.event_shape]) for param in model.parameters]`. This may
      optionally also be a map (Python `dict`) of parameter names to
      `Tensor` values.

  Returns:
    component_dists: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of `model.components`)
      to `tfd.Distribution` instances representing the posterior marginal
      distributions on the process modeled by each component. Each distribution
      has batch shape matching that of `posterior_means`/`posterior_covs`, and
      event shape of `[num_timesteps]`.

  #### Examples

  Suppose we've built a model and fit it to data:

  ```python
    day_of_week = tfp.sts.Seasonal(
        num_seasons=7,
        observed_time_series=observed_time_series,
        name='day_of_week')
    local_linear_trend = tfp.sts.LocalLinearTrend(
        observed_time_series=observed_time_series,
        name='local_linear_trend')
    model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                        observed_time_series=observed_time_series)

    num_steps_forecast = 50
    samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
  ```

  To extract the contributions of individual components, pass the time series
  and sampled parameters into `decompose_by_component`:

  ```python
    component_dists = decompose_by_component(
      model,
      observed_time_series=observed_time_series,
      parameter_samples=samples)

    # Component mean and stddev have shape `[len(observed_time_series)]`.
    day_of_week_effect_mean = component_dists[day_of_week].mean()
    day_of_week_effect_stddev = component_dists[day_of_week].stddev()
  ```

  Using the component distributions, we can visualize the uncertainty for
  each component:

  ```
  from matplotlib import pylab as plt
  num_components = len(component_dists)
  xs = np.arange(len(observed_time_series))
  fig = plt.figure(figsize=(12, 3 * num_components))
  for i, (component, component_dist) in enumerate(component_dists.items()):

    # If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
    component_mean = component_dist.mean().numpy()
    component_stddev = component_dist.stddev().numpy()

    ax = fig.add_subplot(num_components, 1, 1 + i)
    ax.plot(xs, component_mean, lw=2)
    ax.fill_between(xs,
                    component_mean - 2 * component_stddev,
                    component_mean + 2 * component_stddev,
                    alpha=0.5)
    ax.set_title(component.name)
  ```
  """
  with tf.name_scope('decompose_by_component'):
    # Normalize the input series and extract the missingness mask.
    [
        observed_time_series,
        is_missing
    ] = sts_util.canonicalize_observed_time_series_with_mask(
        observed_time_series)

    # Run smoothing over the training timesteps to extract the
    # posterior on latents.
    num_timesteps = dist_util.prefer_static_value(
        tf.shape(observed_time_series))[-2]
    ssm = tfe_util.JitPublicMethods(
        model.make_state_space_model(num_timesteps=num_timesteps,
                                     param_vals=parameter_samples),
        trace_only=True)  # Avoid eager overhead w/o introducing XLA dependence.
    posterior_means, posterior_covs = ssm.posterior_marginals(
        observed_time_series, mask=is_missing)

    # Project the joint latent posterior onto each component's observed series.
    return _decompose_from_posterior_marginals(
        model, posterior_means, posterior_covs, parameter_samples)
def decompose_forecast_by_component(model, forecast_dist, parameter_samples):
  """Decompose a forecast distribution into contributions from each component.

  Args:
    model: An instance of `tfp.sts.Sum` representing a structural time series
      model.
    forecast_dist: A `Distribution` instance returned by `tfp.sts.forecast()`.
      (specifically, must be a `tfd.MixtureSameFamily` over a
      `tfd.LinearGaussianStateSpaceModel` parameterized by posterior samples).
    parameter_samples: Python `list` of `Tensors` representing posterior samples
      of model parameters, with shapes `[concat([[num_posterior_draws],
      param.prior.batch_shape, param.prior.event_shape]) for param in
      model.parameters]`. This may optionally also be a map (Python `dict`) of
      parameter names to `Tensor` values.

  Returns:
    component_forecasts: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of `model.components`)
      to `tfd.Distribution` instances representing the marginal forecast for
      each component. Each distribution has batch and event shape matching
      `forecast_dist` (specifically, the event shape is
      `[num_steps_forecast]`).

  Raises:
    ValueError: if `forecast_dist` does not have the structure produced by
      `tfp.sts.forecast()`.

  #### Examples

  Suppose we've built a model, fit it to data, and constructed a forecast
  distribution:

  ```python
    day_of_week = tfp.sts.Seasonal(
        num_seasons=7,
        observed_time_series=observed_time_series,
        name='day_of_week')
    local_linear_trend = tfp.sts.LocalLinearTrend(
        observed_time_series=observed_time_series,
        name='local_linear_trend')
    model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                        observed_time_series=observed_time_series)

    num_steps_forecast = 50
    samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
    forecast_dist = tfp.sts.forecast(model, observed_time_series,
                                     parameter_samples=samples,
                                     num_steps_forecast=num_steps_forecast)
  ```

  To extract the forecast for individual components, pass the forecast
  distribution into `decompose_forecast_by_component`:

  ```python
    component_forecasts = decompose_forecast_by_component(
      model, forecast_dist, samples)

    # Component mean and stddev have shape `[num_steps_forecast]`.
    day_of_week_effect_mean = component_forecasts[day_of_week].mean()
    day_of_week_effect_stddev = component_forecasts[day_of_week].stddev()
  ```

  Using the component forecasts, we can visualize the uncertainty for each
  component:

  ```
  from matplotlib import pylab as plt
  num_components = len(component_forecasts)
  xs = np.arange(num_steps_forecast)
  fig = plt.figure(figsize=(12, 3 * num_components))
  for i, (component, component_dist) in enumerate(component_forecasts.items()):

    # If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
    component_mean = component_dist.mean().numpy()
    component_stddev = component_dist.stddev().numpy()

    ax = fig.add_subplot(num_components, 1, 1 + i)
    ax.plot(xs, component_mean, lw=2)
    ax.fill_between(xs,
                    component_mean - 2 * component_stddev,
                    component_mean + 2 * component_stddev,
                    alpha=0.5)
    ax.set_title(component.name)
  ```
  """
  with tf.name_scope('decompose_forecast_by_component'):
    try:
      forecast_lgssm = forecast_dist.components_distribution
      forecast_latent_mean, _ = forecast_lgssm._joint_mean()  # pylint: disable=protected-access
      forecast_latent_covs, _ = forecast_lgssm._joint_covariances()  # pylint: disable=protected-access
    except AttributeError as e:
      # Note the trailing spaces inside the literals: without them the
      # concatenated message ran words together ("ofLinearGaussian...").
      raise ValueError(
          'Forecast distribution must be a MixtureSameFamily of '
          'LinearGaussianStateSpaceModel distributions, such as returned by '
          '`tfp.sts.forecast()`. (saw exception: {})'.format(e))

    # Since `parameter_samples` will have sample shape `[num_posterior_draws]`,
    # we need to move the `num_posterior_draws` dimension of the forecast
    # moments from the trailing batch dimension, where it's currently put by
    # `sts.forecast`, back to the leading (sample shape) dimension.
    forecast_latent_mean = dist_util.move_dimension(
        forecast_latent_mean, source_idx=-3, dest_idx=0)
    forecast_latent_covs = dist_util.move_dimension(
        forecast_latent_covs, source_idx=-4, dest_idx=0)
    return _decompose_from_posterior_marginals(
        model, forecast_latent_mean, forecast_latent_covs, parameter_samples,
        initial_step=forecast_lgssm.initial_step)
| |
# -*- coding: utf-8 -*-
import urllib
import urllib2
import json
import random
# turbogears imports
from tg import expose, redirect, validate, flash, session, request
from tg.decorators import *
# third party imports
from repoze.what import predicates, authorize
from repoze.what.predicates import not_anonymous, in_group, has_permission
# project specific imports
from gapproject.lib.base import BaseController
from gapproject.model import *
from gapproject.util.common import *
from gapproject.widgets.access import *
import traceback
from sqlalchemy.sql.expression import and_
# Search widget for the user list page: a single free-text "User Name" field.
class UserSearchForm( RPACForm ): fields = [ RPACText( "name", label_text = "User Name" ), ]
# Shared singleton instance rendered by AccessController.user().
user_form = UserSearchForm()
# Search widget for the role (group) list page: a single "Role Name" field.
class GroupSearchForm( RPACForm ): fields = [ RPACText( "name", label_text = "Role Name" ), ]
# Shared singleton instance rendered by AccessController.group().
group_form = GroupSearchForm()
# Search widget for the permission list page: a single "Permission Name" field.
class PermissionSearchForm( RPACForm ): fields = [ RPACText( "name", label_text = "Permission Name" ), ]
# Shared singleton instance rendered by AccessController.permission().
permission_form = PermissionSearchForm()
# Admin controller for access control: CRUD screens for users, roles (groups)
# and permissions, plus JSON helpers and synchronisation with an external
# "price ticket" service (see synUser below).
class AccessController( BaseController ):
# Every action in this controller requires membership of the 'Admin' group.
allow_only = authorize.in_group( 'Admin' )
@expose( 'gapproject.templates.access' )
@tabFocus( tab_type = "access" )
def index( self, **kw ):
# Landing page for the access tab; the template needs no data.
return {}
@expose( 'gapproject.templates.access.user' )
@paginate( "result", items_per_page = 20 )
@tabFocus( tab_type = "access" )
def user( self, **kw ):
# Paginated user list. Without a 'name' filter, list users with
# active == 0 (0 appears to denote an active record in this schema --
# the Group queries below use the same convention; verify against model).
if not kw.get( 'name', None ):
result = DBSession.query( User ).filter( User.active == 0 ).order_by( User.user_name ).all()
else:
# Case-insensitive substring match on user_name ("%%" escapes '%'
# in the old-style format string, yielding %<name>%).
result = DBSession.query( User ).filter( User.__table__.c.user_name.op( "ilike" )( "%%%s%%" % kw["name"] ) ).order_by( User.user_name ).all()
return {"widget" : user_form, "result" : result, "values" : kw}
@expose( 'gapproject.templates.access.user_edit' )
@tabFocus( tab_type = "access" )
def user_add( self, **kw ):
# New-user form. Pre-generates a 7-character password:
# 2 lowercase letters + 4 digits + 1 symbol.
# NOTE(review): uses the `random` module, which is not cryptographically
# secure -- consider a CSPRNG for password generation.
part1 = map( chr, random.sample( range( ord( 'a' ), ord( 'z' ) + 1 ), 2 ) )
part2 = random.sample( range( 0, 10 ), 4 )
part3 = random.sample( ["!", "@", "#", "$", "&", "*"], 1 )
pw = "".join( map( unicode, part1 + part2 + part3 ) )
groups = DBSession.query( Group ).filter( Group.active == 0 ).order_by( Group.group_name )
# ACTION 'NEW' tells user_save() to create rather than update.
return {'values' : {'ACTION':'NEW', 'password' : pw} , 'groups' : groups , 'usergroups' : [], }
@expose( 'gapproject.templates.access.user_edit' )
@tabFocus( tab_type = "access" )
def user_edit( self, **kw ):
# Edit form for an existing user, identified by kw['id'].
uid = kw.get( 'id', None ) or None
if not uid :
flash( "No ID provided!" )
return redirect( '/access/index' )
values = {'id' : uid, 'ACTION' : 'UPDATE', 'brands' : []}
user = DBSession.query( User ).get( uid )
for f in ['user_name', 'password', 'display_name', 'email_address']:
values[f] = getattr( user, f, '' ) or ''
groups = DBSession.query( Group ).filter( Group.active == 0 ).order_by( Group.group_name )
usergroups = [g.group_id for g in user.groups]
# Merge in the user's shipping/billing address record, if one exists.
try:
info = DBSession.query( OnclAddress ).filter( and_( OnclAddress.active == 0 , OnclAddress.issuedById == uid ) ).one()
for f in ["shipCompany", "shipAttn", "shipAddress", "shipAddress2", "shipAddress3", "shipCity", "shipState",
"shipZip", "shipCountry", "shipTel", "shipFax", "shipEmail", "shipRemark",
"billCompany", "billAttn", "billAddress", "billAddress2", "billAddress3", "billCity", "billState",
"billZip", "billCountry", "billTel", "billFax", "billEmail", "billRemark", ] :
values[f] = getattr( info, f, '' ) or ''
except:
# Best-effort: a user without an address record is still editable.
# NOTE(review): bare except also hides real DB errors (only the
# traceback is printed).
traceback.print_exc()
pass
return {'values' : values , 'groups' : groups , 'usergroups' : usergroups}
@expose()
def user_save( self, **kw ):
# Create ('NEW') or update ('UPDATE') a user plus the associated
# OnclAddress record, then push the change to the external service
# via synUser(). Redirects back to the user list in all cases.
action = kw.get( 'ACTION', None ) or None
if action not in ['NEW', 'UPDATE'] :
flash( 'No such action!' )
return redirect( '/access/user' )
if action == 'NEW': # CREATE
try:
params = {}
for f in ['user_name', 'password', 'display_name', 'email_address']:
params[f] = kw.get( f, None ) or None
obj = User( **params )
DBSession.add( obj )
# 'groups' may arrive as a single value or a list depending on
# how many checkboxes were submitted; normalise to a list.
groups = kw.get( 'groups', [] ) or []
if type( groups ) != list : groups = [groups, ]
obj.groups = DBSession.query( Group ).filter( Group.group_id.in_( groups ) ).all()
bsparams = {'issuedBy' : obj}
for f in ["shipCompany", "shipAttn", "shipAddress", "shipAddress2", "shipAddress3", "shipCity", "shipState",
"shipZip", "shipCountry", "shipTel", "shipFax", "shipEmail", "shipRemark",
"billCompany", "billAttn", "billAddress", "billAddress2", "billAddress3", "billCity", "billState",
"billZip", "billCountry", "billTel", "billFax", "billEmail", "billRemark", ] :
bsparams[f] = kw.get( f, None ) or None
DBSession.add( OnclAddress( **bsparams ) )
# Mirror the new user to the external system; treat a falsy
# result as failure so the transaction surfaces an error.
result = self.synUser( 'add', obj )
if not result : raise makeException( "Error when creating the user!" )
except:
traceback.print_exc()
flash( "Error occur on the server side!" )
return redirect( '/access/user' )
else:
flash( "Save successfully!" )
return redirect( '/access/user' )
else: # UPDATE
uid = kw.get( 'id', None ) or None
if not uid :
flash( 'No ID provided!' )
return redirect( '/access/index' )
try:
obj = DBSession.query( User ).get( uid )
for f in ['user_name', 'password', 'display_name', 'email_address']:
setattr( obj, f, kw.get( f, None ) or None )
groups = kw.get( 'groups', [] ) or []
if type( groups ) != list : groups = [groups, ]
obj.groups = DBSession.query( Group ).filter( Group.group_id.in_( groups ) ).all()
fields = ["shipCompany", "shipAttn", "shipAddress", "shipAddress2", "shipAddress3", "shipCity", "shipState",
"shipZip", "shipCountry", "shipTel", "shipFax", "shipEmail", "shipRemark",
"billCompany", "billAttn", "billAddress", "billAddress2", "billAddress3", "billCity", "billState",
"billZip", "billCountry", "billTel", "billFax", "billEmail", "billRemark", ]
# Update the existing address record, or create one if the
# lookup fails (no record yet, or multiple rows).
try:
info = DBSession.query( OnclAddress ).filter( and_( OnclAddress.active == 0 , OnclAddress.issuedById == obj.user_id ) ).one()
for f in fields : setattr( info, f, kw.get( f, None ) or None )
except:
bsparams = {'issuedBy' : obj}
for f in fields : bsparams[f] = kw.get( f, None ) or None
DBSession.add( OnclAddress( **bsparams ) )
result = self.synUser( 'update', obj )
if not result : raise makeException( "Error when updating the user!" )
except:
traceback.print_exc()
flash( "Error occur on the server side!" )
else:
flash( "Save successfully!" )
return redirect( '/access/user' )
@expose( 'gapproject.templates.access.group' )
@paginate( "result", items_per_page = 20 )
@tabFocus( tab_type = "access" )
def group( self, **kw ):
# Paginated role (group) list, optionally filtered by a
# case-insensitive substring match on group_name.
if not kw.get( 'name', None ):
result = DBSession.query( Group ).filter( Group.active == 0 ).order_by( Group.group_name ).all()
else:
result = DBSession.query( Group ).filter( Group.__table__.c.group_name.op( "ilike" )( "%%%s%%" % kw["name"] ) ).order_by( Group.group_name ).all()
return {"widget" : group_form, "result" : result, "values" : kw}
@expose( 'gapproject.templates.access.group_manage' )
@tabFocus( tab_type = "access" )
def group_add( self, **kw ):
# New-group form: no members/permissions yet, so all users are in the
# "excluded" column and all permissions in the "lost" column.
excluded = DBSession.query( User ).order_by( User.user_name )
lost = DBSession.query( Permission ).order_by( Permission.order )
# 'U' query parameter toggles display of the user-assignment panel.
return {"widget" : group_update_form , "values" : { "id" : "", "group_name" : "", "display_name" : "", "ACTION" : 'NEW' },
"included" : [] , "excluded" : excluded,
"got" : [], "lost" : lost , 'showuser' : bool( kw.get( 'U', None ) )}
@expose( 'gapproject.templates.access.group_manage' )
@tabFocus( tab_type = "access" )
def group_manage( self, **kw ):
# Edit form for an existing group (404 if kw['id'] does not resolve).
g = getOr404( Group, kw["id"] )
# included/excluded: users in vs. not in the group;
# got/lost: permissions held vs. not held by the group.
included = g.users
excluded = DBSession.query( User ).filter( ~User.groups.any( Group.group_id == g.group_id ) ).order_by( User.user_name )
got = g.permissions
lost = DBSession.query( Permission ).filter( ~Permission.groups.any( Group.group_id == g.group_id ) ).order_by( Permission.order )
return {"widget" : group_update_form , "values" : { "id" : g.group_id, "group_name" : g.group_name , "display_name" : g.display_name, "ACTION" : "UPDATE" },
"included" : included , "excluded" : excluded,
"got" : got, "lost" : lost , 'showuser' : bool( kw.get( 'U', None ) )}
@expose()
def save_group( self, **kw ):
# Create or update a group; membership ('uigs') and permissions
# ('pigs') arrive as '|'-separated id lists from the template.
gid = kw.get( 'id', None ) or None
name = kw.get( 'group_name', None ) or None
display_name = kw.get( 'display_name', None ) or None
if not gid:
g = Group( group_name = name, display_name = display_name )
DBSession.add( g )
else:
g = DBSession.query( Group ).get( gid )
g.group_name = name
g.display_name = display_name
uigs = kw.get( "uigs", '' )
pigs = kw.get( "pigs", '' )
# Users are only (re)assigned when the user panel was shown ('U' set),
# so a group edit without that panel cannot wipe the membership.
if kw.get( 'U', None ):
if not uigs : g.users = []
else : g.users = DBSession.query( User ).filter( User.user_id.in_( uigs.split( "|" ) ) ).all()
if not pigs : g.permissions = []
else : g.permissions = DBSession.query( Permission ).filter( Permission.permission_id.in_( pigs.split( "|" ) ) ).all()
flash( "Save the update successfully!" )
redirect( "/access/group" )
@expose( "gapproject.templates.access.permission" )
@paginate( "result", items_per_page = 20 )
@tabFocus( tab_type = "access" )
def permission( self, **kw ):
# Paginated permission list, optionally filtered by a
# case-insensitive substring match on permission_name.
if not kw.get( 'name', None ):
result = DBSession.query( Permission ).order_by( Permission.permission_name ).all()
else:
result = DBSession.query( Permission ).filter( Permission.__table__.c.permission_name.op( "ilike" )( "%%%s%%" % kw["name"] ) ).order_by( Permission.permission_name ).all()
return {"widget" : permission_form, "result" : result, "values" : kw}
@expose( 'gapproject.templates.access.permission_manage' )
@tabFocus( tab_type = "access" )
def permission_add( self, **kw ):
# New-permission form: no groups hold it yet, so every active group
# is listed in the "excluded" column.
excluded = DBSession.query( Group ).filter( Group.active == 0 ).order_by( Group.group_name )
return {"widget" : permission_update_form,
"values" : {"id" : '', "permission_name" : ''},
"included" : [],
"excluded" : excluded
}
@expose( "gapproject.templates.access.permission_manage" )
@tabFocus( tab_type = "access" )
def permission_manage( self, **kw ):
# Edit form for an existing permission (404 if kw['id'] not found);
# shows groups that hold ("included") vs. lack ("excluded") it.
p = getOr404( Permission, kw["id"] )
included = p.groups
excluded = DBSession.query( Group ).filter( ~Group.permissions.any( Permission.permission_id == p.permission_id ) ).order_by( Group.group_name )
return {"widget" : permission_update_form,
"values" : {"id" : p.permission_id, "permission_name" : p.permission_name},
"included" : included,
"excluded" : excluded
}
@expose()
def save_permission( self, **kw ):
    # Create or update a Permission and its group assignments, then
    # redirect back to the access index.
    #
    # Fixes vs. original:
    #  * `expose()` was a bare statement, not a decorator, so this action
    #    was never exposed -- every sibling action uses `@expose()`.
    #  * A newly created Permission was never added to the session, so it
    #    was silently dropped -- save_group() does `DBSession.add(g)`.
    pid = kw.get( 'id', None ) or None
    if not pid :
        # No id supplied: create a brand new permission record.
        p = Permission( permission_name = kw.get( 'permission_name', None ) or None )
        DBSession.add( p )
    else:
        p = DBSession.query( Permission ).get( pid )
        p.permission_name = kw["permission_name"]
    # 'igs' is a '|'-separated list of group ids that should hold this
    # permission; an empty value clears all assignments.
    if not kw["igs"] : p.groups = []
    else : p.groups = DBSession.query( Group ).filter( Group.group_id.in_( kw["igs"].split( "|" ) ) ).all()
    flash( "Save the update successfully!" )
    redirect( "/access/index" )
@expose( 'json' )
def ajaxCheck( self, **kw ):
# JSON uniqueness check used by the user/group forms:
# k is 'USER' or 'GROUP', v the candidate name. Returns
# {'code': 0} when the name is available (or belongs to the record
# being updated), {'code': 1, 'msg': ...} otherwise.
k, v = kw.get( 'k', None ), kw.get( 'v', None )
if k not in ['USER', 'GROUP'] : return {'code' : 1 , 'msg' : 'No such action!'}
if k == 'USER' :
try:
obj = DBSession.query( User ).filter( User.user_name.op( 'ilike' )( v ) ).one()
if kw.get( 'action', None ) == 'NEW' : return {'code' : 1, 'msg' : 'Duplicated user name!'}
elif kw.get( 'action', None ) == 'UPDATE':
# Same record: the "duplicate" is the row being edited.
if unicode( obj.user_id ) == kw.get( 'id', None ) : return {'code' : 0}
else: return {'code' : 1, 'msg' : 'Duplicated user name!'}
else : return {'code' : 1 , 'msg' : 'No such action!'}
except:
# .one() raised -> no (single) match, name is free.
# NOTE(review): bare except also masks unexpected DB errors.
return {'code' : 0}
elif k == 'GROUP':
try:
obj = DBSession.query( Group ).filter( Group.group_name.op( 'ilike' )( v ) ).one()
if kw.get( 'action', None ) == 'NEW' : return {'code' : 1, 'msg' : 'Duplicated group name!'}
elif kw.get( 'action', None ) == 'UPDATE':
if unicode( obj.group_id ) == kw.get( 'id', None ) : return {'code' : 0}
else: return {'code' : 1, 'msg' : 'Duplicated group name!'}
else : return {'code' : 1 , 'msg' : 'No such action!'}
except:
return {'code' : 0}
def synUser( self , action, obj ):
# Mirror a user create/update to the external service at
# config['price_ticket_url'] via an HTTP POST. Returns True iff the
# service answers with JSON {'Message': 'success'}.
# NOTE(review): the user's password is sent in the POST body --
# confirm the endpoint is HTTPS/trusted.
url = config.get( 'price_ticket_url' )
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {
'action' : action ,
'uname' : obj.user_name,
'pwd' : obj.password,
'dname' : obj.display_name,
'email' : obj.email_address,
'code' : obj.code,
}
headers = { 'User-Agent' : user_agent }
data = urllib.urlencode( values )
req = urllib2.Request( url, data, headers )
response = urllib2.urlopen( req )
result = response.read()
# Debug output of the raw response (Python 2 print statements).
print '-*' * 10
print result
print '~*' * 10
v = json.loads( result )
return v.get( 'Message', None ) == 'success'
# NOTE(review): the triple-quoted string below is disabled legacy code kept
# only as reference -- it is never executed (and contains a stray bare '@'
# near the end that would be a syntax error if re-enabled). Consider
# deleting it or moving it to version-control history.
'''
@expose('gapproject.templates.access.index')
@paginate("result", items_per_page=20)
@tabFocus(tab_type="access")
def index(self, **kw):
if not kw:
result = []
else:
result = DBSession.query(User).filter(User.__table__.c.user_name.op("ilike")("%%%s%%" % kw["user_name"])).order_by(User.user_name).all()
return {"widget" : access_search_form, "result" : result}
@expose()
def save_new(self, **kw):
if kw["type"] == "user" :
password = kw["password"] if kw["password"] else gerRandomStr(8, allAlpha)
u = User(user_name=kw["user_name"], display_name=kw["display_name"], email_address=kw["email_address"], password=password)
DBSession.add(u)
DBSession.flush()
redirect("/access/user_manage?id=%d" % u.user_id)
elif kw["type"] == "group" :
g = Group(group_name=kw["group_name"])
DBSession.add(g)
DBSession.flush()
redirect("/access/group_manage?id=%d" % g.group_id)
elif kw["type"] == "permission" :
p = Permission(permission_name=kw["permission_name"], description=kw["description"])
ag = DBSession.query(Group).filter_by(group_name="Admin").one()
ag.permissions.append(p)
DBSession.add(p)
DBSession.flush()
redirect("/access/permission_manage?id=%d" % p.permission_id)
else:
flash("No such type operation!")
redirect("/access/index")
@expose("gapproject.templates.access.user_manage")
def user_manage(self, **kw):
u = getOr404(User, kw["id"])
included = u.groups
excluded = DBSession.query(Group).filter(~Group.users.any(User.user_id == u.user_id)).order_by(Group.group_name)
return {
"widget" : user_update_form,
"values" : {"id" : u.user_id, "user_name" : u.user_name, "email_address" : u.email_address, "display_name" : u.display_name},
"included" : included,
"excluded" : excluded,
}
@expose()
def save_user(self, **kw):
u = getOr404(User, kw["id"])
if kw.get("user_name", None) : u.user_name = kw["user_name"]
if kw.get("password", None) : u.password = kw["password"]
if kw.get("display_name", None) : u.display_name = kw["display_name"]
if kw.get("email_address", None) : u.email_address = kw["email_address"]
if not kw["igs"] : u.groups = []
else : u.groups = DBSession.query(Group).filter(Group.group_id.in_(kw["igs"].split("|"))).all()
flash("Save the update successfully!")
redirect("/access/index")
@
@expose("gapproject.templates.access.test")
def test(self, **kw):
return {}
'''
| |
from django_nose.tools import (
assert_equal,
assert_false,
assert_not_in,
assert_raises,
assert_true
)
from mock import Mock, MagicMock, patch
from pontoon.base.models import Entity
from pontoon.base.tests import (
assert_attributes_equal,
TranslationFactory,
UserFactory,
)
from pontoon.base.utils import aware_datetime
from pontoon.sync.tests import FakeCheckoutTestCase
# Tests for the sync ChangeSet: pushing approved DB translations into VCS
# resource files, and pulling VCS translations/entities into the database.
class ChangeSetTests(FakeCheckoutTestCase):
def test_execute_called_once(self):
"""Raise a RuntimeError if execute is called more than once."""
self.changeset.execute()
with assert_raises(RuntimeError):
self.changeset.execute()
# Helper: apply the given attribute changes to the main DB translation,
# then sync it into the main VCS entity and execute the changeset.
def update_main_vcs_entity(self, **translation_changes):
for key, value in translation_changes.items():
setattr(self.main_db_translation, key, value)
self.main_db_translation.save()
self.changeset.update_vcs_entity(
self.translated_locale,
self.main_db_entity,
self.main_vcs_entity
)
self.changeset.execute()
def test_update_vcs_entity(self):
"""
Update the VCS translations with translations in the database.
"""
self.main_vcs_resource.save = Mock()
self.other_vcs_resource.save = Mock()
self.update_main_vcs_entity(string='New Translated String')
assert_equal(self.main_vcs_translation.strings, {None: 'New Translated String'})
# Ensure only resources that were updated are saved.
assert_true(self.main_vcs_resource.save.called)
assert_false(self.other_vcs_resource.save.called)
# Update the VCS translation with info about the last
# translation.
assert_equal(self.main_vcs_translation.last_updated, self.main_db_translation.date)
assert_equal(self.main_vcs_translation.last_translator, self.main_db_translation.user)
def test_update_vcs_entity_unapproved(self):
"""
Do not update VCS with unapproved translations. If no approved
translations exist, delete existing ones.
"""
self.update_main_vcs_entity(approved=False)
assert_equal(self.main_vcs_translation.strings, {})
# A DB translation marked fuzzy propagates the fuzzy flag to VCS.
def test_update_vcs_entity_fuzzy(self):
self.main_vcs_translation.fuzzy = False
self.update_main_vcs_entity(fuzzy=True)
assert_equal(self.main_vcs_translation.fuzzy, True)
# ...and clearing the fuzzy flag in the DB clears it in VCS too.
def test_update_vcs_entity_not_fuzzy(self):
self.main_vcs_translation.fuzzy = True
self.update_main_vcs_entity(fuzzy=False)
assert_equal(self.main_vcs_translation.fuzzy, False)
def test_update_vcs_last_translation_no_translations(self):
"""
If there are no translations in the database, do not set the
last_updated and last_translator fields on the VCS translation.
"""
self.main_db_translation.delete()
self.changeset.update_vcs_entity(
self.translated_locale,
self.main_db_entity,
self.main_vcs_entity
)
self.changeset.execute()
assert_equal(self.main_vcs_translation.last_updated, None)
assert_equal(self.main_vcs_translation.last_translator, None)
def test_update_vcs_entity_user(self):
"""Track translation authors for use in the commit message."""
user = UserFactory.create()
self.update_main_vcs_entity(user=user)
assert_equal(self.changeset.commit_authors_per_locale['translated-locale'], [user])
def test_create_db(self):
"""Create new entity in the database."""
self.main_db_entity.delete()
self.main_vcs_entity.key = 'Source String'
self.main_vcs_entity.comments = ['first comment', 'second']
self.main_vcs_entity.order = 7
self.main_vcs_translation.fuzzy = False
self.main_vcs_entity.string_plural = 'plural string'
self.main_vcs_entity.source = ['foo.py:87']
self.changeset.create_db_entity(self.main_vcs_entity)
self.changeset.execute()
new_entity = Entity.objects.get(
resource__path=self.main_vcs_resource.path,
string=self.main_vcs_entity.string
)
assert_attributes_equal(
new_entity,
resource=self.main_db_resource,
string='Source String',
key='Source String',
comment='first comment\nsecond',
order=7,
string_plural='plural string',
source=['foo.py:87'],
)
new_translation = new_entity.translation_set.all()[0]
assert_attributes_equal(
new_translation,
locale=self.translated_locale,
string='Translated String',
plural_form=None,
approved=True,
approved_date=aware_datetime(1970, 1, 1),
fuzzy=False
)
# Helper: sync the main VCS entity back into the main DB entity and
# execute the changeset.
def update_main_db_entity(self):
self.changeset.update_db_entity(
self.translated_locale,
self.main_db_entity,
self.main_vcs_entity
)
self.changeset.execute()
def test_update_db_existing_translation(self):
"""
Update an existing translation in the DB with changes from VCS.
"""
# Set up DB and VCS to differ and require an update.
self.main_db_translation.fuzzy = True
self.main_db_translation.extra = {}
self.main_db_translation.save()
self.main_vcs_entity.key = 'Source String'
self.main_vcs_entity.comments = ['first comment', 'second']
self.main_vcs_entity.order = 7
self.main_vcs_entity.string_plural = 'plural string'
self.main_vcs_entity.source = ['foo.py:87']
self.main_vcs_translation.fuzzy = False
# The test translation is from a langfile so we can use tags
# for testing extra.
self.main_vcs_translation.tags = set(['ok'])
self.update_main_db_entity()
self.main_db_entity.refresh_from_db()
assert_attributes_equal(
self.main_db_entity,
key='Source String',
comment='first comment\nsecond',
order=7,
string_plural='plural string',
source=['foo.py:87'],
)
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
fuzzy=False,
extra={'tags': ['ok']}
)
def test_update_db_clean_entity_translation(self):
"""
If no changes have been made to the database entity or the
translation, do not bother updating them in the database.
"""
self.update_main_db_entity()
# TODO: It'd be nice if we didn't rely on internal changeset
# attributes to check this, but not vital.
assert_not_in(self.main_db_entity, self.changeset.entities_to_update)
assert_not_in(self.main_db_translation, self.changeset.translations_to_update)
def test_update_db_approve_translation(self):
"""
Approve any un-approved translations that have counterparts in
VCS.
"""
self.main_db_translation.approved = False
self.main_db_translation.approved_date = None
self.main_db_translation.save()
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
approved=True,
approved_date=aware_datetime(1970, 1, 1)
)
def test_update_db_dont_approve_fuzzy(self):
"""
Do not approve un-approved translations that have non-fuzzy
counterparts in VCS.
"""
self.main_db_translation.approved = False
self.main_db_translation.approved_date = None
self.main_db_translation.save()
self.main_vcs_translation.fuzzy = True
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
approved=False,
approved_date=None
)
def test_update_db_new_translation(self):
"""
If a matching translation does not exist in the database, create a new
one.
"""
self.main_db_translation.delete()
self.update_main_db_entity()
translation = self.main_db_entity.translation_set.all()[0]
assert_attributes_equal(
translation,
locale=self.translated_locale,
string='Translated String',
plural_form=None,
approved=True,
approved_date=aware_datetime(1970, 1, 1),
fuzzy=False,
extra={'tags': []}
)
def test_update_db_unfuzzy_existing(self):
"""
Any existing fuzzy translations get unfuzzied.
"""
self.main_db_translation.fuzzy = True
self.main_db_translation.save()
self.main_vcs_translation.strings[None] = 'New Translated String'
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
fuzzy=False
)
def test_update_db_unapprove_existing(self):
"""
Any existing translations that don't match anything in VCS get
unapproved, unless they were created after self.now.
"""
self.main_db_translation.approved = True
self.main_db_translation.approved_date = aware_datetime(1970, 1, 1)
self.main_db_translation.approved_user = UserFactory.create()
self.main_db_translation.save()
self.main_vcs_translation.strings[None] = 'New Translated String'
created_after_translation = TranslationFactory.create(
entity=self.main_db_entity,
approved=True,
approved_date=aware_datetime(1970, 1, 3)
)
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
approved=False,
approved_user=None,
approved_date=None
)
created_after_translation.refresh_from_db()
assert_attributes_equal(
created_after_translation,
approved=True,
approved_date=aware_datetime(1970, 1, 3)
)
def test_update_db_unapprove_clean(self):
"""
If translations that are set to be unapproved were already unapproved,
don't bother updating them.
"""
self.main_db_translation.approved = False
self.main_db_translation.approved_date = None
self.main_db_translation.approved_user = None
self.main_db_translation.save()
self.main_vcs_translation.strings[None] = 'New Translated String'
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_not_in(self.main_db_translation, self.changeset.translations_to_update)
def test_update_db_reject_approved(self):
"""
When a translation is submitted through VCS, reject any existing approved translations.
"""
self.main_db_translation.approved = True
self.main_db_translation.approved_date = aware_datetime(1970, 1, 1)
self.main_db_translation.approved_user = UserFactory.create()
self.main_db_translation.rejected = False
self.main_db_translation.save()
self.main_vcs_translation.strings[None] = 'New Translated String'
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
rejected=True,
)
def test_update_db_reject_approved_skip_fuzzy(self):
"""
When a translation is submitted through VCS, reject any existing approved translations.
Unless the same translation is submitted and only made fuzzy.
"""
self.main_db_translation.approved = True
self.main_db_translation.approved_date = aware_datetime(1970, 1, 1)
self.main_db_translation.approved_user = UserFactory.create()
self.main_db_translation.rejected = False
self.main_db_translation.save()
self.main_vcs_translation.strings[None] = self.main_db_translation.string
self.main_vcs_translation.fuzzy = True
self.update_main_db_entity()
self.main_db_translation.refresh_from_db()
assert_attributes_equal(
self.main_db_translation,
rejected=False,
)
# Obsoleting an entity via the changeset marks it obsolete in the DB.
def test_obsolete_db(self):
self.changeset.obsolete_db_entity(self.main_db_entity)
self.changeset.execute()
self.main_db_entity.refresh_from_db()
assert_true(self.main_db_entity.obsolete)
def test_no_new_translations(self):
"""
Don't change any resource if there aren't any new translations.
"""
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1)
)
with patch.object(
self.main_db_entity, 'has_changed', return_value=False
) as mock_has_changed:
resource_file = MagicMock()
self.changeset.update_vcs_entity(
self.translated_locale, self.main_db_entity, MagicMock()
)
self.changeset.vcs_project.resources = {
self.main_db_entity.resource.path: resource_file
}
self.changeset.execute_update_vcs()
assert mock_has_changed.called
assert not resource_file.save.called
def test_changed_resources_sync(self):
"""
Synchronization should modify resource files if there
are changed translations.
"""
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1)
)
resource_file = MagicMock()
self.changeset.vcs_project.resources = {
self.main_db_entity.resource.path: resource_file
}
with patch.object(
self.main_db_entity, 'has_changed', return_value=True
) as mock_has_changed:
self.changeset.update_vcs_entity(
self.translated_locale, self.main_db_entity, MagicMock()
)
self.changeset.execute_update_vcs()
assert mock_has_changed.called
assert resource_file.save.called
def test_unchanged_resources_sync(self):
"""
Synchronization shouldn't modify resources if their
entities weren't changed.
"""
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1)
)
resource_file = MagicMock()
self.changeset.vcs_project.resources = {
self.main_db_entity.resource.path: resource_file
}
with patch.object(
self.main_db_entity, 'has_changed', return_value=False
) as mock_has_changed:
self.changeset.update_vcs_entity(
self.translated_locale, self.main_db_entity, MagicMock()
)
self.changeset.execute_update_vcs()
assert mock_has_changed.called
assert len(resource_file.save.mock_calls) == 0
class AuthorsTests(FakeCheckoutTestCase):
"""
Tests authors of translations passed to the final commit message.
"""
def test_multiple_authors(self):
"""
Commit message should include authors from translations of separate
entities.
"""
first_author, second_author = UserFactory.create_batch(2)
# Only the approved translation per entity contributes its author.
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=first_author,
approved=True
)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
approved=False
)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.other_db_entity,
user=second_author,
approved=True
)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.other_db_entity,
approved=False
)
self.changeset.update_vcs_entity(self.translated_locale, self.main_db_entity, MagicMock())
self.changeset.update_vcs_entity(self.translated_locale, self.other_db_entity, MagicMock())
self.changeset.execute_update_vcs()
assert_equal(
self.changeset.commit_authors_per_locale[self.translated_locale.code],
[first_author, second_author]
)
def test_plural_translations(self):
"""
If entity has some plural translations and approved translations their authors
should be included in commit message.
"""
first_author, second_author, third_author = UserFactory.create_batch(3)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=first_author,
approved=True
)
# Approved plural-form translation: its author must also be credited.
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=third_author,
approved=True,
plural_form=1
)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=second_author,
approved=False
)
self.changeset.update_vcs_entity(self.translated_locale, self.main_db_entity, MagicMock())
self.changeset.execute_update_vcs()
assert_equal(
set(self.changeset.commit_authors_per_locale[self.translated_locale.code]),
{first_author, third_author}
)
def test_multiple_translations(self):
"""
If there are multiple translations to the same locale, only authors of
the final approved version should be returned.
"""
first_author, second_author = UserFactory.create_batch(2)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=first_author,
approved=True
)
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
user=second_author,
approved=False
)
self.changeset.update_vcs_entity(self.translated_locale, self.main_db_entity, MagicMock())
self.changeset.execute_update_vcs()
assert_equal(
self.changeset.commit_authors_per_locale[self.translated_locale.code],
[first_author]
)
def test_no_translations(self):
"""
We don't attribute anyone if there aren't any new translations.
"""
TranslationFactory.create(
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1)
)
with patch.object(self.main_db_entity, 'has_changed', return_value=False):
self.changeset.update_vcs_entity(
self.translated_locale, self.main_db_entity, MagicMock()
)
self.changeset.execute_update_vcs()
assert_equal(self.changeset.commit_authors_per_locale[self.translated_locale.code], [])
| |
"""
Support for Hue lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.hue/
"""
import json
import logging
import os
import random
import socket
from datetime import timedelta
from urllib.parse import urlparse
import voluptuous as vol
import homeassistant.util as util
import homeassistant.util.color as color_util
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_RGB_COLOR,
ATTR_TRANSITION, ATTR_XY_COLOR, EFFECT_COLORLOOP, EFFECT_RANDOM,
FLASH_LONG, FLASH_SHORT, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION,
SUPPORT_XY_COLOR, Light, PLATFORM_SCHEMA)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (CONF_FILENAME, CONF_HOST, DEVICE_DEFAULT_NAME)
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['phue==0.9']
# Track previously setup bridges (keyed by resolved IP address).
_CONFIGURED_BRIDGES = {}
# Map ip to request id for configuring
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_ALLOW_UNREACHABLE = 'allow_unreachable'
DEFAULT_ALLOW_UNREACHABLE = False
DOMAIN = "light"
SERVICE_HUE_SCENE = "hue_activate_scene"
# Throttle intervals for polling the bridge (regular vs. forced updates).
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
PHUE_CONFIG_FILE = 'phue.conf'
# Feature bitmask advertised by Hue lights.
SUPPORT_HUE = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT |
SUPPORT_FLASH | SUPPORT_RGB_COLOR | SUPPORT_TRANSITION |
SUPPORT_XY_COLOR)
# Platform configuration: host is required; unreachable-light handling and
# the phue config filename are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ALLOW_UNREACHABLE): cv.boolean,
vol.Optional(CONF_FILENAME): cv.string,
})
ATTR_GROUP_NAME = "group_name"
ATTR_SCENE_NAME = "scene_name"
# Payload schema for the hue_activate_scene service.
SCENE_SCHEMA = vol.Schema({
vol.Required(ATTR_GROUP_NAME): cv.string,
vol.Required(ATTR_SCENE_NAME): cv.string,
})
def _find_host_from_config(hass, filename=PHUE_CONFIG_FILE):
    """Attempt to detect host based on existing configuration.

    Reads the phue registration file and returns the first bridge host
    recorded in it, or None when no usable file exists.
    """
    config_path = hass.config.path(filename)
    if not os.path.isfile(config_path):
        return None
    try:
        with open(config_path) as conf_file:
            registered = json.loads(conf_file.read())
        # The phue config maps bridge host -> credentials; any key will do.
        return next(iter(registered.keys()))
    except (ValueError, AttributeError, StopIteration):
        # ValueError if can't parse as JSON
        # AttributeError if JSON value is not a dict
        # StopIteration if no keys
        return None
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Hue lights.

    Resolves the bridge host from (in order) discovery info, the platform
    config, or a previously written phue config file, then delegates to
    setup_bridge().  Returns False when no host can be determined.
    """
    # Default needed in case of discovery
    filename = config.get(CONF_FILENAME, PHUE_CONFIG_FILE)
    allow_unreachable = config.get(CONF_ALLOW_UNREACHABLE,
                                   DEFAULT_ALLOW_UNREACHABLE)
    if discovery_info is not None:
        # discovery_info[1] carries the bridge URL here; only the host part
        # is needed. (Assumed from this indexing — confirm against the
        # discovery component.)
        host = urlparse(discovery_info[1]).hostname
    else:
        host = config.get(CONF_HOST, None)
    # Fall back to a host recorded by a previous phue registration.
    if host is None:
        host = _find_host_from_config(hass, filename)
    if host is None:
        _LOGGER.error('No host found in configuration')
        return False
    # Only act if we are not already configuring this host
    if host in _CONFIGURING or \
            socket.gethostbyname(host) in _CONFIGURED_BRIDGES:
        return
    setup_bridge(host, hass, add_devices, filename, allow_unreachable)
def setup_bridge(host, hass, add_devices, filename, allow_unreachable):
    """Setup a phue bridge based on host parameter.

    Connects to the bridge, registers the hue_activate_scene service and
    performs the first light scan.  When the bridge refuses the connection
    the setup is aborted; when the app is not yet registered a configurator
    flow is started instead.
    """
    # Imported lazily so the dependency is only needed once a bridge is set up.
    import phue
    try:
        bridge = phue.Bridge(
            host,
            config_file_path=hass.config.path(filename))
    except ConnectionRefusedError:  # Wrong host was given
        _LOGGER.error("Error connecting to the Hue bridge at %s", host)
        return
    except phue.PhueRegistrationException:
        # Bridge reachable but the link button was never pressed: ask the
        # user to do so via the configurator, then retry.
        _LOGGER.warning("Connected to Hue at %s but not registered.", host)
        request_configuration(host, hass, add_devices, filename,
                              allow_unreachable)
        return
    # If we came here and configuring this host, mark as done
    if host in _CONFIGURING:
        request_id = _CONFIGURING.pop(host)
        configurator = get_component('configurator')
        configurator.request_done(request_id)
    # light_id -> HueLight, shared with the throttled updater below.
    lights = {}
    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_lights():
        """Update the Hue light objects with latest info from the bridge."""
        try:
            api = bridge.get_api()
        except socket.error:
            # socket.error when we cannot reach Hue
            _LOGGER.exception("Cannot reach the bridge")
            return
        api_states = api.get('lights')
        if not isinstance(api_states, dict):
            _LOGGER.error("Got unexpected result from Hue API")
            return
        new_lights = []
        # deCONZ gateways speak the Hue API but do not support all of the
        # Hue-specific commands (alert/effect defaults).
        api_name = api.get('config').get('name')
        if api_name in ('RaspBee-GW', 'deCONZ-GW'):
            bridge_type = 'deconz'
        else:
            bridge_type = 'hue'
        for light_id, info in api_states.items():
            if light_id not in lights:
                # First time we see this light: create and queue the entity.
                lights[light_id] = HueLight(int(light_id), info,
                                            bridge, update_lights,
                                            bridge_type, allow_unreachable)
                new_lights.append(lights[light_id])
            else:
                # Known light: refresh its cached state only.
                lights[light_id].info = info
        if new_lights:
            add_devices(new_lights)
    # Remember this bridge by IP so discovery does not set it up twice.
    _CONFIGURED_BRIDGES[socket.gethostbyname(host)] = True
    # create a service for calling run_scene directly on the bridge,
    # used to simplify automation rules.
    def hue_activate_scene(call):
        """Service to call directly directly into bridge to set scenes."""
        group_name = call.data[ATTR_GROUP_NAME]
        scene_name = call.data[ATTR_SCENE_NAME]
        bridge.run_scene(group_name, scene_name)
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    hass.services.register(DOMAIN, SERVICE_HUE_SCENE, hue_activate_scene,
                           descriptions.get(SERVICE_HUE_SCENE),
                           schema=SCENE_SCHEMA)
    # Initial scan so entities exist right after setup.
    update_lights()
def request_configuration(host, hass, add_devices, filename,
                          allow_unreachable):
    """Request configuration steps from the user.

    Shows a configurator card asking the user to press the bridge link
    button; the callback retries the bridge setup afterwards.
    """
    configurator = get_component('configurator')
    # We got an error if this method is called while we are configuring
    if host in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING[host], "Failed to register, please try again.")
        return
    # pylint: disable=unused-argument
    def hue_configuration_callback(data):
        """The actions to do when our configuration callback is called."""
        # Retry full setup; on success the pending request is closed there.
        setup_bridge(host, hass, add_devices, filename, allow_unreachable)
    _CONFIGURING[host] = configurator.request_config(
        hass, "Philips Hue", hue_configuration_callback,
        description=("Press the button on the bridge to register Philips Hue "
                     "with Home Assistant."),
        entity_picture="/static/images/logo_philips_hue.png",
        description_image="/static/images/config_philips_hue.jpg",
        submit_caption="I have pressed the button"
    )
class HueLight(Light):
    """Representation of a Hue light.

    State is read from the cached `info` dict refreshed by the shared
    (throttled) `update_lights` callable created in setup_bridge().
    """
    def __init__(self, light_id, info, bridge, update_lights,
                 bridge_type, allow_unreachable):
        """Initialize the light.

        Args:
            light_id (int): numeric id of the light on the bridge.
            info (dict): raw light state/attributes from the Hue API.
            bridge: phue.Bridge used for commands.
            update_lights: throttled callable refreshing all lights.
            bridge_type (str): 'hue' or 'deconz' (affects defaults sent).
            allow_unreachable (bool): report unreachable lights as on.
        """
        self.light_id = light_id
        self.info = info
        self.bridge = bridge
        self.update_lights = update_lights
        self.bridge_type = bridge_type
        self.allow_unreachable = allow_unreachable
    @property
    def unique_id(self):
        """Return the ID of this Hue light."""
        # Falls back to the name when the API exposes no 'uniqueid'.
        return "{}.{}".format(
            self.__class__, self.info.get('uniqueid', self.name))
    @property
    def name(self):
        """Return the name of the Hue light."""
        return self.info.get('name', DEVICE_DEFAULT_NAME)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self.info['state'].get('bri')
    @property
    def xy_color(self):
        """Return the XY color value."""
        return self.info['state'].get('xy')
    @property
    def color_temp(self):
        """Return the CT color value."""
        return self.info['state'].get('ct')
    @property
    def is_on(self):
        """Return true if device is on."""
        # Throttled refresh: usually served from cache, occasionally polls.
        self.update_lights()
        if self.allow_unreachable:
            return self.info['state']['on']
        else:
            # Unreachable lights are reported off unless configured otherwise.
            return self.info['state']['reachable'] and self.info['state']['on']
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_HUE
    def turn_on(self, **kwargs):
        """Turn the specified or all lights on."""
        command = {'on': True}
        if ATTR_TRANSITION in kwargs:
            # Hue transition time is in 1/10th seconds.
            # NOTE(review): unlike turn_off, this is not capped at 9000 —
            # confirm whether the bridge rejects larger values.
            command['transitiontime'] = kwargs[ATTR_TRANSITION] * 10
        if ATTR_XY_COLOR in kwargs:
            command['xy'] = kwargs[ATTR_XY_COLOR]
        elif ATTR_RGB_COLOR in kwargs:
            # RGB is converted to xy + brightness; an explicit brightness
            # below overrides the derived one.
            xyb = color_util.color_RGB_to_xy(
                *(int(val) for val in kwargs[ATTR_RGB_COLOR]))
            command['xy'] = xyb[0], xyb[1]
            command['bri'] = xyb[2]
        if ATTR_BRIGHTNESS in kwargs:
            command['bri'] = kwargs[ATTR_BRIGHTNESS]
        if ATTR_COLOR_TEMP in kwargs:
            command['ct'] = kwargs[ATTR_COLOR_TEMP]
        flash = kwargs.get(ATTR_FLASH)
        if flash == FLASH_LONG:
            # 'alert' and 'on' are mutually exclusive in the sent command.
            command['alert'] = 'lselect'
            del command['on']
        elif flash == FLASH_SHORT:
            command['alert'] = 'select'
            del command['on']
        elif self.bridge_type == 'hue':
            # deCONZ gateways do not accept the 'none' default.
            command['alert'] = 'none'
        effect = kwargs.get(ATTR_EFFECT)
        if effect == EFFECT_COLORLOOP:
            command['effect'] = 'colorloop'
        elif effect == EFFECT_RANDOM:
            # Random effect is emulated with a random hue/saturation.
            command['hue'] = random.randrange(0, 65535)
            command['sat'] = random.randrange(150, 254)
        elif self.bridge_type == 'hue':
            command['effect'] = 'none'
        self.bridge.set_light(self.light_id, command)
    def turn_off(self, **kwargs):
        """Turn the specified or all lights off."""
        command = {'on': False}
        if ATTR_TRANSITION in kwargs:
            # Transition time is in 1/10th seconds and cannot exceed
            # 900 seconds.
            command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)
        flash = kwargs.get(ATTR_FLASH)
        if flash == FLASH_LONG:
            command['alert'] = 'lselect'
            del command['on']
        elif flash == FLASH_SHORT:
            command['alert'] = 'select'
            del command['on']
        elif self.bridge_type == 'hue':
            command['alert'] = 'none'
        self.bridge.set_light(self.light_id, command)
    def update(self):
        """Synchronize state with bridge."""
        # Bypass the throttle so HA gets fresh state when it asks for it.
        self.update_lights(no_throttle=True)
| |
""" This is the utils module that collects convenience functions and code that are
useful for charts ecosystem.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import json
from collections import OrderedDict, defaultdict
from copy import copy
from math import cos, sin
from colorsys import hsv_to_rgb
from pandas.io.json import json_normalize
import pandas as pd
import numpy as np
from six import iteritems
from ..models.glyphs import (
Asterisk, Circle, CircleCross, CircleX, Cross, Diamond, DiamondCross,
InvertedTriangle, Square, SquareCross, SquareX, Triangle, X)
from ..models.sources import ColumnDataSource
from ..plotting.helpers import DEFAULT_PALETTE
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Letters used by gen_column_names to synthesize column names (a-z, aa-zz).
DEFAULT_COLUMN_NAMES = 'abcdefghijklmnopqrstuvwxyz'
# map between distinct set of marker names and marker classes
marker_types = OrderedDict(
    [
        ("circle", Circle),
        ("square", Square),
        ("triangle", Triangle),
        ("diamond", Diamond),
        ("inverted_triangle", InvertedTriangle),
        ("asterisk", Asterisk),
        ("cross", Cross),
        ("x", X),
        ("circle_cross", CircleCross),
        ("circle_x", CircleX),
        ("square_x", SquareX),
        ("square_cross", SquareCross),
        ("diamond_cross", DiamondCross),
    ]
)
def take(n, iterable):
    """Return first n items of the iterable as a list.

    Args:
        n (int): number of leading items to take
        iterable: any iterable

    Returns:
        list: up to ``n`` leading items (fewer when the iterable is shorter)
    """
    # The docstring (and the classic itertools recipe) promise a list, but
    # the previous implementation returned a single-use islice iterator.
    return list(itertools.islice(iterable, n))
def cycle_colors(chunk, palette=DEFAULT_PALETTE):
    """ Build a color list just cycling through a given palette.

    Args:
        chunk (seq): the chunk of elements to generate the color list for
        palette (seq[color]) : a palette of colors to cycle through

    Returns:
        list(color): one color per element of ``chunk``
    """
    # Pair every element with the next palette color, recycling the palette
    # as often as needed (replaces the manual range(len(...)) loop; also
    # fixes the "chuck" typo in the docstring).
    return [color for _, color in zip(chunk, itertools.cycle(palette))]
def polar_to_cartesian(r, start_angles, end_angles):
    """Translate polar coordinates to cartesian.

    Args:
        r (float): radial coordinate
        start_angles (list(float)): list of start angles
        end_angles (list(float)): list of end_angles angles

    Returns:
        x, y points
    """
    mid_points = []
    for radius, start, end in zip(r, start_angles, end_angles):
        # Place each point at the angular midpoint of its wedge.
        mid_angle = (end + start) / 2
        mid_points.append((radius * cos(mid_angle), radius * sin(mid_angle)))
    return zip(*mid_points)
def ordered_set(iterable):
    """Creates an ordered list from strings, tuples or other hashable items.

    Returns:
        list of unique and ordered values
    """
    seen = set()
    unique_items = []
    for element in iterable:
        # First occurrence wins; later duplicates are dropped.
        if element not in seen:
            seen.add(element)
            unique_items.append(element)
    return unique_items
def collect_attribute_columns(**specs):
    """Collect list of unique and ordered columns across attribute specifications.

    Args:
        specs (dict): attribute name, :class:`AttrSpec` mapping

    Returns:
        list of columns in order as they appear in attr spec and without duplicates
    """
    # Only look at the specs that actually have columns assigned.
    assigned_specs = (spec for spec in specs.values() if spec.columns)
    # Flatten all selected columns, then de-duplicate preserving order.
    all_columns = list(itertools.chain.from_iterable(
        spec.columns for spec in assigned_specs))
    return ordered_set(all_columns)
def df_from_json(data, rename=True, **kwargs):
    """Attempt to produce :class:`pandas.DataFrame` from hierarchical json-like data.

    This utility wraps the :func:`pandas.io.json.json_normalize` function and by
    default will try to rename the columns produced by it.

    Args:
        data (str or list(dict) or dict(list(dict))): a path to json data or loaded json
            data. This function will look into the data and try to parse it correctly
            based on common structures of json data.
        rename (bool, optional: try to rename column hierarchy to the base name. So
            medals.bronze would end up being bronze. This will only rename to the base
            column name if the name is unique, and only if the pandas json parser
            produced columns that have a '.' in the column name.
        **kwargs: any kwarg supported by :func:`pandas.io.json.json_normalize`

    Returns:
        a parsed pandas dataframe from the json data, unless the path does not exist,
        the input data is nether a list or dict. In that case, it will return `None`.
    """
    # A string is treated as a path to a json file.
    if isinstance(data, str):
        with open(data) as data_file:
            data = json.load(data_file)
    parsed = None
    if isinstance(data, list):
        parsed = json_normalize(data)
    elif isinstance(data, dict):
        # Normalize the (last) list value found in the mapping.
        for value in data.values():
            if isinstance(value, list):
                parsed = json_normalize(value)
    # try to rename the columns if configured to
    if rename and parsed is not None:
        parsed = denormalize_column_names(parsed)
    return parsed
def denormalize_column_names(parsed_data):
    """Attempts to remove the column hierarchy if possible when parsing from json.

    Args:
        parsed_data (:class:`pandas.DataFrame`): df parsed from json data using
            :func:`pandas.io.json.json_normalize`.

    Returns:
        dataframe with updated column names
    """
    cols = parsed_data.columns.tolist()
    # Group dotted columns by their base (last segment) name so collisions
    # can be detected.  The previous code keyed this mapping by the full
    # column name, so every group had exactly one entry and the uniqueness
    # check below could never prevent e.g. 'a.x' and 'b.x' from both being
    # renamed to the duplicate column label 'x'.
    base_columns = defaultdict(list)
    for col in cols:
        if '.' in col:
            # get last split of '.' to get primary column name
            base_columns[col.split('.')[-1]].append(col)
    # only rename columns if they don't overlap another base column name
    rename = {sources[0]: base
              for base, sources in base_columns.items()
              if len(sources) == 1}
    if rename:
        return parsed_data.rename(columns=rename)
    else:
        return parsed_data
def get_index(data):
    """A generic function to return the index from values.

    Should be used to abstract away from specific types of data.

    Args:
        data (:class:`pandas.Series`, :class:`pandas.DataFrame`): a data source to
            return or derive an index for.

    Returns:
        a pandas index
    """
    # Series and DataFrame both expose their index via the same attribute.
    return data.index
def get_unity(data, value=1):
    """Returns a column of ones with the same length as input data.

    Useful for charts that need this special data type when no input is provided
    for one of the dimensions.

    Args:
        data (:class:`pandas.DataFrame`): the data to add constant column to.
        value (str, int, object): a valid value for a dataframe, used as constant value
            for each row.

    Returns:
        a copy of `data` with a column of '_charts_ones' added to it
    """
    # Work on a copy so the caller's frame is never mutated.
    padded = data.copy()
    padded['_charts_ones'] = value
    return padded['_charts_ones']
# Dispatch table mapping special dimension keywords to the function that
# derives the corresponding column from the input data.
special_columns = {'index': get_index,
                   'unity': get_unity}
def title_from_columns(cols):
    """Creates standard string representation of columns.

    If cols is None, then None is returned.
    """
    if cols is None:
        return None
    col_list = copy(cols)
    if not isinstance(col_list, list):
        col_list = [col_list]
    # Join and title-case (title() is idempotent, so one pass suffices).
    return ', '.join(col_list).title()
def gen_column_names(n):
    """Produces list of unique column names of length n.

    Args:
        n (int): count of column names to provide

    Returns:
        list(str) of length `n`
    """
    names = list(DEFAULT_COLUMN_NAMES)
    # single letters a-z cover the request
    if n < len(names):
        return list(take(n, names))
    # otherwise extend with doubled letters aa, ab, ... (500+ columns)
    remaining = n - len(names)
    letter_pairs = itertools.product(DEFAULT_COLUMN_NAMES,
                                     DEFAULT_COLUMN_NAMES)
    names.extend(''.join(pair) for pair in take(remaining, letter_pairs))
    return names
def generate_patch_base(x, y, base=0.0):
    """ Adds base to the start and end of y, and extends x to match the length.

    Args:
        x (`pandas.Series`): x values for the area chart
        y (`pandas.Series`): y values for the area chart
        base (float): the flat side of the area glyph

    Returns:
        x, y: tuple containing padded x and y as `numpy.ndarray`
    """
    x_vals = x.values
    y_vals = y.values
    # Close the polygon vertically: drop to `base` at both ends of y...
    y_padded = np.insert(y_vals, 0, base)
    y_padded = np.append(y_padded, base)
    # ...and duplicate the first/last x so both arrays stay the same length.
    x_padded = np.insert(x_vals, 0, x_vals[0])
    x_padded = np.append(x_padded, x_padded[-1])
    return x_padded, y_padded
class ChartHelp(object):
    """Builds, formats, and displays help for the chart function"""
    def __init__(self, *builders):
        self.builders = builders
    def __repr__(self):
        # Concatenate the help text contributed by every builder.
        return ''.join(builder.generate_help() for builder in self.builders)
def help(*builders):
    """Adds a ChartHelp object to the help attribute of the function."""
    def add_help(func):
        # Attach the help object; the function itself is returned unchanged.
        func.help = ChartHelp(*builders)
        return func
    return add_help
def derive_aggregation(dim_cols, agg_col, agg):
    """Produces consistent aggregation spec from optional column specification.

    This utility provides some consistency to the flexible inputs that can be provided
    to charts, such as not specifying dimensions to aggregate on, not specifying an
    aggregation, and/or not specifying a column to aggregate on.
    """
    # Plotting against the index (or no dimensions) means no aggregation.
    if dim_cols == 'index' or agg_col == 'index' or dim_cols is None:
        return None, None
    if agg_col is None:
        # No column given: count occurrences of the first dimension instead.
        first_dim = dim_cols[0] if isinstance(dim_cols, list) else dim_cols
        return first_dim, 'count'
    return agg_col, agg
def build_wedge_source(df, cat_cols, agg_col=None, agg='mean', level_width=0.5,
                       level_spacing=0.01):
    """Build the source DataFrame describing each wedge of a donut chart.

    Args:
        df (`pandas.DataFrame`): input data
        cat_cols (list(str)): categorical columns, one per donut level
        agg_col (str, optional): column to aggregate
        agg (str, optional): aggregation name to apply
        level_width (float): radial width of each level
        level_spacing (float or list(float)): spacing added before level(s)

    Returns:
        `pandas.DataFrame` with polar bounds plus 'inners'/'outers'/'centers'
    """
    df = cat_to_polar(df, cat_cols, agg_col, agg, level_width)
    add_wedge_spacing(df, level_spacing)
    df['centers'] = df['outers'] - (df['outers'] - df['inners']) / 2.0
    # scale level 0 text position towards outside of wedge if center is not a donut
    if not isinstance(level_spacing, list):
        # .loc replaces DataFrame.ix, which was deprecated in pandas 0.20
        # and removed in 1.0; boolean-mask + column-label semantics match.
        df.loc[df['level'] == 0, 'centers'] *= 1.5
    return df
def shift_series(s):
    """Produces a copy of the provided series shifted by one, starting with 0."""
    # Series.shift already returns a new object, so no explicit copy needed;
    # the NaN introduced at the front is replaced by 0.
    shifted = s.shift(1)
    shifted.iloc[0] = 0.0
    return shifted
def _create_start_end(levels):
"""Produces wedge start and end values from list of dataframes for each level.
Returns:
start, end: two series describing starting and ending angles in radians
"""
rads = levels[0].copy()
for level in levels[1:]:
rads = rads * level
rads *= (2 * np.pi)
end = rads.cumsum()
start = shift_series(end)
return start, end
def cat_to_polar(df, cat_cols, agg_col=None, agg='mean', level_width=0.5):
    """Return start and end angles for each index in series.

    Aggregates `df` once per hierarchy level (cat_cols[:1], cat_cols[:2], ...)
    and converts each group's share into polar wedge bounds.

    Returns:
        df: a `pandas.DataFrame` describing each aggregated wedge
    """
    agg_col, agg = derive_aggregation(cat_cols, agg_col, agg)
    def calc_span_proportion(data):
        """How much of the circle should be assigned."""
        return data/data.sum()
    # group by each level
    levels_cols = []
    starts = []
    ends = []
    levels = []
    agg_values = []
    for i in range(0, len(cat_cols)):
        # One pass per hierarchy depth: group on the first i+1 columns.
        level_cols = cat_cols[:i+1]
        if agg_col is not None and agg is not None:
            # e.g. df.groupby(cols).value_col.mean()
            gb = getattr(getattr(df.groupby(level_cols), agg_col), agg)()
        else:
            # No aggregation: fall back to the first non-index column as-is.
            cols = [col for col in df.columns if col != 'index']
            gb = df[cols[0]]
        # lower than top level, need to groupby next to lowest level
        group_level = i - 1
        if group_level >= 0:
            levels.append(gb.groupby(level=group_level).apply(calc_span_proportion))
        else:
            levels.append(calc_span_proportion(gb))
        start_ends = _create_start_end(levels)
        starts.append(start_ends[0])
        ends.append(start_ends[1])
        agg_values.append(gb)
        # build array of constant value representing the level
        this_level = start_ends[0].copy()
        this_level[:] = i
        levels_cols.append(this_level)
    # Stack all levels into one frame; rows from different depths coexist.
    df = pd.DataFrame({'start': pd.concat(starts),
                       'end': pd.concat(ends),
                       'level': pd.concat(levels_cols),
                       'values': pd.concat(agg_values)})
    if len(cat_cols) > 1:
        # Pad single-level index entries with '' so every row can live in
        # one MultiIndex of uniform depth.
        idx = df.index.copy().values
        for i, val in enumerate(df.index):
            if not isinstance(val, tuple):
                val = (val, '')
            idx[i] = val
        df.index = pd.MultiIndex.from_tuples(idx)
        df.index.names = cat_cols
    # sort the index to avoid performance warning (might alter chart)
    # NOTE(review): DataFrame.sortlevel was removed in pandas 1.0
    # (sort_index is the modern equivalent) — confirm target pandas version.
    df.sortlevel(inplace=True)
    inners, outers = calc_wedge_bounds(df['level'], level_width)
    df['inners'] = inners
    df['outers'] = outers
    return df
def add_text_label_from_index(df):
    """Add column for text label, based on level-oriented index.

    This is used for the donut chart, where there is a hierarchy of categories,
    which are separated and encoded into the index of the data. If there are
    3 levels (columns) used, then a 3 level multi-index is used. Level 0 will
    have each of the values of the first column, then NaNs for the next two. The
    last non-empty level is used for the label of that row.
    """
    text = []
    for idx in df.index:
        row_text = ''
        if isinstance(idx, tuple):
            # the lowest, non-empty index is the label; compare with `!=`
            # instead of the former `is not ''` — identity comparison with a
            # string literal is unreliable and a SyntaxWarning on 3.8+.
            for lev in reversed(idx):
                if lev != '' and row_text == '':
                    row_text = str(lev)
        else:
            row_text = str(idx)
        text.append(row_text)
    df['text'] = text
    return df
def build_wedge_text_source(df, start_col='start', end_col='end',
                            center_col='centers'):
    """Generate `ColumnDataSource` for text representation of donut levels.

    Returns a data source with 3 columns, 'text', 'x', and 'y', where 'text'
    is a derived label from the `~pandas.MultiIndex` provided in `df`.
    """
    x, y = polar_to_cartesian(df[center_col], df[start_col], df[end_col])
    # extract text from the levels in index
    df = add_text_label_from_index(df)
    df['text_angle'] = calc_text_angle(df['start'], df['end'])
    # Keep level-0 labels horizontal.  .loc replaces DataFrame.ix, which was
    # deprecated in pandas 0.20 and removed in 1.0; semantics are unchanged.
    df.loc[df.level == 0, 'text_angle'] = 0.0
    text_source = ColumnDataSource(dict(text=df['text'], x=x, y=y,
                                        text_angle=df['text_angle']))
    return text_source
def calc_text_angle(start, end):
    """Produce a column of text angle values based on the bounds of the wedge."""
    angles = (start + end) / 2.0
    # Flip labels on the left half of the circle so they read upright.
    left_half = (angles > (np.pi / 2)) & (angles < (3 * np.pi / 2))
    angles[left_half] += np.pi
    return angles
def calc_wedge_bounds(levels, level_width):
    """Calculate inner and outer radius bounds of the donut wedge based on levels."""
    # Each level is one ring: the inner radius grows linearly with depth and
    # the outer radius sits one ring-width further out.
    inner_radii = levels * level_width
    outer_radii = inner_radii + level_width
    return inner_radii, outer_radii
def add_wedge_spacing(df, spacing):
    """Add spacing to the `inners` column of the provided data based on level.

    Args:
        df (`pandas.DataFrame`): wedge data with 'level' and 'inners' columns,
            modified in place.
        spacing (float or list(float)): per-level spacing (list, applied in
            order), or a single value applied to every level below the top.
    """
    # .loc replaces DataFrame.ix, which was deprecated in pandas 0.20 and
    # removed in 1.0; boolean-mask + column-label semantics are identical.
    if isinstance(spacing, list):
        # add spacing for each level given in order received
        for level, space in enumerate(spacing):
            df.loc[df['level'] == level, 'inners'] += space
    else:
        df.loc[df['level'] > 0, 'inners'] += spacing
def build_hover_tooltips(hover_spec=None, chart_cols=None):
    """Produce tooltips for column dimensions used in chart configuration.

    Provides convenience for producing tooltips for data with labeled columns. If you
    had two bars in a bar chart, one for female and one for male, you may also want to
    have the tooltip say "Sex: female" and "Sex: male" when hovering.

    Args:
        hover_spec (bool, list(tuple(str, str), list(str), optional): either can be a
            valid input to the `HoverTool` tooltips kwarg, or a boolean `True` to have
            all dimensions specified in chart be added to the tooltip, or a list of
            columns that you do want to be included in the tooltips.
        chart_cols: columns used when ``hover_spec`` is a boolean

    Returns:
        list(tuple(str, str)): list of tooltips
    """
    def tooltips_for(columns):
        # One (label, field-reference) pair per column.
        return [(col, '@' + col) for col in columns]

    if isinstance(hover_spec, bool):
        return tooltips_for(chart_cols)
    if isinstance(hover_spec[0], tuple):
        # Already a valid HoverTool tooltip list; pass through untouched.
        return hover_spec
    return tooltips_for(hover_spec)
def build_agg_tooltip(hover_text=None, agg_text=None, aggregated_col=None):
    """Produce a consistent tooltip based on available chart configuration.

    Args:
        hover_text (str, optional): the desired label for the value to be shown in the
            tooltip
        agg_text (str, optional): any aggregation text used for the chart
        aggregated_col (str, optional): any column name used for aggregation

    Returns:
        tuple(str, str): a single tooltip
    """
    if hover_text is None:
        if agg_text is not None:
            # e.g. "sum of price" when a column name is available.
            hover_text = agg_text
            if isinstance(aggregated_col, str):
                hover_text = '%s of %s' % (hover_text, aggregated_col)
        elif isinstance(aggregated_col, str):
            hover_text = aggregated_col
        else:
            hover_text = 'value'
    return hover_text.title(), "@values"
def label_from_index_dict(chart_index, include_cols=False):
    """
    Args:
        chart_index (dict(str, any) or str or None): identifier for the data group,
            representing either the value of a column (str), no grouping (None), or a dict
            where each key represents a column, and the value is the unique value.

    Returns:
        str: a derived label representing the chart index value
    """
    if isinstance(chart_index, str):
        return chart_index
    if chart_index is None:
        return 'None'
    if isinstance(chart_index, dict):
        if include_cols:
            # e.g. "col1=val1, col2=val2"
            return ', '.join('%s=%s' % pair for pair in chart_index.items())
        # Values only; a single value is unwrapped from its tuple.
        values = tuple(chart_index.values())
        return values[0] if len(values) == 1 else values
    raise ValueError('chart_index type is not recognized, \
                         received %s' % type(chart_index))
def comp_glyphs_to_df(*comp_glyphs):
    """Concatenate the `df` data of the given composite glyphs into one frame."""
    return pd.concat([glyph.df for glyph in comp_glyphs])
def color_in_equal_space(hue, saturation=0.55, value=2.3):
    """
    Args:
        hue (int or double): a numerical value that you want to assign a color

    Returns:
        str: hexadecimal color value to a given number
    """
    # Offsetting successive hues by the golden ratio (mod 1) spreads them
    # evenly around the color wheel.
    golden_ratio = (1 + 5 ** 0.5) / 2
    shifted_hue = (hue + golden_ratio) % 1
    channels = hsv_to_rgb(shifted_hue, saturation, value)
    return '#{:02X}{:02X}{:02X}'.format(*(int(c * 100) for c in channels))
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.state_backend import (_from_j_state_backend, CustomStateBackend,
MemoryStateBackend, FsStateBackend,
RocksDBStateBackend, PredefinedOptions,
EmbeddedRocksDBStateBackend)
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.testing.test_case_utils import PyFlinkTestCase
from pyflink.util.java_utils import load_java_class
class MemoryStateBackendTests(PyFlinkTestCase):
    """Tests for the Python wrapper around the JVM MemoryStateBackend."""
    def test_constant(self):
        """The Python constant must mirror the Java-side default."""
        gateway = get_gateway()
        JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory \
            .MemoryStateBackend
        self.assertEqual(MemoryStateBackend.DEFAULT_MAX_STATE_SIZE,
                         JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
    def test_create_memory_state_backend(self):
        """Every constructor overload should produce a backend instance."""
        self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/"))
        self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/",
                                                "file://var/savepoints/"))
        self.assertIsNotNone(MemoryStateBackend(
            "file://var/checkpoints/", "file://var/savepoints/", 10000000))
        self.assertIsNotNone(MemoryStateBackend(
            "file://var/checkpoints/", "file://var/savepoints/", 10000000, True))
        self.assertIsNotNone(MemoryStateBackend(
            "file://var/checkpoints/", "file://var/savepoints/", 10000000, False))
    def test_get_max_state_size(self):
        """max_state_size defaults to the class constant and is overridable."""
        state_backend = MemoryStateBackend()
        self.assertEqual(state_backend.get_max_state_size(),
                         MemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
        state_backend = MemoryStateBackend(max_state_size=50000)
        self.assertEqual(state_backend.get_max_state_size(), 50000)
class FsStateBackendTests(PyFlinkTestCase):
    """Tests for the Python wrapper around the JVM FsStateBackend."""
    def test_create_fs_state_backend(self):
        """Every constructor overload should produce a backend instance."""
        self.assertIsNotNone(FsStateBackend("file://var/checkpoints/"))
        self.assertIsNotNone(FsStateBackend("file://var/checkpoints/", "file://var/savepoints/"))
        self.assertIsNotNone(FsStateBackend("file://var/checkpoints/",
                                            "file://var/savepoints/", 2048))
        self.assertIsNotNone(FsStateBackend(
            "file://var/checkpoints/", "file://var/savepoints/", 2048, 2048, True))
        self.assertIsNotNone(FsStateBackend(
            "file://var/checkpoints/", "file://var/savepoints/", 2048, 4096))
    def test_get_min_file_size_threshold(self):
        # Default threshold is 20480 bytes (20 KiB) per the assertion below;
        # an explicit file_state_size_threshold overrides it.
        state_backend = FsStateBackend("file://var/checkpoints/")
        self.assertEqual(state_backend.get_min_file_size_threshold(), 20480)
        state_backend = FsStateBackend("file://var/checkpoints/", file_state_size_threshold=2048)
        self.assertEqual(state_backend.get_min_file_size_threshold(), 2048)
    def test_get_checkpoint_path(self):
        """The checkpoint path round-trips (minus the trailing slash)."""
        state_backend = FsStateBackend("file://var/checkpoints/")
        self.assertEqual(state_backend.get_checkpoint_path(), "file://var/checkpoints")
class EmbeddedRocksDBStateBackendTests(PyFlinkTestCase):
    """Tests for the Python wrapper around EmbeddedRocksDBStateBackend."""
    def test_create_rocks_db_state_backend(self):
        """Constructor works with and without incremental checkpointing."""
        self.assertIsNotNone(EmbeddedRocksDBStateBackend())
        self.assertIsNotNone(EmbeddedRocksDBStateBackend(True))
        self.assertIsNotNone(EmbeddedRocksDBStateBackend(False))
    def test_get_set_db_storage_paths(self):
        # Expected local paths differ by platform because the Java side
        # normalizes the file: URIs to native filesystem paths.
        if on_windows():
            storage_path = ["file:/C:/var/db_storage_dir1/",
                            "file:/C:/var/db_storage_dir2/",
                            "file:/C:/var/db_storage_dir3/"]
            expected = ["C:\\var\\db_storage_dir1",
                        "C:\\var\\db_storage_dir2",
                        "C:\\var\\db_storage_dir3"]
        else:
            storage_path = ["file://var/db_storage_dir1/",
                            "file://var/db_storage_dir2/",
                            "file://var/db_storage_dir3/"]
            expected = ["/db_storage_dir1",
                        "/db_storage_dir2",
                        "/db_storage_dir3"]
        state_backend = EmbeddedRocksDBStateBackend()
        state_backend.set_db_storage_paths(*storage_path)
        self.assertEqual(state_backend.get_db_storage_paths(), expected)
    def test_get_set_predefined_options(self):
        """Each predefined options value round-trips through the setter."""
        state_backend = EmbeddedRocksDBStateBackend()
        self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
        state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
        state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.SPINNING_DISK_OPTIMIZED)
        state_backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.FLASH_SSD_OPTIMIZED)
        state_backend.set_predefined_options(PredefinedOptions.DEFAULT)
        self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
    def test_get_set_options(self):
        """An options-factory class name can be set and read back."""
        state_backend = EmbeddedRocksDBStateBackend()
        self.assertIsNone(state_backend.get_options())
        state_backend.set_options(
            "org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory")
        self.assertEqual(state_backend.get_options(),
                         "org.apache.flink.contrib.streaming.state."
                         "DefaultConfigurableOptionsFactory")
    def test_get_set_number_of_transfer_threads(self):
        # Default is 4 transfer threads; the setter overrides it.
        state_backend = EmbeddedRocksDBStateBackend()
        self.assertEqual(state_backend.get_number_of_transfer_threads(), 4)
        state_backend.set_number_of_transfer_threads(8)
        self.assertEqual(state_backend.get_number_of_transfer_threads(), 8)
class RocksDBStateBackendTests(PyFlinkTestCase):
    """Tests for the Python wrapper around the legacy RocksDBStateBackend."""
    def test_create_rocks_db_state_backend(self):
        """Constructor accepts a URI or an explicit checkpoint stream backend."""
        self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/"))
        self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", True))
        self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", False))
        self.assertIsNotNone(RocksDBStateBackend(
            checkpoint_stream_backend=FsStateBackend("file://var/checkpoints/")))
    def test_get_checkpoint_backend(self):
        """A URI-constructed backend wraps an FsStateBackend internally."""
        state_backend = RocksDBStateBackend("file://var/checkpoints/")
        checkpoint_backend = state_backend.get_checkpoint_backend()
        self.assertIsInstance(checkpoint_backend, FsStateBackend)
        self.assertEqual(checkpoint_backend.get_checkpoint_path(), "file://var/checkpoints")
    def test_get_set_db_storage_paths(self):
        # Expected local paths differ by platform because the Java side
        # normalizes the file: URIs to native filesystem paths.
        if on_windows():
            checkpoints_path = "file:/C:/var/checkpoints/"
            storage_path = ["file:/C:/var/db_storage_dir1/",
                            "file:/C:/var/db_storage_dir2/",
                            "file:/C:/var/db_storage_dir3/"]
            expected = ["C:\\var\\db_storage_dir1",
                        "C:\\var\\db_storage_dir2",
                        "C:\\var\\db_storage_dir3"]
        else:
            checkpoints_path = "file://var/checkpoints/"
            storage_path = ["file://var/db_storage_dir1/",
                            "file://var/db_storage_dir2/",
                            "file://var/db_storage_dir3/"]
            expected = ["/db_storage_dir1",
                        "/db_storage_dir2",
                        "/db_storage_dir3"]
        state_backend = RocksDBStateBackend(checkpoints_path)
        state_backend.set_db_storage_paths(*storage_path)
        self.assertEqual(state_backend.get_db_storage_paths(), expected)
    def test_get_set_predefined_options(self):
        """Each predefined options value round-trips through the setter."""
        state_backend = RocksDBStateBackend("file://var/checkpoints/")
        self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
        state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
        state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.SPINNING_DISK_OPTIMIZED)
        state_backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
        self.assertEqual(state_backend.get_predefined_options(),
                         PredefinedOptions.FLASH_SSD_OPTIMIZED)
        state_backend.set_predefined_options(PredefinedOptions.DEFAULT)
        self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
    def test_get_set_options(self):
        """An options-factory class name can be set and read back."""
        state_backend = RocksDBStateBackend("file://var/checkpoints/")
        self.assertIsNone(state_backend.get_options())
        state_backend.set_options(
            "org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory")
        self.assertEqual(state_backend.get_options(),
                         "org.apache.flink.contrib.streaming.state."
                         "DefaultConfigurableOptionsFactory")
    def test_get_set_number_of_transfering_threads(self):
        """The (legacy-named) transfer-thread setter round-trips."""
        state_backend = RocksDBStateBackend("file://var/checkpoints/")
        state_backend.set_number_of_transfering_threads(7)
        self.assertEqual(state_backend.get_number_of_transfering_threads(), 7)
class CustomStateBackendTests(PyFlinkTestCase):
    """Tests for wrapping arbitrary Java-defined state backends."""

    def test_create_custom_state_backend(self):
        """A backend created by a Java factory is wrapped as CustomStateBackend."""
        gateway = get_gateway()
        j_config = gateway.jvm.org.apache.flink.configuration.Configuration()
        j_factory = load_java_class(
            "org.apache.flink.streaming.runtime.tasks."
            "StreamTaskTest$TestMemoryStateBackendFactory").newInstance()
        j_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
        wrapped = _from_j_state_backend(
            j_factory.createFromConfig(j_config, j_classloader))
        self.assertIsInstance(wrapped, CustomStateBackend)
| |
from logger import logger
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.profiler import with_profiles
from perfrunner.tests import PerfTest, TargetIterator
class N1QLTest(PerfTest):
    """Base class for N1QL performance tests.

    Drives the standard phases: load, index creation, query-plan capture,
    background KV mutation, and a foreground query workload.
    """

    # Stats collectors toggled for N1QL runs: query collectors on,
    # host-level collectors off.
    COLLECTORS = {
        'iostat': False,
        'memory': False,
        'n1ql_latency': True,
        'n1ql_stats': True,
        'secondary_stats': True,
    }

    def load(self, *args):
        """Create two data sets with different key prefixes.

        In order to run the N1QL tests we need to satisfy two contradicting
        requirements:
        * Fields should be changed so that the secondary indexes are being
          updated.
        * Fields remain the same (based on a deterministic random algorithm) so
          that we can query them.

        The following workaround was introduced:
        * 50% of documents are being randomly mutated. These documents are not
          used for queries.
        * 50% of documents remain unchanged. Only these documents are used for
          queries.
        """
        load_settings = self.test_config.load_settings
        # Each of the two data sets receives half of the configured items.
        load_settings.items //= 2
        # Documents destined for querying are loaded under the 'n1ql' prefix.
        iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
        super().load(settings=load_settings, target_iterator=iterator)
        super().load(settings=load_settings)

    def access_bg(self, *args):
        # Background phase mutates only the non-query half of the data set;
        # query workers are disabled here (queries run in access()).
        access_settings = self.test_config.access_settings
        access_settings.items //= 2
        access_settings.n1ql_workers = 0
        super().access_bg(settings=access_settings)

    @with_stats
    @with_profiles
    def access(self, *args):
        # NOTE(review): presumably fetched for TLS-enabled query connections;
        # confirm why the certificate is needed only in this phase.
        self.download_certificate()

        # Foreground phase issues queries only (KV workers off) against the
        # unchanged 'n1ql'-prefixed documents.
        access_settings = self.test_config.access_settings
        access_settings.items //= 2
        access_settings.workers = 0
        iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
        super().access(settings=access_settings, target_iterator=iterator)

    def store_plans(self):
        """Dump the EXPLAIN plan of every configured query to a local JSON file."""
        logger.info('Storing query plans')
        for i, query in enumerate(self.test_config.access_settings.n1ql_queries):
            plan = self.rest.explain_n1ql_statement(self.query_nodes[0],
                                                    query['statement'])
            with open('query_plan_{}.json'.format(i), 'w') as fh:
                fh.write(pretty_dict(plan))

    def run(self):
        # Phase order matters: indexes must exist before plans are captured,
        # and the background mutation load runs while queries execute.
        self.load()
        self.wait_for_persistence()
        self.create_indexes()
        self.wait_for_indexing()
        self.store_plans()
        self.access_bg()
        self.access()
        self.report_kpi()
class N1QLLatencyTest(N1QLTest):
    """Reports the 90th-percentile query latency as the KPI."""

    def _report_kpi(self):
        latency_metric = self.metrics.query_latency(percentile=90)
        self.reporter.post(*latency_metric)
class N1QLThroughputTest(N1QLTest):
    """Reports the average N1QL throughput as the KPI."""

    def _report_kpi(self):
        throughput_metric = self.metrics.avg_n1ql_throughput()
        self.reporter.post(*throughput_metric)
class N1QLJoinTest(N1QLTest):
    """N1QL test with multiple document generators (JOIN workloads).

    The comma-separated doc_gen setting pairs one generator with each load
    target; the special 'ref' generator produces the small categories data
    set that the other documents join against.
    """

    ALL_BUCKETS = True

    def load_regular(self, load_settings, target):
        # Same split as N1QLTest.load(): half mutated, half (under the 'n1ql'
        # prefix) kept unchanged for querying. super(N1QLTest, self) is
        # deliberate: it bypasses N1QLTest's override and calls PerfTest's
        # implementation directly.
        load_settings.items //= 2
        super(N1QLTest, self).load(settings=load_settings,
                                   target_iterator=(target, ))
        target.prefix = 'n1ql'
        super(N1QLTest, self).load(settings=load_settings,
                                   target_iterator=(target, ))

    def load_categories(self, load_settings, target):
        # The reference data set is small: one document per category.
        load_settings.items = load_settings.num_categories
        target.prefix = 'n1ql'
        super(N1QLTest, self).load(settings=load_settings,
                                   target_iterator=(target, ))

    def load(self, *args):
        # Pair each configured generator with its load target.
        doc_gens = self.test_config.load_settings.doc_gen.split(',')
        for doc_gen, target in zip(doc_gens, self.target_iterator):
            load_settings = self.test_config.load_settings
            load_settings.doc_gen = doc_gen
            if doc_gen == 'ref':
                self.load_categories(load_settings, target)
            else:
                self.load_regular(load_settings, target)

    def access_bg(self, *args):
        doc_gens = self.test_config.load_settings.doc_gen.split(',')
        for doc_gen, target in zip(doc_gens, self.target_iterator):
            if doc_gen == 'ref':
                # The categories (reference) data set is never mutated.
                continue
            access_settings = self.test_config.access_settings
            access_settings.doc_gen = doc_gen
            access_settings.items //= 2
            access_settings.n1ql_workers = 0
            super(N1QLTest, self).access_bg(settings=access_settings,
                                            target_iterator=(target, ))

    @with_stats
    def access(self, *args):
        # Query-only phase: KV workers off, queries generated by n1ql_gen.
        access_settings = self.test_config.access_settings
        access_settings.items //= 2
        access_settings.workers = 0
        access_settings.buckets = self.test_config.buckets
        access_settings.doc_gen = self.test_config.access_settings.n1ql_gen
        iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
        super(N1QLTest, self).access(settings=access_settings,
                                     target_iterator=iterator)
class N1QLJoinThroughputTest(N1QLJoinTest, N1QLThroughputTest):
    """JOIN workload reported via average N1QL throughput."""
    pass
class N1QLJoinLatencyTest(N1QLJoinTest, N1QLLatencyTest):
    """JOIN workload reported via 90th-percentile query latency."""
    pass
class N1QLBulkTest(N1QLTest):
    """Executes a single bulk N1QL statement and reports its throughput."""

    @with_stats
    @timeit
    def access(self, *args):
        # @timeit makes this method return its own elapsed wall-clock time.
        statement = self.test_config.access_settings.n1ql_queries[0]['statement']
        self.rest.exec_n1ql_statement(self.query_nodes[0], statement)

    def _report_kpi(self, time_elapsed):
        self.reporter.post(
            *self.metrics.bulk_n1ql_throughput(time_elapsed)
        )

    def run(self):
        # No background workload: the bulk statement runs on its own and its
        # duration feeds the KPI.
        self.load()
        self.wait_for_persistence()
        self.create_indexes()
        self.wait_for_indexing()
        self.store_plans()
        time_elapsed = self.access()
        self.report_kpi(time_elapsed)
class N1QLDGMTest(PerfTest):
    """N1QL test variant that inherits straight from PerfTest.

    Unlike N1QLTest it does not split the data set in half or use a separate
    'n1ql' key prefix; the full data set is loaded, mutated and queried.
    """

    COLLECTORS = {
        'n1ql_latency': True,
        'n1ql_stats': True,
        'net': False,
        'secondary_stats': True,
        'secondary_storage_stats': True,
    }

    def load(self, *args):
        # Explicitly call PerfTest.load to avoid any intermediate override.
        PerfTest.load(self, *args)

    def access_bg(self, *args):
        # KV-only background load: query workers disabled.
        access_settings = self.test_config.access_settings
        access_settings.n1ql_workers = 0
        PerfTest.access_bg(self, settings=access_settings)

    @with_stats
    @with_profiles
    def access(self, *args):
        # Query-only foreground phase: KV workers disabled.
        access_settings = self.test_config.access_settings
        access_settings.workers = 0
        PerfTest.access(self, settings=access_settings)

    def run(self):
        self.load()
        self.wait_for_persistence()
        self.create_indexes()
        self.wait_for_indexing()
        self.access_bg()
        self.access()
        self.report_kpi()
class N1QLDGMThroughputTest(N1QLDGMTest, N1QLThroughputTest):
    """DGM workload reported via average N1QL throughput."""
    pass
class N1QLDGMLatencyTest(N1QLDGMTest, N1QLLatencyTest):
    """DGM workload reported via 90th-percentile query latency."""
    pass
class N1QLXattrThroughputTest(N1QLThroughputTest):
    """Throughput test that additionally loads extended attributes (XATTRs)."""

    def xattr_load(self, *args, **kwargs):
        # Mirrors load(): XATTRs are written to both the default and the
        # 'n1ql'-prefixed data sets. Incoming args/kwargs are unused.
        iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
        super().xattr_load()
        super().xattr_load(target_iterator=iterator)

    def run(self):
        # Same pipeline as N1QLTest.run() with an extra XATTR-load step
        # before persistence is awaited.
        self.load()
        self.xattr_load()
        self.wait_for_persistence()
        self.create_indexes()
        self.wait_for_indexing()
        self.store_plans()
        self.access_bg()
        self.access()
        self.report_kpi()
class TpcDsTest(N1QLTest):
    """TPC-DS-style workload: the data set is imported, not generated."""

    COLLECTORS = {
        'iostat': False,
        'memory': False,
        'n1ql_latency': True,
        'n1ql_stats': True,
        'net': False,
        'secondary_debugstats_index': True,
    }

    def run(self):
        # No KV load/persistence phases and no background workload: the data
        # set comes from a pre-built import.
        self.import_data()
        self.create_indexes()
        self.wait_for_indexing()
        self.store_plans()
        self.access()
        self.report_kpi()
class TpcDsLatencyTest(TpcDsTest, N1QLLatencyTest):
    """TPC-DS workload reported via 90th-percentile query latency."""
    pass
class TpcDsThroughputTest(TpcDsTest, N1QLThroughputTest):
    """TPC-DS workload reported via average N1QL throughput."""
    pass
class TpcDsIndexTest(TpcDsTest):
    """Measures initial index build time on the TPC-DS data set."""

    COLLECTORS = {
        'memory': False,
        'net': False,
        'secondary_debugstats_index': True,
    }

    @with_stats
    @with_profiles
    @timeit
    def create_indexes(self):
        # @timeit turns this into the elapsed build time; waiting for
        # indexing to finish is deliberately part of the measurement.
        super().create_indexes()
        self.wait_for_indexing()

    def _report_kpi(self, indexing_time: float):
        self.reporter.post(
            *self.metrics.indexing_time(indexing_time)
        )

    def run(self):
        self.import_data()
        time_elapsed = self.create_indexes()
        self.report_kpi(time_elapsed)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    # On Python 3, expose the builtins module under its Python 2 name and
    # alias the removed `long` type to `int` so generated code keeps working.
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
from . import config
from . import state
# NOTE: auto-generated pyangbind code -- regenerate from the YANG model
# rather than editing by hand.
class encapsulation(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/encapsulation. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration parameters relating to the encapsulation
    used for the network instance
    """

    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "encapsulation"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child YANG containers are wrapped in YANGDynClass so pyangbind can
        # enforce types, track changes and register instance paths.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construction: the single positional argument must expose
            # every element of this container; only changed elements are
            # copied across via the generated setters.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Walk up the parent chain when attached to a data tree; otherwise
        # fall back to this container's schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["network-instances", "network-instance", "encapsulation"]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/encapsulation/config (container)

        YANG Description: Configuration parameters relating to the encapsulation
        of the network instance
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/encapsulation/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to the encapsulation
        of the network instance
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/encapsulation/state (container)

        YANG Description: State parameters relating to the encapsulation of
        the network instance
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/encapsulation/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters relating to the encapsulation of
        the network instance
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties; __builtin__.property is used because the generated
    # code aliases the builtins module for PY2/PY3 compatibility.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
# NOTE: auto-generated pyangbind code (l2 variant of the same container) --
# regenerate from the YANG model rather than editing by hand.
class encapsulation(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/encapsulation. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration parameters relating to the encapsulation
    used for the network instance
    """

    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "encapsulation"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child YANG containers are wrapped in YANGDynClass so pyangbind can
        # enforce types, track changes and register instance paths.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construction: the single positional argument must expose
            # every element of this container; only changed elements are
            # copied across via the generated setters.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Walk up the parent chain when attached to a data tree; otherwise
        # fall back to this container's schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["network-instances", "network-instance", "encapsulation"]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/encapsulation/config (container)

        YANG Description: Configuration parameters relating to the encapsulation
        of the network instance
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/encapsulation/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to the encapsulation
        of the network instance
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/encapsulation/state (container)

        YANG Description: State parameters relating to the encapsulation of
        the network instance
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/encapsulation/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters relating to the encapsulation of
        the network instance
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties; __builtin__.property is used because the generated
    # code aliases the builtins module for PY2/PY3 compatibility.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| |
from __future__ import division
import random
import math
from .base import Element
from .fields import (ChoiceField, IntegerField, BooleanField,
StructField, StringField, ListField)
# Fix: "__all__" previously listed "Bar", which matches no class in this
# module (the bar element is BarChart), so `from <module> import *` raised
# AttributeError.
__all__ = ['Img', 'Rectangle', 'Circle', 'Arrow', 'Line', 'BarChart']
class Svg(Element):
    """Base class for inline SVG elements rendered into a 100x100 viewBox."""

    tag = "svg"
    # Fill palette; reused cyclically by make_color().
    colors = ["#006a4e", "#50404d", "#922724", "#ffae42", "#987654"]

    def make_color(self, i):
        """Return the i-th palette color, wrapping around at the end."""
        return self.colors[i % len(self.colors)]

    def get_content(self):
        # Subclasses must supply the inner SVG markup.
        raise NotImplementedError

    def render_content(self):
        return self.get_content()

    def get_attrib(self):
        attrib = {
            "class": "img",
            "viewBox": "0 0 100 100",
            "preserveAspectRatio": "none"
        }
        return attrib
class Img(Svg):
    # Placeholder image: a framed box with both diagonals drawn across it.
    allow_children = False

    def get_content(self):
        return """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <rect width="100" height="100" style="stroke-width:1" />
            <path d="M0,0 L100,100 " style="stroke-width:0.5"/>
            <path d="M100,0 L0,100" style="stroke-width:0.5"/>
        </g>
        """
class Rectangle(Svg):
    """A rectangle element, optionally drawn with rounded corners."""

    rounded = BooleanField(default=True)

    def get_content(self):
        # Guard clause: the plain variant is a full-size square outline.
        if not self.get("rounded"):
            return """
        <rect height="100" width="100" style="stroke:#000; fill: none"/>
        """
        # Rounded variant: smaller rect with rx/ry corner radii.
        return """
        <rect x="10" y="10" height="50" width="50"
            rx="5" ry="5"
            style="stroke:#000; fill: none"/>
        """
class Arrow(Svg):
    """An arrow element pointing left, right, up or down."""

    dir = ChoiceField(["left", "right", "up", "down"], required=True)

    def get_content(self):
        # Local renamed to avoid shadowing the builtin dir().
        direction = self.get("dir")
        if direction == "left":
            return """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M5,0 L0,2" style="stroke-width:0.5"/>
            <path d="M0,2 L20,2" style="stroke-width:0.5"/>
            <path d="M0,2 L5,4" style="stroke-width:0.5"/>
        </g>
        """
        if direction == "right":
            return """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M15,0 L20,2" style="stroke-width:0.5"/>
            <path d="M0,2 L20,2" style="stroke-width:0.5"/>
            <path d="M20,2 L15,4" style="stroke-width:0.5"/>
        </g>
        """
        if direction == "up":
            return """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M0,5 L2,0" style="stroke-width:0.5"/>
            <path d="M2,0 L2,20" style="stroke-width:0.5"/>
            <path d="M2,0 L4,5" style="stroke-width:0.5"/>
        </g>
        """
        if direction == "down":
            return """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M0,15 L2,20" style="stroke-width:0.5"/>
            <path d="M2,0 L2,20" style="stroke-width:0.5"/>
            <path d="M2,20 L4,15" style="stroke-width:0.5"/>
        </g>
        """
class Circle(Svg):
    # A circle outline filling the whole 100x100 viewBox.
    def get_content(self):
        return"""
        <circle cx="50" cy="50" r="50" style="stroke:#000; fill:none"/>
        """
class Test(Element):
    # Demo element exercising StructField composition; not listed in __all__.
    items = StructField(name=StringField(),
                        age=IntegerField(),
                        biceps=IntegerField(default=22),
                        triceps=BooleanField(required=True))

    def render(self):
        # NOTE(review): leftover debug print -- consider removing.
        print(self.get("items"), "<<<<<<<<<")
        return "<test></test>"
class Line(Svg):
    """A straight line element, either horizontal ("h") or vertical ("v")."""

    align = ChoiceField(["h","v"], required=True)

    def get_content(self):
        orientation = self.get("align")
        if orientation == "h":
            return """
        <path d="M0,2 L20,2" style="stroke-width:0.5; stroke:#000; "/>
        """
        if orientation == "v":
            return """
        <path d="M2,0 L2,20" style="stroke-width:0.5; stroke:#000; "/>
        """
class BarChart(Svg):
    """A bar chart: one rect per item, scaled relative to the tallest bar."""

    items = ListField(coerce=int, required=True, default=[1, 10, 5, 20, 2, 22])

    def get_content(self):
        items = self.get("items")
        # Headroom of 2 units so the tallest bar does not touch the top edge.
        high = max(items) + 2
        n = len(items)
        bars = []
        for count, i in enumerate(items):
            # Bars share 96 of the 100 horizontal units, offset 2 from left.
            x = (count * 96) / n + 2
            bars.append("""
        <rect x="{x}" y="{y}" height="{height}" width="{width}" style="stroke:#000; fill:{color}"/>
        """.format(x=x, y=100 - (i * 100) / high, height=(i * 100) / high,
                   width=96 / n, color=self.make_color(count)))
        # Bug fix: the axes group used to be passed as the *separator* of the
        # join, so it was duplicated between bars and omitted entirely when
        # fewer than two bars existed. Emit the bars once, then the axes once.
        axes = """
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M0,0 L0,100" style="stroke-width:0.5; stroke:#000; "/>
            <path d="M0,100 L100,100" style="stroke-width:0.5; stroke:#000; "/>
        </g>
        """
        return "".join(bars) + axes
class PieChart(Svg):
    # Renders each item as a filled circular sector proportional to its
    # share of the total.
    items = ListField(coerce=int, required=True, default=[12, 9, 5, 90, 10, 30])

    def get_content(self):
        items = self.get("items")
        total = sum(items)
        pie = []
        radius = 49
        center = (50, 50)
        # Start at 12 o'clock and advance clockwise.
        last = (center[0], center[1]-radius)
        sector = 0
        arc=0
        for c,i in enumerate(items):
            # Cumulative end angle of this sector, in radians.
            angle = (i*math.pi*2)/total + sector
            # SVG large-arc-flag: set when the sector spans more than a half turn.
            if angle-sector>math.pi:
                arc=1
            else:
                arc=0
            x = center[0] + radius*math.sin(angle)
            y = center[1] - radius*math.cos(angle)
            current = (x, y)
            # Wedge: two radii joined by an elliptical arc, closed with "z".
            temp="""
        <path d="M{center[0]},{center[1]} L{start[0]},{start[1]}
            A{radius},{radius} 0 {arc},1 {end[0]},{end[1]} z"
            style="stroke-width:0.5; stroke:#000; fill:{color} "/>
        """.format(center=center, start=last, end=current, radius=radius, arc=arc, color=self.make_color(c))
            last = current
            sector = angle
            pie.append(temp)
        return "\n".join(pie)
class LineChart(Svg):
    # Poly-line chart with a circle marker at the start of each segment.
    items =ListField(coerce=int, required=True, default=[1, 5, 1, 9])

    def get_content(self):
        items = self.get("items")
        # Headroom of 2 units above the highest point.
        high = max(items)+2
        line=[]
        n=len(items)
        start=(0,0)
        next=(0,0)  # NOTE(review): shadows the builtin next()
        for count,i in enumerate(items):
            id="#markerCircle"  # NOTE(review): shadows the builtin id()
            if count == n-1:
                # Last point gets no start marker.
                id=""
            start=((count+1)*98/n,100-(i*100)/high)
            if count != 0:
                temp="""
        <defs>
            <marker id="markerCircle" markerWidth="8" markerHeight="8" refx="5" refy="5">
                <circle cx="5" cy="5" r="2" style="stroke: none; fill:#000000;"/>
            </marker>
        </defs>
        <path d="M{start[0]},{start[1]} L{next[0]},{next[1]}" style="stroke-width:0.5; stroke:#000; marker-start: url({id});"/>
        """.format(start=start,next=next,id=id)
                line.append(temp)
            next=start
        # NOTE(review): the axes group is used as the join *separator*, so it
        # is repeated between segments and omitted when fewer than two
        # segments exist -- looks unintentional; confirm intended output.
        return"""
        <g style="stroke-width:1px; stroke:#000; fill:none; ">
            <path d="M0,0 L0,100" style="stroke-width:0.5; stroke:#000; "/>
            <path d="M0,100 L100,100" style="stroke-width:0.5; stroke:#000; "/>
        </g>
        """.join(line)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator as ea
class _EventGenerator(object):
    """Fake event generator yielding manually queued tf.Event protos.

    Stands in for the file-backed generator when testing EventAccumulator.
    """

    def __init__(self):
        # FIFO queue of pending tf.Event protos.
        self.items = []

    def Load(self):
        """Yield and consume all queued events, oldest first."""
        while self.items:
            yield self.items.pop(0)

    def AddScalar(self, tag, wall_time=0, step=0, value=0):
        """Queue an event carrying a single scalar summary value."""
        event = tf.Event(
            wall_time=wall_time,
            step=step,
            summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                       simple_value=value)]))
        self.AddEvent(event)

    def AddHistogram(self,
                     tag,
                     wall_time=0,
                     step=0,
                     hmin=1,
                     hmax=2,
                     hnum=3,
                     hsum=4,
                     hsum_squares=5,
                     hbucket_limit=None,
                     hbucket=None):
        """Queue an event carrying a single histogram summary."""
        histo = tf.HistogramProto(min=hmin,
                                  max=hmax,
                                  num=hnum,
                                  sum=hsum,
                                  sum_squares=hsum_squares,
                                  bucket_limit=hbucket_limit,
                                  bucket=hbucket)
        event = tf.Event(wall_time=wall_time,
                         step=step,
                         summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                    histo=histo)]))
        self.AddEvent(event)

    def AddImage(self,
                 tag,
                 wall_time=0,
                 step=0,
                 encoded_image_string=b'imgstr',
                 width=150,
                 height=100):
        """Queue an event carrying a single image summary."""
        image = tf.Summary.Image(encoded_image_string=encoded_image_string,
                                 width=width,
                                 height=height)
        event = tf.Event(wall_time=wall_time,
                         step=step,
                         summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                    image=image)]))
        self.AddEvent(event)

    def AddAudio(self,
                 tag,
                 wall_time=0,
                 step=0,
                 encoded_audio_string=b'sndstr',
                 content_type='audio/wav',
                 sample_rate=44100,
                 length_frames=22050):
        """Queue an event carrying a single audio summary."""
        audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
                                 content_type=content_type,
                                 sample_rate=sample_rate,
                                 length_frames=length_frames)
        event = tf.Event(wall_time=wall_time,
                         step=step,
                         summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                    audio=audio)]))
        self.AddEvent(event)

    def AddEvent(self, event):
        """Append an already-built event to the queue."""
        self.items.append(event)
class EventAccumulatorTest(tf.test.TestCase):

    def assertTagsEqual(self, tags1, tags2):
        """Assert two tag dictionaries are equal, ignoring list ordering."""
        # Same key sets first.
        self.assertItemsEqual(tags1, tags2)
        # Then compare each value; list values are order-insensitive.
        for key, value in tags1.items():
            if isinstance(value, list):
                self.assertItemsEqual(value, tags2[key])
            else:
                self.assertEqual(value, tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
    def setUp(self):
        super(MockingEventAccumulatorTest, self).setUp()
        self.stubs = googletest.StubOutForTesting()
        # Expected Tags() result of an accumulator that has seen no events.
        self.empty = {ea.IMAGES: [],
                      ea.AUDIO: [],
                      ea.SCALARS: [],
                      ea.HISTOGRAMS: [],
                      ea.COMPRESSED_HISTOGRAMS: [],
                      ea.GRAPH: False,
                      ea.RUN_METADATA: []}
        # Monkey-patch ea.EventAccumulator so tests can pass an
        # _EventGenerator directly: the fake constructor reroutes
        # _GeneratorFromPath to return the supplied generator, then delegates
        # to the real class. Restored in tearDown().
        self._real_constructor = ea.EventAccumulator
        self._real_generator = ea._GeneratorFromPath

        def _FakeAccumulatorConstructor(generator, *args, **kwargs):
            ea._GeneratorFromPath = lambda x: generator
            return self._real_constructor(generator, *args, **kwargs)

        ea.EventAccumulator = _FakeAccumulatorConstructor
    def tearDown(self):
        # Undo the monkey-patching from setUp so later tests see the real
        # EventAccumulator and path-based generator factory.
        self.stubs.CleanUp()
        ea.EventAccumulator = self._real_constructor
        ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
self.assertEqual(acc.Tags(), self.empty)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1,
max=2,
num=3,
sum=4,
sum_squares=5,
bucket_limit=[1, 2, 3],
bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2,
max=3,
num=4,
sum=5,
sum_squares=6,
bucket_limit=[2, 3, 4],
bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (
7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), (7500, 2 + 2 / 3
), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testPercentile(self):
def AssertExpectedForBps(bps, expected):
output = acc._Percentile(bps, bucket_limit, cumsum_weights, histo_min,
histo_max, histo_num)
self.assertAlmostEqual(expected, output)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
bucket_limit = [1, 2, 3, 4]
histo_num = 100
## All weights in the first bucket
cumsum_weights = [10000, 10000, 10000, 10000]
histo_min = -1
histo_max = .9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in second bucket
cumsum_weights = [0, 10000, 10000, 10000]
histo_min = 1.1
histo_max = 1.8
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in the last bucket
cumsum_weights = [0, 0, 0, 10000]
histo_min = 3.1
histo_max = 3.6
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between two buckets
cumsum_weights = [0, 4000, 10000, 10000]
histo_min = 1.1
histo_max = 2.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 4000, histo_min,
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between all buckets
cumsum_weights = [1000, 4000, 8000, 10000]
histo_min = -1
histo_max = 3.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 1000, 4000, bucket_limit[0],
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(9000, ea._Remap(9000, 8000, 10000, bucket_limit[2],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Most weight in first bucket
cumsum_weights = [9000, 10000, 10000, 10000]
histo_min = -1
histo_max = 1.1
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(9500, ea._Remap(9500, 9000, 10000, bucket_limit[0],
histo_max))
AssertExpectedForBps(10000, histo_max)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
im2 = ea.ImageEvent(wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
gen.AddImage('im1',
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
gen.AddImage('im2',
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testAudio(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
snd1 = ea.AudioEvent(wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
snd2 = ea.AudioEvent(wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
gen.AddAudio('snd1',
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
gen.AddAudio('snd2',
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
acc.Reload()
self.assertEqual(acc.Audio('snd1'), [snd1])
self.assertEqual(acc.Audio('snd2'), [snd2])
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
with self.assertRaises(KeyError):
acc.Audio('s1')
with self.assertRaises(KeyError):
acc.Audio('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.Event(wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
gen.AddAudio('snd1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.AUDIO: ['snd1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testOrphanedDataNotDiscardedIfFlagUnset(self):
"""Tests that events are not discarded if purge_orphaned_data is false.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300, 101,
201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = tf.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = tf.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = SessionLog(status=SessionLog.START)
gen.AddEvent(tf.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
class RealisticEventAccumulatorTest(EventAccumulatorTest):
  """Tests EventAccumulator against real event files written to disk."""

  def setUp(self):
    super(RealisticEventAccumulatorTest, self).setUp()

  def testScalarsRealistically(self):
    """Test accumulator by writing values and then reading them."""

    def FakeScalarSummary(tag, value):
      # Wraps a single simple_value in a Summary proto.
      value = tf.Summary.Value(tag=tag, simple_value=value)
      summary = tf.Summary(value=[value])
      return summary

    directory = os.path.join(self.get_temp_dir(), 'values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)
    writer = tf.train.SummaryWriter(directory, max_queue=100)
    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
      # Add a graph to the summary writer.
      writer.add_graph(graph)
      run_metadata = tf.RunMetadata()
      device_stats = run_metadata.step_stats.dev_stats.add()
      device_stats.device = 'test device'
      writer.add_run_metadata(run_metadata, 'test run')
    # Write a bunch of events using the writer
    for i in xrange(30):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()
    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.IMAGES: [],
        ea.AUDIO: [],
        ea.SCALARS: ['id', 'sq'],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.RUN_METADATA: ['test run']
    })
    id_events = acc.Scalars('id')
    sq_events = acc.Scalars('sq')
    self.assertEqual(30, len(id_events))
    self.assertEqual(30, len(sq_events))
    for i in xrange(30):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)
    # Write a few more events to test incremental reloading
    for i in xrange(30, 40):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()
    # Verify we can now see all of the data
    acc.Reload()
    self.assertEqual(40, len(id_events))
    self.assertEqual(40, len(sq_events))
    for i in xrange(40):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
# Run all test cases in this module when invoked directly.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import re
import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from testtools import matchers
from nova import exception
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
class DsUtilTestCase(test.NoDBTestCase):
    """Unit tests for the datastore helpers in nova.virt.vmwareapi.ds_util."""

    def setUp(self):
        super(DsUtilTestCase, self).setUp()
        self.session = fake.FakeSession()
        self.flags(api_retry_count=1, group='vmware')
        fake.reset()

    def tearDown(self):
        super(DsUtilTestCase, self).tearDown()
        fake.reset()

    def test_file_delete(self):
        """file_delete issues a DeleteDatastoreFile_Task and waits on it."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('DeleteDatastoreFile_Task', method)
            name = kwargs.get('name')
            self.assertEqual('[ds] fake/path', name)
            datacenter = kwargs.get('datacenter')
            self.assertEqual('fake-dc-ref', datacenter)
            return 'fake_delete_task'

        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            ds_util.file_delete(self.session,
                                ds_path, 'fake-dc-ref')
            _wait_for_task.assert_has_calls([
                mock.call('fake_delete_task')])

    def test_file_copy(self):
        """file_copy issues a CopyDatastoreFile_Task and waits on it."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('CopyDatastoreFile_Task', method)
            src_name = kwargs.get('sourceName')
            self.assertEqual('[ds] fake/path/src_file', src_name)
            src_dc_ref = kwargs.get('sourceDatacenter')
            self.assertEqual('fake-src-dc-ref', src_dc_ref)
            dst_name = kwargs.get('destinationName')
            self.assertEqual('[ds] fake/path/dst_file', dst_name)
            dst_dc_ref = kwargs.get('destinationDatacenter')
            self.assertEqual('fake-dst-dc-ref', dst_dc_ref)
            return 'fake_copy_task'

        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            src_ds_path = ds_util.DatastorePath('ds', 'fake/path', 'src_file')
            dst_ds_path = ds_util.DatastorePath('ds', 'fake/path', 'dst_file')
            ds_util.file_copy(self.session,
                              str(src_ds_path), 'fake-src-dc-ref',
                              str(dst_ds_path), 'fake-dst-dc-ref')
            _wait_for_task.assert_has_calls([
                mock.call('fake_copy_task')])

    def test_file_move(self):
        """file_move issues a MoveDatastoreFile_Task and waits on it."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('MoveDatastoreFile_Task', method)
            sourceName = kwargs.get('sourceName')
            self.assertEqual('[ds] tmp/src', sourceName)
            destinationName = kwargs.get('destinationName')
            self.assertEqual('[ds] base/dst', destinationName)
            sourceDatacenter = kwargs.get('sourceDatacenter')
            self.assertEqual('fake-dc-ref', sourceDatacenter)
            destinationDatacenter = kwargs.get('destinationDatacenter')
            self.assertEqual('fake-dc-ref', destinationDatacenter)
            return 'fake_move_task'

        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
            dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
            ds_util.file_move(self.session,
                              'fake-dc-ref', src_ds_path, dst_ds_path)
            _wait_for_task.assert_has_calls([
                mock.call('fake_move_task')])

    def test_disk_move(self):
        """disk_move issues a MoveVirtualDisk_Task and waits on it."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('MoveVirtualDisk_Task', method)
            src_name = kwargs.get('sourceName')
            self.assertEqual('[ds] tmp/src', src_name)
            dest_name = kwargs.get('destName')
            self.assertEqual('[ds] base/dst', dest_name)
            src_datacenter = kwargs.get('sourceDatacenter')
            self.assertEqual('fake-dc-ref', src_datacenter)
            dest_datacenter = kwargs.get('destDatacenter')
            self.assertEqual('fake-dc-ref', dest_datacenter)
            return 'fake_move_task'

        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            ds_util.disk_move(self.session,
                              'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst')
            _wait_for_task.assert_has_calls([
                mock.call('fake_move_task')])

    def test_disk_copy(self):
        """disk_copy invokes CopyVirtualDisk_Task with the expected args."""
        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              return_value=mock.sentinel.cm)
        ) as (_wait_for_task, _call_method):
            ds_util.disk_copy(self.session, mock.sentinel.dc_ref,
                              mock.sentinel.source_ds, mock.sentinel.dest_ds)
            _wait_for_task.assert_called_once_with(mock.sentinel.cm)
            _call_method.assert_called_once_with(
                mock.ANY, 'CopyVirtualDisk_Task', 'VirtualDiskManager',
                sourceName='sentinel.source_ds',
                destDatacenter=mock.sentinel.dc_ref,
                sourceDatacenter=mock.sentinel.dc_ref, force=False,
                destName='sentinel.dest_ds')

    def test_disk_delete(self):
        """disk_delete invokes DeleteVirtualDisk_Task with the expected args."""
        with contextlib.nested(
            mock.patch.object(self.session, '_wait_for_task'),
            mock.patch.object(self.session, '_call_method',
                              return_value=mock.sentinel.cm)
        ) as (_wait_for_task, _call_method):
            ds_util.disk_delete(self.session,
                                'fake-dc-ref', '[ds] tmp/disk.vmdk')
            _wait_for_task.assert_called_once_with(mock.sentinel.cm)
            _call_method.assert_called_once_with(
                mock.ANY, 'DeleteVirtualDisk_Task', 'VirtualDiskManager',
                datacenter='fake-dc-ref', name='[ds] tmp/disk.vmdk')

    def test_mkdir(self):
        """mkdir issues MakeDirectory with createParentDirectories set."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('MakeDirectory', method)
            name = kwargs.get('name')
            self.assertEqual('[ds] fake/path', name)
            datacenter = kwargs.get('datacenter')
            self.assertEqual('fake-dc-ref', datacenter)
            createParentDirectories = kwargs.get('createParentDirectories')
            self.assertTrue(createParentDirectories)

        with mock.patch.object(self.session, '_call_method',
                               fake_call_method):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')

    def test_file_exists(self):
        """file_exists returns True when the search task finds the file."""
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'SearchDatastore_Task':
                ds_browser = args[0]
                self.assertEqual('fake-browser', ds_browser)
                datastorePath = kwargs.get('datastorePath')
                self.assertEqual('[ds] fake/path', datastorePath)
                return 'fake_exists_task'

            # Should never get here
            self.fail()

        def fake_wait_for_task(task_ref):
            if task_ref == 'fake_exists_task':
                result_file = fake.DataObject()
                result_file.path = 'fake-file'

                result = fake.DataObject()
                result.file = [result_file]
                result.path = '[ds] fake/path'

                task_info = fake.DataObject()
                task_info.result = result

                return task_info

            # Should never get here
            self.fail()

        with contextlib.nested(
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method),
                mock.patch.object(self.session, '_wait_for_task',
                                  fake_wait_for_task)):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            file_exists = ds_util.file_exists(self.session,
                    'fake-browser', ds_path, 'fake-file')
            self.assertTrue(file_exists)

    def test_file_exists_fails(self):
        """file_exists returns False when the search task raises not-found."""
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'SearchDatastore_Task':
                return 'fake_exists_task'

            # Should never get here
            self.fail()

        def fake_wait_for_task(task_ref):
            if task_ref == 'fake_exists_task':
                raise vexc.FileNotFoundException()

            # Should never get here
            self.fail()

        with contextlib.nested(
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method),
                mock.patch.object(self.session, '_wait_for_task',
                                  fake_wait_for_task)):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            file_exists = ds_util.file_exists(self.session,
                    'fake-browser', ds_path, 'fake-file')
            self.assertFalse(file_exists)

    def _mock_get_datastore_calls(self, *datastores):
        """Mock vim_util calls made by get_datastore."""
        datastores_i = [None]

        # For the moment, at least, this list of datastores is simply passed to
        # get_properties_for_a_collection_of_objects, which we mock below. We
        # don't need to over-complicate the fake function by worrying about its
        # contents.
        fake_ds_list = ['fake-ds']

        def fake_call_method(module, method, *args, **kwargs):
            # Mock the call which returns a list of datastores for the cluster
            if (module == ds_util.vim_util and
                    method == 'get_dynamic_property' and
                    args == ('fake-cluster', 'ClusterComputeResource',
                             'datastore')):
                fake_ds_mor = fake.DataObject()
                fake_ds_mor.ManagedObjectReference = fake_ds_list
                return fake_ds_mor

            # Return the datastore result sets we were passed in, in the order
            # given
            if (module == ds_util.vim_util and
                    method == 'get_properties_for_a_collection_of_objects' and
                    args[0] == 'Datastore' and
                    args[1] == fake_ds_list):
                # Start a new iterator over given datastores
                datastores_i[0] = iter(datastores)
                # Use the builtin next() so this works on Python 2 and 3.
                return next(datastores_i[0])

            # Continue returning results from the current iterator.
            if (module == ds_util.vim_util and
                    method == 'continue_to_get_objects'):
                try:
                    return next(datastores_i[0])
                except StopIteration:
                    return None

            # Sentinel that get_datastore's use of vim has changed
            self.fail('Unexpected vim call in get_datastore: %s' % method)

        return mock.patch.object(self.session, '_call_method',
                                 side_effect=fake_call_method)

    def test_get_datastore(self):
        """Without a regex, the first valid datastore is returned."""
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore())
        fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
                                               False, "normal"))
        fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
                                               True, "inMaintenance"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster')
        self.assertEqual("fake-ds", result.name)
        self.assertEqual(units.Ti, result.capacity)
        self.assertEqual(500 * units.Gi, result.freespace)

    def test_get_datastore_with_regex(self):
        # Test with a regex that matches with a datastore
        # NOTE: raw string avoids the invalid '\d' escape sequence.
        datastore_valid_regex = re.compile(r"^openstack.*\d$")
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("openstack-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds1"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster',
                                           datastore_valid_regex)
        self.assertEqual("openstack-ds0", result.name)

    def test_get_datastore_with_token(self):
        """Results spread across paginated retrieval are all considered."""
        regex = re.compile(r"^ds.*\d$")
        fake0 = fake.FakeRetrieveResult()
        fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
        fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
        setattr(fake0, 'token', 'token-0')
        fake1 = fake.FakeRetrieveResult()
        fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
        fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))

        with self._mock_get_datastore_calls(fake0, fake1):
            result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
        self.assertEqual("ds2", result.name)

    def test_get_datastore_with_list(self):
        # Test with a regex containing whitelist of datastores
        datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("openstack-ds0"))
        fake_objects.add_object(fake.Datastore("openstack-ds1"))
        fake_objects.add_object(fake.Datastore("openstack-ds2"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster',
                                           datastore_valid_regex)
        self.assertNotEqual("openstack-ds1", result.name)

    def test_get_datastore_with_regex_error(self):
        # Test with a regex that has no match
        # Checks if code raises DatastoreNotFound with a specific message
        datastore_invalid_regex = re.compile("unknown-ds")
        exp_message = ("Datastore regex %s did not match any datastores"
                       % datastore_invalid_regex.pattern)
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("fake-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds1"))
        # assertRaisesRegExp would have been a good choice instead of
        # try/catch block, but it's available only from Py 2.7.
        try:
            with self._mock_get_datastore_calls(fake_objects):
                ds_util.get_datastore(self.session, 'fake-cluster',
                                      datastore_invalid_regex)
        except exception.DatastoreNotFound as e:
            self.assertEqual(exp_message, e.args[0])
        else:
            self.fail("DatastoreNotFound Exception was not raised with "
                      "message: %s" % exp_message)

    def test_get_datastore_without_datastore(self):
        """An empty retrieval result raises DatastoreNotFound."""
        self.assertRaises(exception.DatastoreNotFound,
                          ds_util.get_datastore,
                          fake.FakeObjectRetrievalSession(None),
                          cluster="fake-cluster")

    def test_get_datastore_inaccessible_ds(self):
        """An inaccessible datastore is never selected."""
        data_store = fake.Datastore()
        data_store.set("summary.accessible", False)

        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(data_store)

        with self._mock_get_datastore_calls(fake_objects):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def test_get_datastore_ds_in_maintenance(self):
        """A datastore in maintenance mode is never selected."""
        data_store = fake.Datastore()
        data_store.set("summary.maintenanceMode", "inMaintenance")

        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(data_store)

        with self._mock_get_datastore_calls(fake_objects):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def test_get_datastore_no_host_in_cluster(self):
        """A cluster with no hosts yields DatastoreNotFound."""
        def fake_call_method(module, method, *args, **kwargs):
            return ''

        with mock.patch.object(self.session, '_call_method',
                               fake_call_method):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def _test_is_datastore_valid(self, accessible=True,
                                 maintenance_mode="normal",
                                 type="VMFS",
                                 datastore_regex=None,
                                 ds_types=ds_util.ALL_SUPPORTED_DS_TYPES):
        """Build a summary property dict and run _is_datastore_valid.

        NOTE: the 'type' parameter shadows the builtin but is kept for
        interface compatibility with existing callers.
        """
        propdict = {}
        propdict["summary.accessible"] = accessible
        propdict["summary.maintenanceMode"] = maintenance_mode
        propdict["summary.type"] = type
        propdict["summary.name"] = "ds-1"
        return ds_util._is_datastore_valid(propdict, datastore_regex, ds_types)

    def test_is_datastore_valid(self):
        """Every supported datastore type is accepted when healthy."""
        for ds_type in ds_util.ALL_SUPPORTED_DS_TYPES:
            self.assertTrue(self._test_is_datastore_valid(True,
                                                          "normal",
                                                          ds_type))

    def test_is_datastore_valid_inaccessible_ds(self):
        self.assertFalse(self._test_is_datastore_valid(False,
                                                       "normal",
                                                       "VMFS"))

    def test_is_datastore_valid_ds_in_maintenance(self):
        self.assertFalse(self._test_is_datastore_valid(True,
                                                       "inMaintenance",
                                                       "VMFS"))

    def test_is_datastore_valid_ds_type_invalid(self):
        self.assertFalse(self._test_is_datastore_valid(True,
                                                       "normal",
                                                       "vfat"))

    def test_is_datastore_valid_not_matching_regex(self):
        datastore_regex = re.compile("ds-2")
        self.assertFalse(self._test_is_datastore_valid(True,
                                                       "normal",
                                                       "VMFS",
                                                       datastore_regex))

    def test_is_datastore_valid_matching_regex(self):
        datastore_regex = re.compile("ds-1")
        self.assertTrue(self._test_is_datastore_valid(True,
                                                      "normal",
                                                      "VMFS",
                                                      datastore_regex))
class DatastoreTestCase(test.NoDBTestCase):
    """Unit tests for the ds_util.Datastore value object."""

    def test_ds(self):
        """A fully specified datastore exposes all of its attributes."""
        datastore = ds_util.Datastore(
            "fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
        self.assertEqual('fake_ref', datastore.ref)
        self.assertEqual('ds_name', datastore.name)
        self.assertEqual(2 * units.Gi, datastore.capacity)
        self.assertEqual(1 * units.Gi, datastore.freespace)

    def test_ds_invalid_space(self):
        """Freespace above capacity, or None capacity with freespace, fails."""
        for capacity, freespace in ((1 * units.Gi, 2 * units.Gi),
                                    (None, 2 * units.Gi)):
            self.assertRaises(ValueError, ds_util.Datastore,
                              "fake_ref", "ds_name", capacity, freespace)

    def test_ds_no_capacity_no_freespace(self):
        """Capacity and freespace are optional and default to None."""
        datastore = ds_util.Datastore("fake_ref", "ds_name")
        self.assertIsNone(datastore.capacity)
        self.assertIsNone(datastore.freespace)

    def test_ds_invalid(self):
        """Both the managed object reference and the name are mandatory."""
        self.assertRaises(ValueError, ds_util.Datastore, None, "ds_name")
        self.assertRaises(ValueError, ds_util.Datastore, "fake_ref", None)

    def test_build_path(self):
        """build_path composes a DatastorePath rooted at this datastore."""
        datastore = ds_util.Datastore("fake_ref", "ds_name")
        path = datastore.build_path("some_dir", "foo.vmdk")
        self.assertEqual('[ds_name] some_dir/foo.vmdk', str(path))
class DatastorePathTestCase(test.NoDBTestCase):
    """Tests for ds_util.DatastorePath construction, parsing and equality."""

    def test_ds_path(self):
        # Every accessor is derived from the datastore name and path parts.
        p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
        self.assertEqual('[dsname] a/b/c/file.iso', str(p))
        self.assertEqual('a/b/c/file.iso', p.rel_path)
        self.assertEqual('a/b/c', p.parent.rel_path)
        self.assertEqual('[dsname] a/b/c', str(p.parent))
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('file.iso', p.basename)
        self.assertEqual('a/b/c', p.dirname)

    def test_ds_path_no_ds_name(self):
        # An empty or missing datastore name must be rejected.
        bad_args = [
                ('', ['a/b/c', 'file.iso']),
                (None, ['a/b/c', 'file.iso'])]
        for t in bad_args:
            self.assertRaises(
                ValueError, ds_util.DatastorePath,
                t[0], *t[1])

    def test_ds_path_invalid_path_components(self):
        # None is never a legal path component, in any position.
        bad_args = [
                ('dsname', [None]),
                ('dsname', ['', None]),
                ('dsname', ['a', None]),
                ('dsname', ['a', None, 'b']),
                ('dsname', [None, '']),
                ('dsname', [None, 'b'])]
        for t in bad_args:
            self.assertRaises(
                ValueError, ds_util.DatastorePath,
                t[0], *t[1])

    def test_ds_path_no_subdir(self):
        # A file directly at the datastore root has an empty dirname.
        args = [
                ('dsname', ['', 'x.vmdk']),
                ('dsname', ['x.vmdk'])]
        canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
        self.assertEqual('[dsname] x.vmdk', str(canonical_p))
        self.assertEqual('', canonical_p.dirname)
        self.assertEqual('x.vmdk', canonical_p.basename)
        self.assertEqual('x.vmdk', canonical_p.rel_path)
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))

    def test_ds_path_ds_only(self):
        # A path naming only the datastore stringifies as '[dsname]'.
        args = [
                ('dsname', []),
                ('dsname', ['']),
                ('dsname', ['', ''])]
        canonical_p = ds_util.DatastorePath('dsname')
        self.assertEqual('[dsname]', str(canonical_p))
        self.assertEqual('', canonical_p.rel_path)
        self.assertEqual('', canonical_p.basename)
        self.assertEqual('', canonical_p.dirname)
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))
            self.assertEqual(canonical_p.rel_path, p.rel_path)

    def test_ds_path_equivalence(self):
        # Different splittings of the same logical path canonicalize equal.
        args = [
                ('dsname', ['a/b/c/', 'x.vmdk']),
                ('dsname', ['a/', 'b/c/', 'x.vmdk']),
                ('dsname', ['a', 'b', 'c', 'x.vmdk']),
                ('dsname', ['a/b/c', 'x.vmdk'])]
        canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))
            self.assertEqual(canonical_p.datastore, p.datastore)
            self.assertEqual(canonical_p.rel_path, p.rel_path)
            self.assertEqual(str(canonical_p.parent), str(p.parent))

    def test_ds_path_non_equivalence(self):
        # Leading slashes and stray whitespace are significant, not stripped.
        args = [
                # leading slash
                ('dsname', ['/a', 'b', 'c', 'x.vmdk']),
                ('dsname', ['/a/b/c/', 'x.vmdk']),
                ('dsname', ['a/b/c', '/x.vmdk']),
                # leading space
                ('dsname', ['a/b/c/', ' x.vmdk']),
                ('dsname', ['a/', ' b/c/', 'x.vmdk']),
                ('dsname', [' a', 'b', 'c', 'x.vmdk']),
                # trailing space
                ('dsname', ['/a/b/c/', 'x.vmdk ']),
                ('dsname', ['a/b/c/ ', 'x.vmdk'])]
        canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertNotEqual(str(canonical_p), str(p))

    def test_ds_path_hashable(self):
        ds1 = ds_util.DatastorePath('dsname', 'path')
        ds2 = ds_util.DatastorePath('dsname', 'path')
        # If the above objects have the same hash, they will only be added to
        # the set once
        self.assertThat(set([ds1, ds2]), matchers.HasLength(1))

    def test_equal(self):
        a = ds_util.DatastorePath('ds_name', 'a')
        b = ds_util.DatastorePath('ds_name', 'a')
        self.assertEqual(a, b)

    def test_join(self):
        # join() appends components; with no arguments it is a plain copy.
        p = ds_util.DatastorePath('ds_name', 'a')
        ds_path = p.join('b')
        self.assertEqual('[ds_name] a/b', str(ds_path))

        p = ds_util.DatastorePath('ds_name', 'a')
        ds_path = p.join()
        self.assertEqual('[ds_name] a', str(ds_path))

        # None components are rejected, matching the constructor's rules.
        bad_args = [
                [None],
                ['', None],
                ['a', None],
                ['a', None, 'b']]
        for arg in bad_args:
            self.assertRaises(ValueError, p.join, *arg)

    def test_ds_path_parse(self):
        p = ds_util.DatastorePath.parse('[dsname]')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('', p.rel_path)

        p = ds_util.DatastorePath.parse('[dsname] folder')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('folder', p.rel_path)

        p = ds_util.DatastorePath.parse('[dsname] folder/file')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('folder/file', p.rel_path)

        # Empty/None input raises ValueError; input lacking the '[...]'
        # datastore prefix raises IndexError.
        for p in [None, '']:
            self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)
        for p in ['bad path', '/a/b/c', 'a/b/c']:
            self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
| |
'''
Created on Aug 10, 2012
:author: Sana Development Team
:version: 2.0
'''
try:
import json as simplejson
except ImportError, e:
import simplejson
import sys, traceback
import logging
from django.conf import settings
from django.forms import ValidationError
from piston.handler import BaseHandler
from mds.core.handlers import EventHandler as BaseRequestHandler
from mds.api import LOGGER
from mds.api.contrib import openmrslib
from mds.api.responses import succeed, fail
from mds.api.decorators import logged
from mds.api.signals import EventSignal, EventSignalHandler
from mds.api.utils import printstack
from mds.api.v1.json import (render_json_response,
notification_submit, email_notification_submit,
register_client_events,
binary_submit, binarychunk_submit,
binarychunk_hack_submit, patient_get,
patient_list, parseOne, parseAll )
#from mds.api.v1 import v2compatlib
from mds.api.v1.v2compatlib import spform_to_encounter, responses_to_observations
from mds.api.v1.api import register_saved_procedure
from .forms import ProcedureSubmitForm
from .models import RequestLog
# Public handler classes exported by this module.
__all__ = ['AuthHandler',
           'SavedProcedureHandler',
           'EventHandler',
           'RequestLogHandler',
           'NotificationHandler',
           'SMTPHandler',
           'BinaryHandler',
           'BinaryPacketHandler',
           'Base64PacketHandler',
           'PatientHandler']
@logged
class AuthHandler(BaseHandler):
    """Handles status and authentication check requests, for working with
    OpenMRS versions 1.6+.
    """
    allowed_methods = ('GET', 'POST')
    signals = {LOGGER: (EventSignal(), EventSignalHandler(RequestLog))}

    def read(self, request, *args, **kwargs):
        # GET requests are authenticated exactly like POST requests.
        return self.create(request)

    def create(self, request, *args, **kwargs):
        """Validates user credentials with the backing data store.

        Request parameters:
            username
                a valid username
            password
                a valid password
        """
        try:
            credentials = {"username": request.REQUEST.get("username", None),
                           "password": request.REQUEST.get("password", None)}
            logging.info("username %s" % credentials['username'])
            opener = openmrslib.build_opener(host=settings.OPENMRS_SERVER_URL)
            return succeed(opener.wsdispatch("sessions", auth=credentials))
        except Exception as e:
            msg = "%s" % e
            logging.error(msg)
            return fail(msg)
@logged
class SavedProcedureHandler(BaseHandler):
    """ Handles encounter requests. """
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        """Validates an uploaded saved procedure, forwards it upstream, and
        mirrors it locally as an encounter plus observations.

        Form fields (via ProcedureSubmitForm): savedproc_guid,
        procedure_guid, responses, phone, username, password.

        Returns a succeed/fail response object.
        """
        logging.info("Received saved procedure submission.")
        response = ''
        form = ProcedureSubmitForm(self.flatten_dict(request.POST))
        logging.debug("Data: %s" % form.data)
        try:
            form.full_clean()
            if not form.is_valid():
                raise ValidationError(form._get_errors())
            savedproc_guid = form.cleaned_data['savedproc_guid']
            procedure_guid = form.cleaned_data['procedure_guid']
            responses = form.cleaned_data['responses']
            phone = form.cleaned_data['phone']
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # Forward the submission to the upstream store first.
            result, message = register_saved_procedure(savedproc_guid,
                                                       procedure_guid,
                                                       responses,
                                                       phone,
                                                       username,
                                                       password)
            # Mirror the submission locally: one encounter, many observations.
            encounter, data, created = spform_to_encounter(form.cleaned_data)
            encounter.save()
            logging.debug("Saved encounter: " + encounter.uuid)
            observations = responses_to_observations(encounter, data,sort=True)
            for obs in observations:
                obs.save()
                if obs.is_complex:
                    # Complex (binary) observations also materialize a file.
                    obs.create_file()
            #result, message = True, encounter
            if result:
                response = succeed("Successfully saved the procedure: %s" % message)
                logging.info("Saved procedure successfully registered.")
            else:
                response = fail(message)
                logging.error("Failed to register procedure: %s" % message)
        except ValidationError, e:
            # Log every form error. NOTE(review): the raise below discards
            # the fail() response just built -- confirm this is intentional
            # (the @logged decorator may rely on the exception propagating).
            for k,v in form._get_errors().items():
                logging.error("SavedProcedure argument %s:%s" % (k,v))
            response = fail("Invalid ProcedureSubmitForm data")
            raise Exception('Saved procedure submission was invalid')
        except Exception, e:
            # Catch-all: log the traceback and return it as a failure response.
            et, val, tb = sys.exc_info()
            trace = traceback.format_tb(tb)
            error = "Exception : %s %s %s" % (et, val, trace[0])
            for tbm in trace:
                logging.error(tbm)
            response = fail(error)
        return response
class EventHandler(BaseRequestHandler):
    def create(self, request, *args, **kwargs):
        """Accepts a request for submitting client events.

        Request Parameters:
            client_id
                The client phone number.
            events
                The client events: a JSON-formatted list where each event
                carries event_type, event_value, event_time (milliseconds
                since epoch), encounter_reference (the saved procedure id),
                patient_reference and user_reference.
        """
        client_id = request.REQUEST.get('client_id', None)
        events_json = request.REQUEST.get('events', None)
        if client_id is None or events_json is None:
            return render_json_response(
                fail("Could not parse eventlog submission."))
        logging.info("Received events parameter: %s" % events_json)
        try:
            events = simplejson.loads(events_json)
            ok, message = register_client_events(client_id, events)
            response = succeed(message) if ok else fail(message)
        except Exception as e:
            logging.error("Error while processing events: %s" % e)
            response = fail("Could not parse eventlog submission.")
        return render_json_response(response)
class RequestLogHandler(BaseRequestHandler):
    """ Handles network request log requests. """
    # Plain model-backed handler: piston derives read/create behavior
    # from the declared model.
    allowed_methods = ('GET', 'POST')
    model = RequestLog
@logged
class NotificationHandler(BaseHandler):
    """Handles notification submission requests."""
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        # Delegates to the v1 JSON notification submission helper.
        return notification_submit(request)
@logged
class SMTPHandler(BaseHandler):
    """Handles email (SMTP) notification submission requests."""
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        # Delegates to the v1 JSON email notification helper.
        return email_notification_submit(request)
@logged
class BinaryHandler(BaseHandler):
    """Handles whole-binary upload requests."""
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        # Delegates to the v1 JSON binary submission helper.
        return binary_submit(request)
@logged
class BinaryPacketHandler(BaseHandler):
    """Handles chunked binary upload requests."""
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        # Delegates to the v1 JSON binary-chunk submission helper.
        return binarychunk_submit(request)
@logged
class Base64PacketHandler(BaseHandler):
    """Handles base64-encoded chunked binary upload requests."""
    allowed_methods = ('POST',)
    signals = { LOGGER:( EventSignal(), EventSignalHandler(RequestLog))}
    def create(self,request, *args, **kwargs):
        # Delegates to the v1 base64 ("hack") binary-chunk helper.
        return binarychunk_hack_submit(request)
@logged
class PatientHandler(BaseHandler):
    """Handles patient fetch requests, backed by OpenMRS."""
    allowed_methods = ('GET',)
    signals = {LOGGER: (EventSignal(), EventSignalHandler(RequestLog))}

    def read(self, request, id=None, **kwargs):
        """Returns zero or more patients from OpenMRS.

        Parameters:
            request
                The incoming GET request; must carry username and password.
            id
                A patient id for a single lookup, or None/'list' for all.
        """
        try:
            opener = openmrslib.build_opener(host=settings.OPENMRS_SERVER_URL)
            query = self.flatten_dict(request.GET)
            username = query.pop("username")
            password = query.pop("password")
            if id and id != 'list':
                # Single patient lookup.
                response = opener.getPatient(username, password, id)
                if openmrslib.OPENMRS_VERSION < 1.8:
                    message = parseOne(response)
                else:
                    message = response
                if len(message) == 0:
                    return fail("")
            else:
                # Full patient listing.
                response = opener.getAllPatients(username, password,
                                                 query=query)
                if openmrslib.OPENMRS_VERSION < 1.8:
                    message = parseAll(response)
                else:
                    # BUG FIX: `message` was initialized to "" (str has no
                    # append, so the loop below crashed) and the patient
                    # string was built with "%s...".format(...), which
                    # leaves %s placeholders untouched. Use a list and
                    # %-interpolation instead.
                    message = []
                    logging.debug("Response: %s" % response)
                    for p in response:
                        logging.debug(p)
                        # NOTE(review): mixed key styles ("givenName" vs
                        # "family_name") preserved as-is -- confirm against
                        # the upstream response schema.
                        firstname = p["givenName"]
                        lastname = p["family_name"]
                        gender = p["gender"]
                        birthdate = p["birthdate"]
                        uuid = p["uuid"]
                        patient = "%s%s%s%s%s%s" % (firstname.lower(),
                                                    birthdate[0:4],
                                                    birthdate[5:7],
                                                    birthdate[8:10],
                                                    lastname.lower(),
                                                    gender.lower())
                        message.append(patient)
            logging.debug("message: %s" % message)
            return succeed(message)
        except Exception as e:
            logging.error("Error: %s" % str(e))
            printstack(e)
            return fail("%s" % e)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import oslogin_v1
from google.cloud.oslogin_v1.proto import common_pb2
from google.cloud.oslogin_v1.proto import oslogin_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every (method, request) pair on the owning channel stub and
    replays that channel's canned responses (LIFO).
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        # BUG FIX: the original `if response:` swallowed falsy canned
        # responses (e.g. an empty protobuf message evaluates False),
        # returning None instead. Only a missing response yields None.
        if response is not None:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Canned responses are popped (LIFO) by the multi-callables created from
    this channel; every request issued is recorded in ``self.requests``.
    """

    def __init__(self, responses=None):
        # BUG FIX: the original default `responses=[]` was a mutable default
        # argument shared across every ChannelStub() call -- and since the
        # callable stubs pop from it, state could leak between instances.
        self.responses = responses if responses is not None else []
        self.requests = []

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Return a unary-unary callable stub bound to this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel exception used as a canned error response in these tests."""
    pass
class TestOsLoginServiceClient(object):
    """Unit tests for oslogin_v1.OsLoginServiceClient.

    Each test patches gRPC channel creation so the client talks to a
    ChannelStub, then verifies the request proto the client sent and,
    where applicable, the canned response it returned. The *_exception
    variants seed the channel with a CustomException and expect it to
    propagate.
    """

    def test_delete_posix_account(self):
        channel = ChannelStub()
        # Route the client's gRPC traffic through the stub channel.
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        name = client.project_path('[USER]', '[PROJECT]')

        client.delete_posix_account(name)

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.DeletePosixAccountRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_posix_account_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        name = client.project_path('[USER]', '[PROJECT]')

        with pytest.raises(CustomException):
            client.delete_posix_account(name)

    def test_delete_ssh_public_key(self):
        channel = ChannelStub()
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')

        client.delete_ssh_public_key(name)

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.DeleteSshPublicKeyRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_ssh_public_key_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')

        with pytest.raises(CustomException):
            client.delete_ssh_public_key(name)

    def test_get_login_profile(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        suspended = False
        expected_response = {'name': name_2, 'suspended': suspended}
        expected_response = oslogin_pb2.LoginProfile(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        name = client.user_path('[USER]')

        response = client.get_login_profile(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.GetLoginProfileRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_login_profile_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        name = client.user_path('[USER]')

        with pytest.raises(CustomException):
            client.get_login_profile(name)

    def test_get_ssh_public_key(self):
        # Setup Expected Response
        key = 'key106079'
        expiration_time_usec = 2058878882
        fingerprint = 'fingerprint-1375934236'
        expected_response = {
            'key': key,
            'expiration_time_usec': expiration_time_usec,
            'fingerprint': fingerprint
        }
        expected_response = common_pb2.SshPublicKey(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')

        response = client.get_ssh_public_key(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.GetSshPublicKeyRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_ssh_public_key_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')

        with pytest.raises(CustomException):
            client.get_ssh_public_key(name)

    def test_import_ssh_public_key(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = oslogin_pb2.ImportSshPublicKeyResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        parent = client.user_path('[USER]')
        ssh_public_key = {}

        response = client.import_ssh_public_key(parent, ssh_public_key)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.ImportSshPublicKeyRequest(
            parent=parent, ssh_public_key=ssh_public_key)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_import_ssh_public_key_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        parent = client.user_path('[USER]')
        ssh_public_key = {}

        with pytest.raises(CustomException):
            client.import_ssh_public_key(parent, ssh_public_key)

    def test_update_ssh_public_key(self):
        # Setup Expected Response
        key = 'key106079'
        expiration_time_usec = 2058878882
        fingerprint = 'fingerprint-1375934236'
        expected_response = {
            'key': key,
            'expiration_time_usec': expiration_time_usec,
            'fingerprint': fingerprint
        }
        expected_response = common_pb2.SshPublicKey(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup Request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
        ssh_public_key = {}

        response = client.update_ssh_public_key(name, ssh_public_key)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = oslogin_pb2.UpdateSshPublicKeyRequest(
            name=name, ssh_public_key=ssh_public_key)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_ssh_public_key_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = oslogin_v1.OsLoginServiceClient()

        # Setup request
        name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
        ssh_public_key = {}

        with pytest.raises(CustomException):
            client.update_ssh_public_key(name, ssh_public_key)
| |
from __future__ import absolute_import, unicode_literals
import re
from functools import partial
from inspect import getargspec
from django.conf import settings
from django.template.context import (Context, RequestContext,
ContextPopException)
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.utils.text import (smart_split, unescape_string_literal,
get_text_list)
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.translation import ugettext_lazy, pgettext_lazy
from django.utils.safestring import (SafeData, EscapeData, mark_safe,
mark_for_escaping)
from django.utils.formats import localize
from django.utils.html import escape
from django.utils.module_loading import module_has_submodule
from django.utils.timezone import template_localtime
# Lexer token types.
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3

# Human-readable names for the token types above (used by Token.__str__).
TOKEN_MAPPING = {
    TOKEN_TEXT: 'Text',
    TOKEN_VAR: 'Var',
    TOKEN_BLOCK: 'Block',
    TOKEN_COMMENT: 'Comment',
}

# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'

ALLOWED_VARIABLE_CHARS = ('abcdefghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.')

# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'

# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
          (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
           re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))

# global dictionary of libraries that have been loaded using get_library
libraries = {}

# global list of libraries to load by default for a new parser
builtins = []

# True if TEMPLATE_STRING_IF_INVALID contains a format string (%s). None means
# uninitialised.
invalid_var_format_string = None
class TemplateSyntaxError(Exception):
    """Raised when template source contains invalid tag/filter syntax."""
    pass
class TemplateDoesNotExist(Exception):
    """Raised when a named template cannot be located."""
    pass
class TemplateEncodingError(Exception):
    """Raised when template source is not unicode or valid UTF-8."""
    pass
class VariableDoesNotExist(Exception):
    """Raised when a template variable cannot be resolved in the context."""

    def __init__(self, msg, params=()):
        # msg is a %-format string; params are interpolated lazily in
        # __unicode__ so that odd values cannot break message rendering.
        self.msg = msg
        self.params = params

    def __str__(self):
        # Python 2: delegate to the unicode form, encoded as UTF-8.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.msg % tuple([force_unicode(p, errors='replace')
                                 for p in self.params])
class InvalidTemplateLibrary(Exception):
    """Raised when a template tag/filter library cannot be loaded."""
    pass
class Origin(object):
    """Abstract record of where a template's source text came from."""

    def __init__(self, name):
        self.name = name

    def reload(self):
        # Subclasses must re-read and return the template source text.
        raise NotImplementedError

    def __str__(self):
        return self.name
class StringOrigin(Origin):
    """Origin for templates built directly from a string (no loader)."""

    def __init__(self, source):
        super(StringOrigin, self).__init__(UNKNOWN_SOURCE)
        self.source = source

    def reload(self):
        return self.source
class Template(object):
    """A compiled template: a NodeList plus identifying metadata."""

    def __init__(self, template_string, origin=None,
                 name='<Unknown Template>'):
        try:
            template_string = smart_unicode(template_string)
        except UnicodeDecodeError:
            raise TemplateEncodingError("Templates can only be constructed "
                                        "from unicode or UTF-8 strings.")
        # Under TEMPLATE_DEBUG, keep the raw source around so debugging
        # tools can reload and display it.
        if settings.TEMPLATE_DEBUG and origin is None:
            origin = StringOrigin(template_string)
        self.nodelist = compile_string(template_string, origin)
        self.name = name

    def __iter__(self):
        # Depth-first iteration over every node in the compiled tree.
        for node in self.nodelist:
            for subnode in node:
                yield subnode

    def _render(self, context):
        return self.nodelist.render(context)

    def render(self, context):
        "Display stage -- can be called many times"
        # Each render gets a fresh render_context frame so per-render state
        # cannot leak between renders of the same compiled template.
        context.render_context.push()
        try:
            return self._render(context)
        finally:
            context.render_context.pop()
def compile_string(template_string, origin):
    """Compile template_string into a NodeList ready for rendering."""
    if settings.TEMPLATE_DEBUG:
        # Debug variants track token positions for richer error reporting.
        from django.template.debug import DebugLexer, DebugParser
        lexer_cls, parser_cls = DebugLexer, DebugParser
    else:
        lexer_cls, parser_cls = Lexer, Parser
    tokens = lexer_cls(template_string, origin).tokenize()
    return parser_cls(tokens).parse()
class Token(object):
    """A single lexed chunk of template source."""

    def __init__(self, token_type, contents):
        # token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or
        # TOKEN_COMMENT.
        self.token_type = token_type
        self.contents = contents
        self.lineno = None

    def __str__(self):
        preview = self.contents[:20].replace('\n', '')
        return '<%s token: "%s...">' % (TOKEN_MAPPING[self.token_type],
                                        preview)

    def split_contents(self):
        """Split contents shlex-style, keeping _("...") groups intact."""
        pieces = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            # Re-join translation-marked literals that smart_split broke up.
            if bit.startswith(('_("', "_('")):
                sentinel = bit[2] + ')'
                collected = [bit]
                while not bit.endswith(sentinel):
                    bit = next(bits)
                    collected.append(bit)
                bit = ' '.join(collected)
            pieces.append(bit)
        return pieces
class Lexer(object):
    """Splits raw template text into Tokens (text, var, block, comment)."""

    def __init__(self, template_string, origin):
        self.template_string = template_string
        self.origin = origin
        self.lineno = 1
        # Holds the expected closing tag name (e.g. whatever follows
        # 'verbatim', defaulting to 'endverbatim') while inside a verbatim
        # section; False otherwise.
        self.verbatim = False

    def tokenize(self):
        """
        Return a list of tokens from a given template_string.
        """
        in_tag = False
        result = []
        # tag_re captures its delimiters, so split() alternates between
        # literal text and tag markup; empty strings are skipped.
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, in_tag))
            in_tag = not in_tag
        return result

    def create_token(self, token_string, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag and token_string.startswith(BLOCK_TAG_START):
            # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
            # We could do len(BLOCK_TAG_START) to be more "correct", but we've
            # hard-coded the 2s here for performance. And it's not like
            # the TAG_START values are going to change anytime, anyway.
            block_content = token_string[2:-2].strip()
            if self.verbatim and block_content == self.verbatim:
                # This block tag closes the current verbatim section.
                self.verbatim = False
        if in_tag and not self.verbatim:
            if token_string.startswith(VARIABLE_TAG_START):
                token = Token(TOKEN_VAR, token_string[2:-2].strip())
            elif token_string.startswith(BLOCK_TAG_START):
                if block_content.startswith('verbatim'):
                    bits = block_content.split(' ', 1)
                    if bits[0] == 'verbatim':
                        # An optional argument names the closing tag.
                        if len(bits) > 1:
                            self.verbatim = bits[1]
                        else:
                            self.verbatim = 'endverbatim'
                token = Token(TOKEN_BLOCK, block_content)
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # NOTE(review): str.find() returns -1 (truthy) when the
                # marker is absent and 0 (falsy) when it starts the string,
                # so content is kept for every comment except one beginning
                # exactly with the marker -- confirm whether `!= -1` was
                # intended here.
                if token_string.find(TRANSLATOR_COMMENT_MARK):
                    content = token_string[2:-2].strip()
                token = Token(TOKEN_COMMENT, content)
        else:
            token = Token(TOKEN_TEXT, token_string)
        token.lineno = self.lineno
        self.lineno += token_string.count('\n')
        return token
class Parser(object):
    """Turns a token stream into a NodeList, dispatching block tags to the
    compilation functions registered by template libraries."""

    def __init__(self, tokens):
        self.tokens = tokens
        self.tags = {}
        self.filters = {}
        # Preload the default tag/filter libraries.
        for lib in builtins:
            self.add_library(lib)

    def parse(self, parse_until=None):
        """Parse tokens into a NodeList; stop (pushing the token back)
        when a block tag named in parse_until is reached."""
        if parse_until is None:
            parse_until = []
        nodelist = self.create_nodelist()
        while self.tokens:
            token = self.next_token()
            # Use the raw values here for TOKEN_* for a tiny performance boost.
            if token.token_type == 0: # TOKEN_TEXT
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == 1: # TOKEN_VAR
                if not token.contents:
                    self.empty_variable(token)
                filter_expression = self.compile_filter(token.contents)
                var_node = self.create_variable_node(filter_expression)
                self.extend_nodelist(nodelist, var_node, token)
            elif token.token_type == 2: # TOKEN_BLOCK
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    self.empty_block_tag(token)
                if command in parse_until:
                    # put token back on token list so calling
                    # code knows why it terminated
                    self.prepend_token(token)
                    return nodelist
                # execute callback function for this tag and append
                # resulting node
                self.enter_command(command, token)
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                try:
                    compiled_result = compile_func(self, token)
                except TemplateSyntaxError as e:
                    # Subclasses may suppress the error via this hook.
                    if not self.compile_function_error(token, e):
                        raise
                self.extend_nodelist(nodelist, compiled_result, token)
                self.exit_command()
        if parse_until:
            # Ran out of tokens while still expecting a closing tag.
            self.unclosed_block_tag(parse_until)
        return nodelist

    def skip_past(self, endtag):
        # Discard tokens up to and including the named end tag.
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])

    def create_variable_node(self, filter_expression):
        return VariableNode(filter_expression)

    def create_nodelist(self):
        return NodeList()

    def extend_nodelist(self, nodelist, node, token):
        # Nodes flagged must_be_first may only precede non-text nodes.
        if node.must_be_first and nodelist:
            try:
                if nodelist.contains_nontext:
                    raise AttributeError
            except AttributeError:
                raise TemplateSyntaxError("%r must be the first tag "
                                          "in the template." % node)
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        nodelist.append(node)

    def enter_command(self, command, token):
        # Hook for subclasses (e.g. debug parsers); no-op here.
        pass

    def exit_command(self):
        # Hook for subclasses; no-op here.
        pass

    def error(self, token, msg):
        # Returns (not raises) the exception so callers can
        # `raise self.error(...)`; subclasses may attach source info.
        return TemplateSyntaxError(msg)

    def empty_variable(self, token):
        raise self.error(token, "Empty variable tag")

    def empty_block_tag(self, token):
        raise self.error(token, "Empty block tag")

    def invalid_block_tag(self, token, command, parse_until=None):
        if parse_until:
            raise self.error(token, "Invalid block tag: '%s', expected %s" %
                (command, get_text_list(["'%s'" % p for p in parse_until])))
        raise self.error(token, "Invalid block tag: '%s'" % command)

    def unclosed_block_tag(self, parse_until):
        raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until))

    def compile_function_error(self, token, e):
        # Hook: return a truthy value to suppress a tag compilation error.
        pass

    def next_token(self):
        return self.tokens.pop(0)

    def prepend_token(self, token):
        self.tokens.insert(0, token)

    def delete_first_token(self):
        del self.tokens[0]

    def add_library(self, lib):
        # Merge a library's registered tags and filters into this parser.
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)

    def compile_filter(self, token):
        """
        Convenient wrapper for FilterExpression
        """
        return FilterExpression(token, self)

    def find_filter(self, filter_name):
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
class TokenParser(object):
    """
    Subclass this and implement the top() method to parse a template line.
    When instantiating the parser, pass in the line from the Django template
    parser.
    The parser's "tagname" instance-variable stores the name of the tag that
    the filter was called with.
    """
    def __init__(self, subject):
        # subject: the full text of the tag being parsed.
        self.subject = subject
        # pointer: current scan position within subject.
        self.pointer = 0
        # backout: stack of previous pointer positions, enabling back().
        self.backout = []
        # Consume the leading tag name immediately.
        self.tagname = self.tag()
    def top(self):
        """
        Overload this method to do the actual parsing and return the result.
        """
        raise NotImplementedError()
    def more(self):
        """
        Returns True if there is more stuff in the tag.
        """
        return self.pointer < len(self.subject)
    def back(self):
        """
        Undoes the last microparser. Use this for lookahead and backtracking.
        """
        if not len(self.backout):
            raise TemplateSyntaxError("back called without some previous "
                                      "parsing")
        self.pointer = self.backout.pop()
    def tag(self):
        """
        A microparser that just returns the next tag from the line.
        """
        subject = self.subject
        i = self.pointer
        if i >= len(subject):
            raise TemplateSyntaxError("expected another tag, found "
                                      "end of string: %s" % subject)
        p = i
        # Scan to the end of the tag (the next whitespace character).
        while i < len(subject) and subject[i] not in (' ', '\t'):
            i += 1
        s = subject[p:i]
        # Skip trailing whitespace so the pointer rests on the next token.
        while i < len(subject) and subject[i] in (' ', '\t'):
            i += 1
        # Remember where this microparse started, so back() can undo it.
        self.backout.append(self.pointer)
        self.pointer = i
        return s
    def value(self):
        """
        A microparser that parses for a value: some string constant or
        variable name.
        """
        subject = self.subject
        i = self.pointer
        def next_space_index(subject, i):
            """
            Increment pointer until a real space (i.e. a space not within
            quotes) is encountered
            """
            while i < len(subject) and subject[i] not in (' ', '\t'):
                if subject[i] in ('"', "'"):
                    # Entered a quoted section: skip to the matching quote.
                    c = subject[i]
                    i += 1
                    while i < len(subject) and subject[i] != c:
                        i += 1
                    if i >= len(subject):
                        raise TemplateSyntaxError("Searching for value. "
                                                  "Unexpected end of string in column %d: %s" %
                                                  (i, subject))
                i += 1
            return i
        if i >= len(subject):
            raise TemplateSyntaxError("Searching for value. Expected another "
                                      "value but found end of string: %s" %
                                      subject)
        if subject[i] in ('"', "'"):
            # Quoted value: consume through the matching closing quote.
            p = i
            i += 1
            while i < len(subject) and subject[i] != subject[p]:
                i += 1
            if i >= len(subject):
                raise TemplateSyntaxError("Searching for value. Unexpected "
                                          "end of string in column %d: %s" %
                                          (i, subject))
            i += 1
            # Continue parsing until next "real" space,
            # so that filters are also included
            i = next_space_index(subject, i)
            res = subject[p:i]
            # Skip trailing whitespace and commit the new pointer position.
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return res
        else:
            # Unquoted value: everything up to the next unquoted space.
            p = i
            i = next_space_index(subject, i)
            s = subject[p:i]
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return s
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
    'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"',  # double-quoted string
    'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'",  # single-quoted string
    'i18n_open': re.escape("_("),
    'i18n_close': re.escape(")"),
}
# Newlines in the pattern above exist only for readability; strip them so the
# pattern can be embedded inside filter_raw_string below.
constant_string = constant_string.replace("\n", "")
# Matches either the leading constant/variable of a filter expression, or one
# "|filter_name:arg" section; applied repeatedly by FilterExpression.__init__.
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
    'constant': constant_string,
    'num': r'[-+\.]?\d[\d\.e]*',
    'var_chars': "\w\.",
    'filter_sep': re.escape(FILTER_SEPARATOR),
    'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
# re.VERBOSE makes the embedded whitespace insignificant.
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
    """
    Parses a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample::
        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    This class should never be instantiated outside of the
    get_filters_from_token helper function.
    """
    def __init__(self, token, parser):
        self.token = token
        # filter_re yields one match for the leading variable/constant and
        # one per "|filter:arg" section; matches must be contiguous.
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        upto = 0
        for match in matches:
            start = match.start()
            # Any gap between consecutive matches means unparsable input.
            if upto != start:
                raise TemplateSyntaxError("Could not parse some characters: "
                                          "%s|%s|%s" %
                                          (token[:upto], token[upto:start],
                                           token[start:]))
            if var_obj is None:
                # First match: the subject of the expression.
                var, constant = match.group("var", "constant")
                if constant:
                    try:
                        # Constants are resolved once, at parse time.
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var is None:
                    raise TemplateSyntaxError("Could not find variable at "
                                              "start of %s." % token)
                else:
                    var_obj = Variable(var)
            else:
                # Subsequent matches: one filter with an optional argument.
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    # (False, value): pre-resolved constant argument.
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    # (True, Variable): resolved against the context later.
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' "
                                      "from '%s'" % (token[upto:], token))
        self.filters = filters
        self.var = var_obj
    def resolve(self, context, ignore_failures=False):
        """Resolve the variable and apply each filter in turn, honouring
        TEMPLATE_STRING_IF_INVALID and safe/escape markers."""
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    if settings.TEMPLATE_STRING_IF_INVALID:
                        global invalid_var_format_string
                        # Cache whether the setting contains a '%s' slot.
                        if invalid_var_format_string is None:
                            invalid_var_format_string = '%s' in settings.TEMPLATE_STRING_IF_INVALID
                        if invalid_var_format_string:
                            return settings.TEMPLATE_STRING_IF_INVALID % self.var
                        return settings.TEMPLATE_STRING_IF_INVALID
                    else:
                        obj = settings.TEMPLATE_STRING_IF_INVALID
        else:
            # A constant: already resolved at parse time.
            obj = self.var
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    # Constant argument: treated as safe by construction.
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'expects_localtime', False):
                obj = template_localtime(obj, context.use_tz)
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # Propagate safe/escape status through 'is_safe' filters.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            elif isinstance(obj, EscapeData):
                obj = mark_for_escaping(new_obj)
            else:
                obj = new_obj
        return obj
    def args_check(name, func, provided):
        # Verify that filter *func* accepts the number of supplied arguments.
        provided = list(provided)
        plen = len(provided)
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)
        args, varargs, varkw, defaults = getargspec(func)
        # First argument is filter input.
        args.pop(0)
        if defaults:
            nondefs = args[:-len(defaults)]
        else:
            nondefs = args
        # Args without defaults must be provided.
        try:
            for arg in nondefs:
                provided.pop(0)
        except IndexError:
            # Not enough
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, len(nondefs), plen))
        # Defaults can be overridden.
        defaults = defaults and list(defaults) or []
        try:
            for parg in provided:
                defaults.pop(0)
        except IndexError:
            # Too many.
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, len(nondefs), plen))
        return True
    args_check = staticmethod(args_check)
    def __str__(self):
        return self.token
def resolve_variable(path, context):
    """
    Resolve *path* (which may contain attribute syntax) against *context*
    and return the value.
    Deprecated; use the Variable class instead.
    """
    var = Variable(path)
    return var.resolve(context)
class Variable(object):
    """
    A template variable, resolvable against a given context. The variable may
    be a hard-coded string (if it begins and ends with single or double quote
    marks)::
        >>> c = {'article': {'section':u'News'}}
        >>> Variable('article.section').resolve(c)
        u'News'
        >>> Variable('article').resolve(c)
        {'section': u'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = u'News'
    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """
    def __init__(self, var):
        self.var = var
        # literal holds a parsed constant (number or quoted string) if any;
        # lookups holds the dotted path for a genuine variable.  Exactly one
        # of the two ends up non-None.
        self.literal = None
        self.lookups = None
        # translate is True when the value was wrapped in _( ... ).
        self.translate = False
        self.message_context = None
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)
            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)
            # "2." is invalid
            if var.endswith('.'):
                raise ValueError
        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may "
                                              "not begin with underscores: '%s'" %
                                              var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            if self.message_context:
                return pgettext_lazy(self.message_context, value)
            else:
                return ugettext_lazy(value)
        return value
    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)
    def __str__(self):
        return self.var
    def _resolve_lookup(self, context):
        """
        Performs resolution of a real variable (i.e. not a literal) against the
        given context.
        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.
        """
        current = context
        try:  # catch-all for silent variable failures
            for bit in self.lookups:
                # Each path component is tried as, in order: dict key,
                # attribute, then numeric index.
                try:  # dictionary lookup
                    current = current[bit]
                except (TypeError, AttributeError, KeyError):
                    try:  # attribute lookup
                        current = getattr(current, bit)
                    except (TypeError, AttributeError):
                        try:  # list-index lookup
                            current = current[int(bit)]
                        except (IndexError,  # list index out of range
                                ValueError,  # invalid literal for int()
                                KeyError,  # current is a dict without `int(bit)` key
                                TypeError):  # unsubscriptable object
                            raise VariableDoesNotExist("Failed lookup for key "
                                                       "[%s] in %r",
                                                       (bit, current))  # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        current = settings.TEMPLATE_STRING_IF_INVALID
                    else:
                        try:  # method call (assuming no args required)
                            current = current()
                        except TypeError:  # arguments *were* required
                            # GOTCHA: This will also catch any TypeError
                            # raised in the function itself.
                            current = settings.TEMPLATE_STRING_IF_INVALID  # invalid method call
        except Exception as e:
            if getattr(e, 'silent_variable_failure', False):
                current = settings.TEMPLATE_STRING_IF_INVALID
            else:
                raise
        return current
class Node(object):
    """Base class for all nodes in a compiled template."""

    # Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes.
    must_be_first = False
    child_nodelists = ('nodelist',)

    def render(self, context):
        """
        Return the node rendered as a string.
        """
        pass

    def __iter__(self):
        yield self

    def get_nodes_by_type(self, nodetype):
        """
        Return a list of all nodes (within this node and its nodelist)
        of the given type
        """
        matches = [self] if isinstance(self, nodetype) else []
        for attr in self.child_nodelists:
            child = getattr(self, attr, None)
            if child:
                matches.extend(child.get_nodes_by_type(nodetype))
        return matches
class NodeList(list):
    """A list of template nodes that knows how to render itself."""

    # Set to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False

    def render(self, context):
        rendered = []
        for node in self:
            # Plain (non-Node) entries are emitted as-is.
            piece = self.render_node(node, context) if isinstance(node, Node) else node
            rendered.append(force_unicode(piece))
        return mark_safe(''.join(rendered))

    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        return [found
                for node in self
                for found in node.get_nodes_by_type(nodetype)]

    def render_node(self, node, context):
        return node.render(context)
class TextNode(Node):
    """Node holding a literal chunk of template text."""

    def __init__(self, s):
        self.s = s

    def __repr__(self):
        # Only a short ASCII-safe preview of the text is shown.
        preview = smart_str(self.s[:25], 'ascii', errors='replace')
        return "<Text Node: '%s'>" % preview

    def render(self, context):
        return self.s
def _render_value_in_context(value, context):
    """
    Convert *value* into a unicode string suitable for inclusion in rendered
    template output, localizing and escaping it as the context requires.
    If value is a string, it is expected to have already been translated.
    """
    value = template_localtime(value, use_tz=context.use_tz)
    value = localize(value, use_l10n=context.use_l10n)
    value = force_unicode(value)
    # Escape unless the value is already marked safe (or autoescape is off
    # and the value was not explicitly marked for escaping).
    needs_escape = isinstance(value, EscapeData) or (
        context.autoescape and not isinstance(value, SafeData))
    if needs_escape:
        return escape(value)
    return value
class VariableNode(Node):
    """Node that renders a FilterExpression into output text."""

    def __init__(self, filter_expression):
        self.filter_expression = filter_expression

    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression

    def render(self, context):
        try:
            resolved = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            # Unicode conversion can fail sometimes for reasons out of our
            # control (e.g. exception rendering). In that case, we fail
            # quietly.
            return ''
        return _render_value_in_context(resolved, context)
# Regex for token keyword arguments: an optional "name=" prefix (group 1)
# followed by the value (group 2).
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
    """
    A utility method for parsing token keyword arguments.
    :param bits: A list containing remainder of the token (split by spaces)
        that is to be checked for arguments. Valid arguments will be removed
        from this list.
    :param support_legacy: If set to ``True``, the legacy format
        ``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
        format is allowed.
    :returns: A dictionary of the arguments retrieved from the ``bits`` token
        list.
    There is no requirement for all remaining token ``bits`` to be keyword
    arguments, so the dictionary will be returned as soon as an invalid
    argument format is reached.
    """
    if not bits:
        return {}
    # Inlined equivalent of the module-level kwarg_re.
    kwarg_match = re.compile(r"(?:(\w+)=)?(.+)").match
    first = kwarg_match(bits[0])
    kwarg_format = first and first.group(1)
    if not kwarg_format:
        # Legacy "value as name" syntax is only honoured when requested and
        # when the shape of the first bits allows it.
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != 'as':
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_match(bits[0])
            if not match or not match.group(1):
                return kwargs
            key, value = match.groups()
            del bits[:1]
        else:
            if len(bits) < 3 or bits[1] != 'as':
                return kwargs
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        # Legacy arguments are joined by a literal 'and'.
        if bits and not kwarg_format:
            if bits[0] != 'and':
                return kwargs
            del bits[:1]
    return kwargs
def parse_bits(parser, bits, params, varargs, varkw, defaults,
               takes_context, name):
    """
    Parses bits for template tag helpers (simple_tag, inclusion_tag and
    assignment_tag), in particular by detecting syntax errors and by
    extracting positional and keyword arguments.

    :param parser: template parser, used to compile argument expressions.
    :param bits: token contents split on whitespace, tag name excluded.
    :param params: positional parameter names of the decorated function,
        with varargs/varkw/defaults as returned by getargspec().
    :param takes_context: whether the function expects the rendering context
        as its first argument.
    :param name: tag name, used in error messages.
    :returns: ``(args, kwargs)`` of compiled filter expressions.
    :raises TemplateSyntaxError: on any argument-syntax violation.
    """
    if takes_context:
        if params[0] == 'context':
            params = params[1:]
        else:
            raise TemplateSyntaxError(
                "'%s' is decorated with takes_context=True so it must "
                "have a first argument of 'context'" % name)
    args = []
    kwargs = {}
    unhandled_params = list(params)
    for bit in bits:
        # First we try to extract a potential kwarg from the bit
        kwarg = token_kwargs([bit], parser)
        if kwarg:
            # The kwarg was successfully extracted.  Fixed for Python 3
            # compatibility: dict views are not indexable, so use
            # next(iter(...)) instead of .items()[0] (works on 2 and 3;
            # kwarg has exactly one entry here).
            param, value = next(iter(kwarg.items()))
            if param not in params and varkw is None:
                # An unexpected keyword argument was supplied
                raise TemplateSyntaxError(
                    "'%s' received unexpected keyword argument '%s'" %
                    (name, param))
            elif param in kwargs:
                # The keyword argument has already been supplied once
                raise TemplateSyntaxError(
                    "'%s' received multiple values for keyword argument '%s'" %
                    (name, param))
            else:
                # All good, record the keyword argument
                kwargs[str(param)] = value
                if param in unhandled_params:
                    # If using the keyword syntax for a positional arg, then
                    # consume it.
                    unhandled_params.remove(param)
        else:
            if kwargs:
                raise TemplateSyntaxError(
                    "'%s' received some positional argument(s) after some "
                    "keyword argument(s)" % name)
            else:
                # Record the positional argument
                args.append(parser.compile_filter(bit))
                try:
                    # Consume from the list of expected positional arguments
                    unhandled_params.pop(0)
                except IndexError:
                    if varargs is None:
                        raise TemplateSyntaxError(
                            "'%s' received too many positional arguments" %
                            name)
    if defaults is not None:
        # Consider the last n params handled, where n is the
        # number of defaults.
        unhandled_params = unhandled_params[:-len(defaults)]
    if unhandled_params:
        # Some positional arguments were not supplied
        raise TemplateSyntaxError(
            "'%s' did not receive value(s) for the argument(s): %s" %
            (name, ", ".join(["'%s'" % p for p in unhandled_params])))
    return args, kwargs
def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,
                         name, takes_context, node_class):
    """
    Returns a template.Node subclass.
    """
    # Everything after the tag name is argument material.
    bits = token.split_contents()[1:]
    args, kwargs = parse_bits(parser, bits, params, varargs, varkw,
                              defaults, takes_context, name)
    node = node_class(takes_context, args, kwargs)
    return node
class TagHelperNode(Node):
    """
    Base class for tag helper nodes such as SimpleNode, InclusionNode and
    AssignmentNode. Manages the positional and keyword arguments to be passed
    to the decorated function.
    """

    def __init__(self, takes_context, args, kwargs):
        self.takes_context = takes_context
        self.args = args
        self.kwargs = kwargs

    def get_resolved_arguments(self, context):
        """Resolve the stored argument expressions against *context*,
        prepending the context itself when takes_context is set."""
        resolved_args = [expr.resolve(context) for expr in self.args]
        if self.takes_context:
            resolved_args = [context] + resolved_args
        resolved_kwargs = dict((key, expr.resolve(context))
                               for key, expr in self.kwargs.items())
        return resolved_args, resolved_kwargs
class Library(object):
    """
    Registry of template tags and filters, populated via the tag/filter
    decorators and the simple_tag/assignment_tag/inclusion_tag helpers.
    """
    def __init__(self):
        # name -> callable mappings for registered filters and tags.
        self.filters = {}
        self.tags = {}
    def tag(self, name=None, compile_function=None):
        # Supports several decorator calling conventions; see inline comments.
        if name is None and compile_function is None:
            # @register.tag()
            return self.tag_function
        elif name is not None and compile_function is None:
            if callable(name):
                # @register.tag
                return self.tag_function(name)
            else:
                # @register.tag('somename') or @register.tag(name='somename')
                def dec(func):
                    return self.tag(name, func)
                return dec
        elif name is not None and compile_function is not None:
            # register.tag('somename', somefunc)
            self.tags[name] = compile_function
            return compile_function
        else:
            raise InvalidTemplateLibrary("Unsupported arguments to "
                                         "Library.tag: (%r, %r)", (name, compile_function))
    def tag_function(self, func):
        # Register under the wrapped function's own name when used bare.
        self.tags[getattr(func, "_decorated_function", func).__name__] = func
        return func
    def filter(self, name=None, filter_func=None, **flags):
        # Same calling conventions as tag(); flags carry filter metadata
        # (expects_localtime / is_safe / needs_autoescape).
        if name is None and filter_func is None:
            # @register.filter()
            def dec(func):
                return self.filter_function(func, **flags)
            return dec
        elif name is not None and filter_func is None:
            if callable(name):
                # @register.filter
                return self.filter_function(name, **flags)
            else:
                # @register.filter('somename') or @register.filter(name='somename')
                def dec(func):
                    return self.filter(name, func, **flags)
                return dec
        elif name is not None and filter_func is not None:
            # register.filter('somename', somefunc)
            self.filters[name] = filter_func
            for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
                if attr in flags:
                    value = flags[attr]
                    # set the flag on the filter for FilterExpression.resolve
                    setattr(filter_func, attr, value)
                    # set the flag on the innermost decorated function
                    # for decorators that need it e.g. stringfilter
                    if hasattr(filter_func, "_decorated_function"):
                        setattr(filter_func._decorated_function, attr, value)
            return filter_func
        else:
            raise InvalidTemplateLibrary("Unsupported arguments to "
                                         "Library.filter: (%r, %r)", (name, filter_func))
    def filter_function(self, func, **flags):
        # Register under the wrapped function's own name.
        name = getattr(func, "_decorated_function", func).__name__
        return self.filter(name, func, **flags)
    def simple_tag(self, func=None, takes_context=None, name=None):
        """Register a tag whose function returns the string to output."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)
            class SimpleNode(TagHelperNode):
                def render(self, context):
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    return func(*resolved_args, **resolved_kwargs)
            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)
            compile_func = partial(generic_tag_compiler,
                                   params=params, varargs=varargs, varkw=varkw,
                                   defaults=defaults, name=function_name,
                                   takes_context=takes_context, node_class=SimpleNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.simple_tag(...)
            return dec
        elif callable(func):
            # @register.simple_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to simple_tag")
    def assignment_tag(self, func=None, takes_context=None, name=None):
        """Register a tag that stores its result in a context variable given
        with the trailing "as varname" syntax."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)
            class AssignmentNode(TagHelperNode):
                def __init__(self, takes_context, args, kwargs, target_var):
                    super(AssignmentNode, self).__init__(takes_context, args, kwargs)
                    self.target_var = target_var
                def render(self, context):
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    context[self.target_var] = func(*resolved_args, **resolved_kwargs)
                    return ''
            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)
            def compile_func(parser, token):
                bits = token.split_contents()[1:]
                if len(bits) < 2 or bits[-2] != 'as':
                    raise TemplateSyntaxError(
                        "'%s' tag takes at least 2 arguments and the "
                        "second last argument must be 'as'" % function_name)
                target_var = bits[-1]
                bits = bits[:-2]
                args, kwargs = parse_bits(parser, bits, params,
                                          varargs, varkw, defaults, takes_context, function_name)
                return AssignmentNode(takes_context, args, kwargs, target_var)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.assignment_tag(...)
            return dec
        elif callable(func):
            # @register.assignment_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")
    def inclusion_tag(self, file_name, context_class=Context, takes_context=False, name=None):
        """Register a tag that renders *file_name* with the dict returned by
        the decorated function."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)
            class InclusionNode(TagHelperNode):
                def render(self, context):
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    _dict = func(*resolved_args, **resolved_kwargs)
                    if not getattr(self, 'nodelist', False):
                        # Lazily load the wrapped template the first time this
                        # node is rendered.
                        from django.template.loader import get_template, select_template
                        if isinstance(file_name, Template):
                            t = file_name
                        elif not isinstance(file_name, basestring) and is_iterable(file_name):
                            t = select_template(file_name)
                        else:
                            t = get_template(file_name)
                        self.nodelist = t.nodelist
                    new_context = context_class(_dict, **{
                        'autoescape': context.autoescape,
                        'current_app': context.current_app,
                        'use_l10n': context.use_l10n,
                        'use_tz': context.use_tz,
                    })
                    # Copy across the CSRF token, if present, because
                    # inclusion tags are often used for forms, and we need
                    # instructions for using CSRF protection to be as simple
                    # as possible.
                    csrf_token = context.get('csrf_token', None)
                    if csrf_token is not None:
                        new_context['csrf_token'] = csrf_token
                    return self.nodelist.render(new_context)
            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)
            compile_func = partial(generic_tag_compiler,
                                   params=params, varargs=varargs, varkw=varkw,
                                   defaults=defaults, name=function_name,
                                   takes_context=takes_context, node_class=InclusionNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        return dec
def is_library_missing(name):
    """Check if library that failed to load cannot be found under any
    templatetags directory or does exist but fails to import.
    Non-existing condition is checked recursively for each subpackage in cases
    like <appdir>/templatetags/subpackage/package/module.py.
    """
    # Don't bother to check if '.' is in name since any name will be prefixed
    # with some template root.
    prefix, leaf = name.rsplit('.', 1)
    try:
        pkg = import_module(prefix)
        return not module_has_submodule(pkg, leaf)
    except ImportError:
        # Walk up one package level and try again.
        return is_library_missing(prefix)
def import_library(taglib_module):
    """
    Load a template tag library module.
    Verifies that the library contains a 'register' attribute, and
    returns that attribute as the representation of the library
    """
    try:
        mod = import_module(taglib_module)
    except ImportError as e:
        # A taglib submodule that simply does not exist is not an error; a
        # submodule that exists but fails to import must be surfaced.
        if is_library_missing(taglib_module):
            return None
        raise InvalidTemplateLibrary("ImportError raised loading %s: %s" %
                                     (taglib_module, e))
    try:
        return mod.register
    except AttributeError:
        raise InvalidTemplateLibrary("Template library %s does not have "
                                     "a variable named 'register'" %
                                     taglib_module)
# Process-wide cache of importable "<app>.templatetags" module names.
templatetags_modules = []

def get_templatetags_modules():
    """
    Return the list of all available template tag modules.
    Caches the result for faster access.
    """
    global templatetags_modules
    if templatetags_modules:
        return templatetags_modules
    # Populate list once per process. Mutate the local list first, and
    # then assign it to the global name to ensure there are no cases where
    # two threads try to populate it simultaneously.
    found = []
    for app_module in ['django'] + list(settings.INSTALLED_APPS):
        candidate = '%s.templatetags' % app_module
        try:
            import_module(candidate)
        except ImportError:
            continue
        found.append(candidate)
    templatetags_modules = found
    return templatetags_modules
def get_library(library_name):
    """
    Load the template library module with the given name.
    If library is not already loaded loop over all templatetags modules
    to locate it.
    {% load somelib %} and {% load someotherlib %} loops twice.
    Subsequent loads eg. {% load somelib %} in the same process will grab
    the cached module from libraries.
    """
    lib = libraries.get(library_name, None)
    if lib:
        return lib
    tried = []
    for module in get_templatetags_modules():
        candidate = '%s.%s' % (module, library_name)
        tried.append(candidate)
        lib = import_library(candidate)
        if lib:
            # Cache for subsequent loads in this process.
            libraries[library_name] = lib
            return lib
    raise InvalidTemplateLibrary("Template library %s not found, "
                                 "tried %s" %
                                 (library_name,
                                  ','.join(tried)))
def add_to_builtins(module):
    """Import *module* as a tag library and append it to the builtins list."""
    builtins.append(import_library(module))

# Every template automatically has access to the default tags and filters.
add_to_builtins('django.template.defaulttags')
add_to_builtins('django.template.defaultfilters')
| |
from nose.tools import assert_regexp_matches, assert_equal, assert_false
from cassandra import (InvalidRequest, ReadFailure,
ReadTimeout, Unauthorized, Unavailable, WriteFailure,
WriteTimeout)
from cassandra.query import SimpleStatement
from time import sleep
import tools
"""
The assertion methods in this file are used to structure, execute, and test different queries and scenarios. Use these anytime you are trying
to check the content of a table, the row count of a table, if a query should raise an exception, etc. These methods handle error messaging
well, and will help with discovering and treating bugs.
An example:
Imagine some table, test:
id | name
1 | John Doe
2 | Jane Doe
We could assert the row count is 2 by using:
assert_row_count(session, 'test', 2)
After inserting [3, 'Alex Smith'], we can ensure the table is correct by:
assert_all(session, "SELECT * FROM test", [[1, 'John Doe'], [2, 'Jane Doe'], [3, 'Alex Smith']])
or we could check the insert was successful:
assert_one(session, "SELECT * FROM test WHERE id = 3", [3, 'Alex Smith'])
We could remove all rows in test, and assert this was successful with:
assert_none(session, "SELECT * FROM test")
Perhaps we want to assert invalid queries will throw an exception:
assert_invalid(session, "SELECT FROM test")
or, maybe after shutting down all the nodes, we want to assert an Unavailable exception is raised:
assert_unavailable(session.execute, "SELECT * FROM test")
OR
assert_exception(session, "SELECT * FROM test", expected=Unavailable)
"""
def _assert_exception(fun, *args, **kwargs):
matching = kwargs.pop('matching', None)
expected = kwargs['expected']
try:
if len(args) == 0:
fun(None)
else:
fun(*args)
except expected as e:
if matching is not None:
assert_regexp_matches(str(e), matching)
except Exception as e:
raise e
else:
assert False, "Expecting query to raise an exception, but nothing was raised."
def assert_exception(session, query, matching=None, expected=None):
    """Execute *query* on *session* and assert *expected* is raised."""
    if expected is None:
        # Refuse to run without a concrete expected exception type.
        assert False, "Expected exception should not be None. Your test code is wrong, please set `expected`."
    _assert_exception(session.execute, query, matching=matching, expected=expected)
def assert_unavailable(fun, *args):
    """
    Attempt to execute a function, and assert Unavailable, WriteTimeout, WriteFailure, ReadTimeout, or ReadFailure exception is raised.
    @param fun Function to be executed
    @param *args Arguments to be passed to the function
    Examples:
      assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
      assert_unavailable(lambda c: debug(c.execute(statement)), session)
    """
    failure_types = (Unavailable, WriteTimeout, WriteFailure, ReadTimeout, ReadFailure)
    _assert_exception(fun, *args, expected=failure_types)
def assert_invalid(session, query, matching=None, expected=InvalidRequest):
    """
    Attempt to issue a query and assert that the query is invalid.
    @param session Session to use
    @param query Invalid query to run
    @param matching Optional error message string contained within expected exception
    @param expected Exception expected to be raised by the invalid query
    Examples:
      assert_invalid(session, 'DROP USER nonexistent', "nonexistent doesn't exist")
    """
    # Thin wrapper that defaults the expected exception to InvalidRequest.
    assert_exception(session, query, matching=matching, expected=expected)
def assert_unauthorized(session, query, message):
    """
    Attempt to issue a query, and assert Unauthorized is raised.
    @param session Session to use
    @param query Unauthorized query to run
    @param message Expected error message
    Examples:
      assert_unauthorized(session, "ALTER USER cassandra NOSUPERUSER", "You aren't allowed to alter your own superuser status")
      assert_unauthorized(cathy, "ALTER TABLE ks.cf ADD val int", "User cathy has no ALTER permission on <table ks.cf> or any of its parents")
    """
    # Thin wrapper: the message doubles as the matching regexp.
    assert_exception(session, query, matching=message, expected=Unauthorized)
def assert_one(session, query, expected, cl=None):
    """
    Assert query returns exactly one row with the expected contents.
    @param session Session to use
    @param query Query to run
    @param expected Expected results from query
    @param cl Optional Consistency Level setting. Default ONE
    Examples:
      assert_one(session, "LIST USERS", ['cassandra', True])
      assert_one(session, query, [0, 0])
    """
    stmt = SimpleStatement(query, consistency_level=cl)
    rows = tools.rows_to_list(session.execute(stmt))
    assert rows == [expected], "Expected {} from {}, but got {}".format([expected], query, rows)
def assert_none(session, query, cl=None):
    """
    Assert query returns nothing
    @param session Session to use
    @param query Query to run
    @param cl Optional Consistency Level setting. Default ONE
    Examples:
      assert_none(self.session1, "SELECT * FROM test where key=2;")
      assert_none(cursor, "SELECT * FROM test WHERE k=2", cl=ConsistencyLevel.SERIAL)
    """
    stmt = SimpleStatement(query, consistency_level=cl)
    rows = tools.rows_to_list(session.execute(stmt))
    assert rows == [], "Expected nothing from {}, but got {}".format(query, rows)
def assert_all(session, query, expected, cl=None, ignore_order=False):
    """
    Assert query returns all expected items optionally in the correct order
    @param session Session in use
    @param query Query to run
    @param expected Expected results from query
    @param cl Optional Consistency Level setting. Default ONE
    @param ignore_order Optional boolean flag determining whether response is ordered
    Examples:
      assert_all(session, "LIST USERS", [['aleksey', False], ['cassandra', True]])
      assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
    """
    stmt = SimpleStatement(query, consistency_level=cl)
    rows = tools.rows_to_list(session.execute(stmt))
    if ignore_order:
        # Compare as multisets by sorting both sides.
        expected, rows = sorted(expected), sorted(rows)
    assert rows == expected, "Expected {} from {}, but got {}".format(expected, query, rows)
def assert_almost_equal(*args, **kwargs):
    """
    Assert a variable number of arguments all fall within a margin of error.

    @params *args variable number of numerical arguments to check
    @params error Optional margin of error. Default 0.16
    @params error_message Optional error message to print. Default ''

    Examples:
    assert_almost_equal(sizes[2], init_size)
    assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
    """
    error = kwargs.get('error', 0.16)
    error_message = kwargs.get('error_message', '')
    vmax = max(args)
    vmin = min(args)
    # BUGFIX: the original used the invalid format spec "{.2f}" (parsed as an
    # attribute lookup), which raised AttributeError instead of producing the
    # failure message. "{:.2f}" is the correct fixed-point format spec.
    assert vmin > vmax * (1.0 - error) or vmin == vmax, \
        "values not within {:.2f}% of the max: {} ({})".format(error * 100, args, error_message)
def assert_row_count(session, table_name, expected, where=None):
    """
    Assert that the number of rows in a table matches *expected*.

    @param session Session to use
    @param table_name Name of the table to query
    @param expected Number of rows expected to be in table
    @param where Optional WHERE clause (without the keyword) to filter rows

    Examples:
    assert_row_count(self.session1, 'ttl_table', 1)
    """
    if where is None:
        query = "SELECT count(*) FROM {};".format(table_name)
    else:
        query = "SELECT count(*) FROM {} WHERE {};".format(table_name, where)
    res = session.execute(query)
    count = res[0][0]
    assert count == expected, "Expected a row count of {} in table '{}', but got {}".format(
        expected, table_name, count
    )
def assert_crc_check_chance_equal(session, table, expected, ks="ks", view=False):
    """
    Assert crc_check_chance equals expected for a given table or view.

    @param session Session to use
    @param table Name of the table or view to check
    @param expected Expected crc_check_chance value
    @param ks Optional Name of the keyspace
    @param view Optional Boolean flag indicating if the table is a view

    Examples:
    assert_crc_check_chance_equal(session, "compression_opts_table", 0.25)
    assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
    """
    # The driver still doesn't support the top-level crc_check_chance
    # property, so fetch it directly from system_schema.
    # BUGFIX: the keyspace was previously hardcoded as 'ks', silently
    # ignoring the ks parameter; it is now interpolated into the query.
    if view:
        query = ("SELECT crc_check_chance from system_schema.views WHERE keyspace_name = '{ks}' AND "
                 "view_name = '{table}';").format(ks=ks, table=table)
    else:
        query = ("SELECT crc_check_chance from system_schema.tables WHERE keyspace_name = '{ks}' AND "
                 "table_name = '{table}';").format(ks=ks, table=table)
    assert_one(session, query, [expected])
def assert_length_equal(object_with_length, expected_length):
    """
    Assert an object has a specific length.

    @param object_with_length The object whose length will be checked
    @param expected_length The expected length of the object

    Examples:
    assert_length_equal(res, nb_counter)
    """
    # Consistency: use a bare assert like every other helper in this module
    # instead of nose's assert_equal (which is not used elsewhere here).
    actual = len(object_with_length)
    assert actual == expected_length, \
        "Expected {} to have length {}, but instead is of length {}".format(
            object_with_length, expected_length, actual)
def assert_not_running(node):
    """
    Assert that a given node is not running, waiting up to ~10 seconds
    for it to stop before failing.

    @param node The node to check status
    """
    attempts = 0
    # Poll once per second, giving the node up to 10 seconds to stop.
    while attempts < 10 and node.is_running():
        sleep(1)
        attempts += 1
    assert_false(node.is_running())
| |
from synapse.tests.common import *
import synapse.lib.fifo as s_fifo
class FifoTest(SynTest):
    """
    Tests for synapse.lib.fifo.Fifo: on-disk persistence, the xmit window,
    ack handling, and resync behavior under concurrent access.
    """
    def test_fifo_nack_past(self):
        """
        If the oldest seq file is removed on disk, a reopened fifo should
        resume with its nack pointer at the next seq offset.
        """
        with self.getTestDir() as dirn:
            conf = {
                'dir': dirn,
                'file:maxsize': 1024,
                'window:max': 4,
                'window:min': 2,
                'window:fill': 1,
            }
            sent = []
            with s_fifo.Fifo(conf) as fifo:
                self.eq(fifo.wind.nack, 0)
                # put until the fifo rolls over to a fresh seq file
                while fifo.atom.size != 0:
                    fifo.put('whee')
                nseq = fifo.nseq
                path = fifo._getSeqPath(0)
                os.unlink(path)
            with s_fifo.Fifo(conf) as fifo:
                self.eq(fifo.wind.nack, nseq)

    def test_fifo_flush(self):
        """flush() should persist when dirty and be a no-op when clean."""
        with self.getTestDir() as dirn:
            conf = {'dir': dirn}
            sent = []
            with s_fifo.Fifo(conf) as fifo:
                fifo.put('whee')
                fifo.put('whee')
                fifo.resync(xmit=sent.append)
                fifo.ack(sent[0][1])
                # dirty
                fifo.flush()
                # not dirty
                fifo.flush()

    def test_fifo_ack_neg1(self):
        """ack(-1) should re-transmit the window; stale acks return False."""
        with self.getTestDir() as dirn:
            conf = {'dir': dirn}
            sent = []
            with s_fifo.Fifo(conf, xmit=sent.append) as fifo:
                fifo.put('foo')
                fifo.put('bar')
                slen = len(sent)
                fifo.ack(-1)
                # everything previously sent is sent again, in order
                self.eq(len(sent), slen * 2)
                self.eq(sent[:slen], sent[slen:])
                # also test ack of lower than nack
                self.true(fifo.ack(sent[0][1]))
                self.false(fifo.ack(sent[0][1]))

    def test_fifo_fifo(self):
        """
        End-to-end window behavior: fill/caught-up transitions, acks that
        shrink and refill the window, file rollover and culling of fully
        acked seq files.
        """
        with self.getTestDir() as dirn:
            # some very small settings so we trigger
            # more of the cleanup / culling code...
            conf = {
                'dir': dirn,
                'file:maxsize': 1024,
                'window:max': 4,
                'window:min': 2,
                'window:fill': 1,
            }
            sent = []
            with s_fifo.Fifo(conf) as fifo:
                fifo.put('foo')
                fifo.resync(xmit=sent.append)
                self.eq(sent[0][2], 'foo')
                self.len(1, fifo.wind.dequ)
                # these should all end up in the window
                fifo.put('bar')
                fifo.put('baz')
                fifo.put('faz')
                self.eq(sent[1][2], 'bar')
                self.eq(sent[2][2], 'baz')
                self.eq(sent[3][2], 'faz')
                self.len(4, fifo.wind.dequ)
                self.true(fifo.wind.caught)
                # the next should *not* make it in the window
                fifo.put('hehe')
                fifo.put('haha')
                fifo.put('hoho')
                self.len(4, fifo.wind.dequ)
                self.false(fifo.wind.caught)
                # ack 0 should shrink the window, but not fill()
                self.true(fifo.ack(sent[0][0]))
                self.len(3, fifo.wind.dequ)
                # ack next should trigger fill and load hehe/haha
                # but still not catch us up...
                self.true(fifo.ack(sent[1][0]))
                self.len(6, sent)
                self.len(4, fifo.wind.dequ)
                self.false(fifo.wind.caught)
                # ack skip to the window end (which should also set caught)
                self.true(fifo.ack(sent[-1][0]))
                self.len(7, sent)
                self.true(fifo.wind.caught)
                # now that we are caught up again, a put should xmit
                fifo.put('visi')
                self.eq(sent[-1][2], 'visi')
            # now lets check starting with an existing one...
            with s_fifo.Fifo(conf) as fifo:
                sent = []
                fifo.resync(xmit=sent.append)
                self.eq(sent[0][2], 'hoho')
                self.eq(sent[1][2], 'visi')
                self.true(fifo.wind.caught)
                # send a skip ack
                fifo.ack(sent[1][0])
                # put in enough messages to cause file next
                while fifo.atom.size != 0:
                    fifo.put('whee')
                self.len(2, fifo.seqs)
                self.false(fifo.wind.caught)
                # put in enough that when we jump over to
                # reading this file, we will not be caught up
                fifo.put('foo1')
                fifo.put('bar1')
                fifo.put('baz1')
                fifo.put('faz1')
                fifo.put('zip1')
                self.true(os.path.isfile(fifo._getSeqPath(0)))
                # draining the first file's whee messages should cull it
                while sent[-1][2] == 'whee':
                    fifo.ack(sent[-1][0])
                self.len(1, fifo.seqs)
                self.none(fifo.atoms.get(0))
                self.false(os.path.isfile(fifo._getSeqPath(0)))
                # the last message should be one of the following
                # ( we may not know exactly which due to whee mod math )
                self.true(sent[-1][2] in ('foo1', 'bar1', 'baz1', 'faz1'))
                self.false(fifo.wind.caught)
                # by acking until faz1 we will fill zip1 into the window
                # and cause ourselves to be caught up again...
                # faz1 should be lifted into the window
                while sent[-1][2] != 'zip1':
                    fifo.ack(sent[-1][0])
                self.true(fifo.wind.caught)

    def test_fifo_puts(self):
        """puts() should xmit each item with its own (seqn, nseq) offsets."""
        with self.getTestDir() as dirn:
            sent = []
            conf = {'dir': dirn}
            with s_fifo.Fifo(conf) as fifo:
                fifo.resync(xmit=sent.append)
                fifo.puts(('foo', 'bar'))
                self.eq(tuple(sent), ((0, 4, 'foo'), (4, 8, 'bar')))

    def test_fifo_resync_race_put(self):
        """
        A puts() racing a resync() must be serialized after the resync
        iteration, so items still arrive exactly once and in order.
        """
        with self.getTestDir() as dirn:
            N = 1000
            conf = {'dir': dirn}
            evt = threading.Event()
            items = ['foo' + str(i) for i in range(N)]
            sent = []

            def race(data):
                # signal the writer thread, then yield to maximize overlap
                evt.set()
                time.sleep(0)
                sent.append(data[2])

            @firethread
            def otherwrite():
                evt.wait()
                fifo.puts(('attempting to mutate', 'during iteration'))

            with s_fifo.Fifo(conf) as fifo:
                fifo.puts(items)
                thr = otherwrite()
                fifo.resync(xmit=race)
                thr.join()
                self.len(N + 2, sent)
                self.eq(sent[0:N], items)
                self.eq(sent[N:N + 1], ['attempting to mutate'])
                self.eq(sent[N + 1:N + 2], ['during iteration'])
            # reopening and resyncing again replays everything in order
            with s_fifo.Fifo(conf) as fifo:
                fifo.resync(xmit=race)
                self.len(2 * (N + 2), sent)
                self.eq(sent[0:N], items)
                self.eq(sent[N:N + 1], ['attempting to mutate'])
                self.eq(sent[N + 1:N + 2], ['during iteration'])
                self.eq(sent[N + 2:2 * N + 2], items)
                self.eq(sent[2 * N + 2:2 * N + 3], ['attempting to mutate'])
                self.eq(sent[2 * N + 3:2 * N + 4], ['during iteration'])

    def test_fifo_resync_race_ack(self):
        """An ack() racing a resync() must not drop or duplicate items."""
        with self.getTestDir() as dirn:
            N = 1000
            conf = {'dir': dirn}
            evt = threading.Event()
            items = ['foo' + str(i) for i in range(N)]
            sent = []

            def race(data):
                evt.set()
                time.sleep(0)
                sent.append(data[2])

            @firethread
            def otherwrite():
                evt.wait()
                # This call to ack will not actually cull anything because
                # it won't run until after iteration has completed.
                fifo.ack(100)

            with s_fifo.Fifo(conf) as fifo:
                fifo.puts(items)
                thr = otherwrite()
                fifo.resync(xmit=race)
                thr.join()
                # The end result should be all of the items in order.
                self.len(N, sent)
                self.eq(sent, items)

    def test_fifo_resync_race_ack_resync(self):
        """An ack(-1) racing a resync() replays the full sequence once more."""
        with self.getTestDir() as dirn:
            N = 1000
            conf = {'dir': dirn}
            evt = threading.Event()
            items = ['foo' + str(i) for i in range(N)]
            sent = []

            def race(data):
                evt.set()
                time.sleep(0)
                sent.append(data[2])

            @firethread
            def otherwrite():
                evt.wait()
                # This call to ack will not actually cull anything because
                # its seqn is -1. Instead, it will call resync, which won't
                # until after iteration has completed.
                fifo.ack(-1)

            with s_fifo.Fifo(conf) as fifo:
                fifo.puts(items)
                thr = otherwrite()
                fifo.resync(xmit=race)
                thr.join()
                # The end result should be all of the items in order, followed by all of the items in order again
                self.len(2 * N, sent)
                self.eq(sent[0:N], items)
                self.eq(sent[N:2 * N], items)
| |
import simplejson as json
from flask import render_template_string
from nose.tools import eq_
from standup.apps.api2.decorators import api_key_required
from standup.apps.users.models import Team
from standup.database import get_session
from standup.tests import BaseTestCase, project, status, team, user
from urllib import urlencode
class DecoratorsTestCase(BaseTestCase):
    """Tests for the api_key_required view decorator."""
    def setUp(self):
        super(DecoratorsTestCase, self).setUp()

        # Register a throwaway protected endpoint on the test app.
        @self.app.route('/_tests/_api/_protected', methods=['POST'])
        @api_key_required
        def protected():
            return render_template_string('Success!')

    def test_api_key_required(self):
        """Test the API key required decorator"""
        # A request carrying the configured API key is allowed through.
        payload = dict(api_key=self.app.config.get('API_KEY'))
        resp = self.client.post(
            '/_tests/_api/_protected', data=payload,
            content_type='application/x-www-form-urlencoded')
        eq_(resp.status_code, 200)
        # A request without the key is rejected with 403.
        payload = json.dumps({})
        resp = self.client.post(
            '/_tests/_api/_protected', data=payload,
            content_type='application/x-www-form-urlencoded')
        eq_(resp.status_code, 403)
class TimelinesMixin(object):
    """Mixin to test standard timeline params."""
    def _url(self, query=None):
        """Build the endpoint URL, merging *query* into the default params."""
        if query:
            self.query.update(query)
        return '{0}?{1}'.format(self.url, urlencode(self.query))

    def test_timeline(self):
        """Test the home_timeline endpoint"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            status(user=author, project=proj, save=True)
            resp = self.client.get(self._url())
            eq_(resp.status_code, 200)
            eq_(resp.content_type, 'application/json')

    def test_timeline_count(self):
        """Test the count parameter of home_timeline"""
        self.app.config['API2_TIMELINE_MAX_RESULTS'] = 50
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            for _ in range(60):
                status(project=proj, user=author, save=True)
            # Default count
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(len(result), 20)
            # Test with an acceptable count
            resp = self.client.get(self._url(dict(count=50)))
            result = json.loads(resp.data)
            eq_(len(result), 50)
            # Test with a count that is too large
            resp = self.client.get(self._url(dict(count=60)))
            eq_(resp.status_code, 400)
            # Test with a count that is too small
            resp = self.client.get(self._url(dict(count=0)))
            eq_(resp.status_code, 400)
            # Test with an invalid count
            resp = self.client.get(self._url(dict(count='a')))
            eq_(resp.status_code, 400)

    def test_timeline_since_id(self):
        """Test the since_id parameter of home_timeline"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            for _ in range(30):
                status(project=proj, user=author, save=True)
            resp = self.client.get(self._url(dict(since_id=10, count=20)))
            result = json.loads(resp.data)
            eq_(result[19]['id'], 11)
            resp = self.client.get(self._url(dict(since_id=10, count=10)))
            result = json.loads(resp.data)
            eq_(result[9]['id'], 21)
            resp = self.client.get(self._url(dict(since_id=10, count=30)))
            result = json.loads(resp.data)
            eq_(len(result), 20)
            eq_(result[19]['id'], 11)
            # Invalid since_id values are rejected.
            resp = self.client.get(self._url(dict(since_id=0)))
            eq_(resp.status_code, 400)
            resp = self.client.get(self._url(dict(since_id='a')))
            eq_(resp.status_code, 400)

    def test_timeline_max_id(self):
        """Test the max_id parameter of home_timeline"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            for _ in range(30):
                status(project=proj, user=author, save=True)
            resp = self.client.get(self._url(dict(max_id=10, count=20)))
            result = json.loads(resp.data)
            eq_(len(result), 10)
            eq_(result[0]['id'], 10)
            resp = self.client.get(self._url(dict(max_id=10, since_id=5)))
            result = json.loads(resp.data)
            eq_(len(result), 5)
            # Invalid max_id values are rejected.
            resp = self.client.get(self._url(dict(max_id=0)))
            eq_(resp.status_code, 400)
            resp = self.client.get(self._url(dict(max_id='a')))
            eq_(resp.status_code, 400)

    def test_timeline_trim_user(self):
        """Test the trim_user parameter of home_timeline"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            status(user=author, project=proj, save=True)
            # Without trimming the full user dict is embedded.
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(result[0]['user'], author.dictify())
            # With trimming only the user id remains.
            resp = self.client.get(self._url(dict(trim_user=1)))
            result = json.loads(resp.data)
            eq_(result[0]['user'], author.id)

    def test_timeline_trim_project(self):
        """Test the trim_project parameter of home_timeline"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            status(user=author, project=proj, save=True)
            # Without trimming the full project dict is embedded.
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(result[0]['project'], proj.dictify())
            # With trimming only the project id remains.
            resp = self.client.get(self._url(dict(trim_project=1)))
            result = json.loads(resp.data)
            eq_(result[0]['project'], proj.id)

    def test_timeline_include_replies(self):
        """Test the include_replies parameter of home_timeline"""
        with self.app.app_context():
            author = user(save=True, team={})
            proj = project(save=True)
            for _ in range(10):
                parent = status(project=proj, user=author, save=True)
            for _ in range(10):
                status(project=proj, user=author, reply_to=parent, save=True)
            # Replies are excluded by default...
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(len(result), 10)
            # ...and included when requested.
            resp = self.client.get(self._url(dict(include_replies=1)))
            result = json.loads(resp.data)
            eq_(len(result), 20)
class HomeTimelinesTestCase(BaseTestCase, TimelinesMixin):
    """Run the shared timeline tests against the home_timeline endpoint."""
    def setUp(self):
        super(HomeTimelinesTestCase, self).setUp()
        # home_timeline needs no default query params
        self.query = {}
        self.url = '/api/v2/statuses/home_timeline.json'
class UserTimelinesTestCase(BaseTestCase, TimelinesMixin):
    """Run the shared timeline tests against the user_timeline endpoint."""
    def setUp(self):
        super(UserTimelinesTestCase, self).setUp()
        self.url = '/api/v2/statuses/user_timeline.json'
        self.query = {'screen_name': 'jdoe'}

    def test_no_user_query(self):
        """A request without any user identifier is rejected with 400."""
        self.query = {}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 400)

    def test_user_404(self):
        """An unknown screen_name yields a 404."""
        self.query = {'screen_name': 'xxx'}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 404)

    def test_timeline_filters_user(self):
        """Test the timeline only shows the passed in user."""
        with self.app.app_context():
            author = user(save=True)
            status(user=author, project=None, save=True)
            other = user(username='janedoe', email='jane@doe.com',
                         slug='janedoe', save=True)
            status(user=other, project=None, save=True)
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(len(result), 1)
            eq_(result[0]['user'], author.dictify())

    def test_timeline_filter_by_user_id(self):
        """Filtering by numeric user_id also works."""
        with self.app.app_context():
            author = user(save=True)
            self.query = {'user_id': author.id}
            resp = self.client.get(self._url())
            eq_(resp.status_code, 200)
class ProjectTimelinesTestCase(BaseTestCase, TimelinesMixin):
    """Run the shared timeline tests against the project_timeline endpoint."""
    def setUp(self):
        super(ProjectTimelinesTestCase, self).setUp()
        self.url = '/api/v2/statuses/project_timeline.json'
        self.query = {'slug': 'test-project'}

    def test_no_project_query(self):
        """A request without a project identifier is rejected with 400."""
        self.query = {}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 400)

    def test_project_404(self):
        """An unknown project slug yields a 404."""
        self.query = {'slug': 'xxx'}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 404)

    def test_timeline_filters_project(self):
        """Test the timeline only shows the passed in project."""
        with self.app.app_context():
            author = user(save=True)
            proj = project(save=True)
            status(user=author, project=proj, save=True)
            other_proj = project(name='Test Project 2', slug='test-project-2',
                                 save=True)
            status(user=author, project=other_proj, save=True)
            resp = self.client.get(self._url())
            result = json.loads(resp.data)
            eq_(len(result), 1)
            eq_(result[0]['project'], proj.dictify())

    def test_timeline_filter_by_project_id(self):
        """Filtering by numeric project_id also works."""
        with self.app.app_context():
            proj = project(save=True)
            self.query = {'project_id': proj.id}
            resp = self.client.get(self._url())
            eq_(resp.status_code, 200)
class TeamTimelinesTestCase(BaseTestCase, TimelinesMixin):
    """Run the shared timeline tests against the team_timeline endpoint."""
    def setUp(self):
        super(TeamTimelinesTestCase, self).setUp()
        self.url = '/api/v2/statuses/team_timeline.json'
        self.query = {'slug': 'test-team'}

    def test_no_team_query(self):
        """A request without a team identifier is rejected with 400."""
        self.query = {}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 400)

    def test_team_404(self):
        """An unknown team slug yields a 404."""
        self.query = {'slug': 'xxx'}
        resp = self.client.get(self._url())
        eq_(resp.status_code, 404)

    def test_timeline_filters_team(self):
        """Test the timeline only shows the passed in team."""
        with self.app.app_context():
            author = user(save=True, team={})
            other = user(username='janedoe', email='jane@doe.com',
                         slug='janedoe', save=True, team={'name': 'XXX',
                                                          'slug': 'xxx'})
            proj = project(save=True)
            status(user=author, project=proj, save=True)
            status(user=other, project=proj, save=True)
            resp = self.client.get(self._url(dict(team_id=author.teams[0].id)))
            result = json.loads(resp.data)
            eq_(len(result), 1)
            eq_(result[0]['user'], author.dictify())

    def test_timeline_filter_by_team_id(self):
        """Filtering by numeric team_id also works."""
        with self.app.app_context():
            squad = team(save=True)
            self.query = {'team_id': squad.id}
            resp = self.client.get(self._url())
            eq_(resp.status_code, 200)
class CreateTeamTestCase(BaseTestCase):
    """Tests for the team creation endpoint."""
    def setUp(self):
        super(CreateTeamTestCase, self).setUp()
        self.url = '/api/v2/teams/create.json'
        self.data = {'slug': 'test', 'name': 'Test Team',
                     'api_key': self.app.config.get('API_KEY')}

    def test_create(self):
        """Test creation of a team."""
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        # The team must now exist in the database.
        db = get_session(self.app)
        created = db.query(Team).filter_by(slug=self.data['slug']).one()
        eq_(created.slug, self.data['slug'])
        eq_(created.name, self.data['name'])

    def test_no_api_key(self):
        """Test request with no API key."""
        self.data.pop('api_key')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 403)

    def test_no_slug(self):
        """Test attempted team creation with no slug."""
        self.data.pop('slug')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_no_name(self):
        """Test team creation with no name."""
        self.data.pop('name')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        # The name defaults to the slug when omitted.
        db = get_session(self.app)
        created = db.query(Team).filter_by(slug=self.data['slug']).one()
        eq_(created.name, self.data['slug'])

    def test_duplicate(self):
        """Test attempted team creation with duplicate slug."""
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)
class DestroyTeamTestCase(BaseTestCase):
    """Tests for the team removal endpoint."""
    def setUp(self):
        super(DestroyTeamTestCase, self).setUp()
        self.url = '/api/v2/teams/destroy.json'
        self.data = {'slug': 'test', 'api_key': self.app.config.get('API_KEY')}
        # Seed a team for each test to destroy.
        with self.app.app_context():
            team(slug=self.data['slug'], name='Test Team', save=True)

    def test_destroy(self):
        """Test removing a team."""
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        db = get_session(self.app)
        eq_(0, db.query(Team).count())

    def test_no_api_key(self):
        """Test request with no API key"""
        self.data.pop('api_key')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 403)

    def test_no_slug(self):
        """Test attempted team removal with no slug."""
        self.data.pop('slug')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_slug(self):
        """Test attempted team removal with invalid slug."""
        self.data['slug'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)
class UpdateTeamTestCase(BaseTestCase):
    """Tests for the team update endpoint."""
    def setUp(self):
        super(UpdateTeamTestCase, self).setUp()
        self.url = '/api/v2/teams/update.json'
        self.data = {'slug': 'test', 'name': 'Updated Team',
                     'api_key': self.app.config.get('API_KEY')}
        # Seed a team for each test to update.
        with self.app.app_context():
            team(slug=self.data['slug'], name='Test Team', save=True)

    def test_update(self):
        """Test update team info."""
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        db = get_session(self.app)
        updated = db.query(Team).filter_by(slug=self.data['slug']).one()
        eq_(updated.name, self.data['name'])

    def test_no_api_key(self):
        """Test request with no API key"""
        self.data.pop('api_key')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 403)

    def test_no_slug(self):
        """Test attempted team update with no slug."""
        self.data.pop('slug')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_slug(self):
        """Test attempted team update with invalid slug."""
        self.data['slug'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)
class TeamMembersTestCase(BaseTestCase):
    """Tests for the team members listing endpoint."""
    def setUp(self):
        super(TeamMembersTestCase, self).setUp()
        self.query = {'slug': 'test'}
        self.url = '/api/v2/teams/members.json'
        with self.app.app_context():
            self.team = team(slug=self.query['slug'], name='Test Team',
                             save=True)
            self.user = user(save=True)

    def test_no_team_members(self):
        """Test list members when no members in team."""
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 200)
        eq_(resp.data, json.dumps({'users': []}))

    def test_team_members(self):
        """Test list members when members are in team."""
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        db = get_session(self.app)
        self.team.users.append(self.user)
        # Build expected payload before committing the membership.
        expected = json.dumps({'users': [self.user.dictify()]})
        db.commit()
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 200)
        eq_(resp.data, expected)

    def test_no_slug(self):
        """Test team members list with no slug."""
        resp = self.client.get(self.url)
        eq_(resp.status_code, 400)

    def test_invalid_slug(self):
        """Test attempted team update with invalid slug."""
        self.query['slug'] = 'xxx'
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 404)
class CreateTeamMemberTestCase(BaseTestCase):
    """Tests for the team member addition endpoint."""
    def setUp(self):
        super(CreateTeamMemberTestCase, self).setUp()
        with self.app.app_context():
            self.user = user(save=True)
            self.team = team(save=True)
            self.url = '/api/v2/teams/members/create.json'
            self.data = {'slug': self.team.slug,
                         'screen_name': self.user.username,
                         'api_key': self.app.config.get('API_KEY')}

    def test_create_team_member(self):
        """Test team member addition."""
        eq_(0, self.team.users.count())
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        db = get_session(self.app)
        updated = db.query(Team).filter_by(slug=self.data['slug']).one()
        eq_(1, updated.users.count())

    def test_duplicate_member(self):
        """Adding the same member twice is rejected with 400."""
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_no_api_key(self):
        """Test request with no API key"""
        self.data.pop('api_key')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 403)

    def test_no_slug(self):
        """Test attempted team member addition with no slug."""
        self.data.pop('slug')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_slug(self):
        """Test attempted team member addition with invalid slug."""
        self.data['slug'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)

    def test_no_screen_name(self):
        """Test attempted team member addition with no screen_name."""
        self.data.pop('screen_name')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_screen_name(self):
        """Test attempted team member addition with invalid screen_name."""
        self.data['screen_name'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)
class DestroyTeamMemberTestCase(BaseTestCase):
    """Tests for the team member removal endpoint."""
    def setUp(self):
        super(DestroyTeamMemberTestCase, self).setUp()
        with self.app.app_context():
            self.user = user(save=True)
            self.team = team(save=True)
            # Seed the membership each test will remove.
            db = get_session(self.app)
            self.team.users.append(self.user)
            db.commit()
            self.url = '/api/v2/teams/members/destroy.json'
            self.data = {'slug': self.team.slug,
                         'screen_name': self.user.username,
                         'api_key': self.app.config.get('API_KEY')}

    def test_destroy_team_member(self):
        """Test team member deletion."""
        eq_(1, self.team.users.count())
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 200)
        db = get_session(self.app)
        updated = db.query(Team).filter_by(slug=self.data['slug']).one()
        eq_(0, updated.users.count())

    def test_no_api_key(self):
        """Test request with no API key"""
        self.data.pop('api_key')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 403)

    def test_no_slug(self):
        """Test attempted team member deletion with no slug."""
        self.data.pop('slug')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_slug(self):
        """Test attempted team member deletion with invalid slug."""
        self.data['slug'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)

    def test_no_screen_name(self):
        """Test attempted team member deletion with no screen_name."""
        self.data.pop('screen_name')
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 400)

    def test_invalid_screen_name(self):
        """Test attempted team member deletion with invalid screen_name."""
        self.data['screen_name'] = 'xxx'
        resp = self.client.post(self.url, data=self.data)
        eq_(resp.status_code, 404)
class TimesinceLastUpdateTestCase(BaseTestCase):
    """Tests for the timesince_last_update info endpoint."""
    def setUp(self):
        super(TimesinceLastUpdateTestCase, self).setUp()
        with self.app.app_context():
            self.user = user(save=True)
            self.url = '/api/v2/info/timesince_last_update.json'
            self.query = {'screen_name': self.user.username}

    def test_timesince(self):
        """Test the timesince last update endpoint."""
        with self.app.app_context():
            status(user_id=self.user.id, save=True)
            endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
            resp = self.client.get(endpoint)
            eq_(resp.status_code, 200)
            eq_(resp.data, '{"timesince": 0}')

    def test_no_statuses(self):
        """Test a user with no statuses."""
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 200)
        eq_(resp.data, '{"timesince": null}')

    def test_no_screen_name(self):
        """Test request with no screen_name."""
        self.query.pop('screen_name')
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 400)

    def test_invalid_screen_name(self):
        """Test request with invalid screen_name."""
        self.query['screen_name'] = 'xxx'
        endpoint = '{0}?{1}'.format(self.url, urlencode(self.query))
        resp = self.client.get(endpoint)
        eq_(resp.status_code, 404)
| |
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys, os, tempfile, rtf2xml.copy
class Fonts:
"""
Change lines with font info from font numbers to the actual font names.
"""
    def __init__(self,
                 in_file,
                 bug_handler,
                 default_font_num,
                 copy = None,
                 run_level = 1,
                 ):
        """
        Required:
            'in_file'--file to parse
            'bug_handler'--exception class raised on internal errors
            'default_font_num'--the default font number
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level'--verbosity/strictness level; values > 3 raise on
                unknown font numbers instead of dropping them
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__default_font_num = default_font_num
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone
        # (the name may be claimed before the file is created) — consider
        # tempfile.mkstemp(); kept as-is here to avoid behavior change.
        self.__write_to = tempfile.mktemp()
        self.__run_level = run_level
def __initiate_values(self):
"""
Initiate all values.
"""
self.__special_font_dict = {
'Symbol' : 0,
'Wingdings' : 0,
'Zapf Dingbats' : 0,
}
self.__special_font_list = [
'Symbol', 'Wingdings', 'Zapf Dingbats'
]
self.__state = 'default'
self.__state_dict = {
'default' : self.__default_func,
'font_table' : self.__font_table_func,
'after_font_table' : self.__after_font_table_func,
'font_in_table' : self.__font_in_table_func,
}
self.__font_table = {}
# individual font written
self.__wrote_ind_font = 0
    def __default_func(self, line):
        """
        Requires:
            line -- the current line being parsed
        Returns:
            nothing
        Handle all lines before the font table. Check for the beginning of the
        font table. If found, change the state. Print out all lines.
        """
        if self.__token_info == 'mi<mk<fonttb-beg':
            self.__state = 'font_table'
        # Pre-table lines pass through unchanged.
        self.__write_obj.write(line)
    def __font_table_func(self, line):
        """
        Requires:
            line -- the current line being parsed
        Returns:
            nothing
        Logic:
            If the self.__token_info indicates that you have reached the end of
            the font table, then change the state to after the font table.
            If the self.__token_info indicates that there is a font in the
            table, change the state to font in table. Reset the number of the
            font to the default font (in case there is no number provided, in
            which case RTF assumes the number will be the default font.) Reset
            the text line (for the font name) to ''.
            Note: font-table lines themselves are not written to the output.
        """
        if self.__token_info == 'mi<mk<fonttb-end':
            self.__state = 'after_font_table'
        elif self.__token_info == 'mi<mk<fontit-beg':
            self.__state = 'font_in_table'
            self.__font_num = self.__default_font_num
            self.__text_line = ''
        ##self.__write_obj.write(line)
    def __font_in_table_func(self, line):
        """
        Requires:
            line -- the current line being parsed
        Returns:
            nothing
        Logic:
            Check for four conditions:
                The line contains font-info. In this case, store the number in
                self.__font_num.
                The line contains text. In this case, add to the text string
                self.__text_line.
                The line marks the end of the individual font in the table. In
                this case, add a new key-> value pair to the font-table
                dictionary. Also create an empty tag with the name and number
                as attributes.
                Premature end of font table.
        """
        #cw<ci<font-style<nu<4
        #tx<nu<__________<Times;
        if self.__token_info == 'mi<mk<fontit-end':
            self.__wrote_ind_font = 1
            self.__state = 'font_table'
            self.__text_line = self.__text_line[:-1]  # get rid of last ';'
            self.__font_table[self.__font_num] = self.__text_line
            self.__write_obj.write(
                'mi<tg<empty-att_'
                '<font-in-table<name>%s<num>%s\n' % (self.__text_line, self.__font_num)
            )
        elif self.__token_info == 'cw<ci<font-style':
            # chars 20..-1 hold the font number (after the 20-char prefix)
            self.__font_num = line[20:-1]
        elif self.__token_info == 'tx<nu<__________' or \
                self.__token_info == 'tx<ut<__________':
            # accumulate the font name text (17-char prefix stripped)
            self.__text_line += line[17:-1]
        elif self.__token_info == 'mi<mk<fonttb-end':
            # table ended before this font entry closed
            self.__found_end_font_table_func()
            self.__state = 'after_font_table'
def __found_end_font_table_func(self):
"""
Required:
nothing
Returns:
nothing
Logic:
If not individual fonts have been written, write one out
"""
if not self.__wrote_ind_font:
self.__write_obj.write(
'mi<tg<empty-att_'
'<font-in-table<name>Times<num>0\n' )
def __after_font_table_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Check the self.__token_info. If this matches a token with font
info, then extract the number from the line, and look up the font
name in the font dictionary. If no name exists for that number,
print out an error. Otherwise print out the same line, except with
the name rather than the number.
If the line does not contain font info, simply print it out to the
file.
"""
if self.__token_info == 'cw<ci<font-style':
font_num = line[20:-1]
font_name = self.__font_table.get(font_num)
if font_name == None:
if self.__run_level > 3:
msg = 'no value for %s in self.__font_table\n' % font_num
raise self.__bug_handler, msg
else:
# self.__special_font_dict
if font_name in self.__special_font_list:
self.__special_font_dict[font_name] = 1
self.__write_obj.write(
'cw<ci<font-style<nu<%s\n' % font_name
)
else:
self.__write_obj.write(line)
def convert_fonts(self):
"""
Required:
nothing
Returns:
a dictionary indicating with values for special fonts
Logic:
Read one line in at a time. Determine what action to take based on
the state. If the state is font_table, looke for individual fonts
and add the number and font name to a dictionary. Also create a
tag for each individual font in the font table.
If the state is after the font table, look for lines with font
info. Substitute a font name for a font number.
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
if action == None:
sys.stderr.write('no no matching state in module fonts.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
default_font_name = self.__font_table.get(self.__default_font_num)
if not default_font_name:
default_font_name = 'Not Defined'
self.__special_font_dict['default-font'] = default_font_name
copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "fonts.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__special_font_dict
# ---- file boundary artifact: end of rtf2xml fonts module ----
#
# Copyright (c) 2015,2016 Red Hat, Inc.
#
# Author: Loic Dachary <loic@dachary.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import logging
import os
import pytest
import subprocess
import tempfile
import time
from mock import patch
import teuthology
from teuthology import misc
from teuthology.config import set_config_attr
from teuthology.openstack import TeuthologyOpenStack, OpenStack, OpenStackInstance
from teuthology.openstack import NoFlavorException
import scripts.openstack
class TestOpenStackBase(object):
    """Shared fixture: reset the cached token and sandbox OS_* env vars.

    setup() snapshots every OS_* environment variable; teardown()
    restores the snapshot and removes any OS_* variable a test added.
    """

    def setup(self):
        # Forget any token cached by a previous test.
        OpenStack.token = None
        OpenStack.token_expires = None
        # Snapshot the OS_* environment so teardown can restore it.
        self.environ = {k: v for k, v in os.environ.items()
                        if k.startswith('OS_')}

    def teardown(self):
        OpenStack.token = None
        OpenStack.token_expires = None
        # Iterate over a snapshot of the keys: deleting from os.environ
        # while iterating its live key view raises RuntimeError on
        # Python 3 (os.environ.keys() was a list only on Python 2).
        for k in list(os.environ.keys()):
            if k.startswith('OS_'):
                if k in self.environ:
                    os.environ[k] = self.environ[k]
                else:
                    del os.environ[k]
class TestOpenStackInstance(TestOpenStackBase):
    """Tests for OpenStackInstance, driven by canned `openstack` JSON.

    misc.sh is patched in every test, so no real cloud is contacted.
    """

    # Verbatim capture of `openstack server show -f json` output:
    # an ACTIVE server with one address and three attached volumes.
    teuthology_instance = """
    {
      "OS-EXT-STS:task_state": null,
      "addresses": "Ext-Net=167.114.233.32",
      "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)",
      "OS-EXT-STS:vm_state": "active",
      "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000",
      "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)",
      "id": "f3ca32d7-212b-458b-a0d4-57d1085af953",
      "security_groups": [
        {
          "name": "default"
        }
      ],
      "user_id": "3a075820e5d24fda96cd340b87fd94e9",
      "OS-DCF:diskConfig": "AUTO",
      "accessIPv4": "",
      "accessIPv6": "",
      "progress": 0,
      "OS-EXT-STS:power_state": 1,
      "project_id": "62cf1be03cec403c8ed8e64df55732ea",
      "config_drive": "",
      "status": "ACTIVE",
      "updated": "2015-11-03T13:48:53Z",
      "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d",
      "OS-SRV-USG:terminated_at": null,
      "key_name": "loic",
      "properties": "",
      "OS-EXT-AZ:availability_zone": "nova",
      "name": "mrdarkdragon",
      "created": "2015-08-17T12:21:31Z",
      "os-extended-volumes:volumes_attached": [{"id": "627e2631-fbb3-48cd-b801-d29cd2a76f74"}, {"id": "09837649-0881-4ee2-a560-adabefc28764"}, {"id": "44e5175b-6044-40be-885a-c9ddfb6f75bb"}]
    }
    """
    # Same server, but with no addresses and no attached volumes.
    teuthology_instance_no_addresses = """
    {
      "OS-EXT-STS:task_state": null,
      "addresses": "",
      "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)",
      "OS-EXT-STS:vm_state": "active",
      "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000",
      "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)",
      "id": "f3ca32d7-212b-458b-a0d4-57d1085af953",
      "security_groups": [
        {
          "name": "default"
        }
      ],
      "user_id": "3a075820e5d24fda96cd340b87fd94e9",
      "OS-DCF:diskConfig": "AUTO",
      "accessIPv4": "",
      "accessIPv6": "",
      "progress": 0,
      "OS-EXT-STS:power_state": 1,
      "project_id": "62cf1be03cec403c8ed8e64df55732ea",
      "config_drive": "",
      "status": "ACTIVE",
      "updated": "2015-11-03T13:48:53Z",
      "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d",
      "OS-SRV-USG:terminated_at": null,
      "key_name": "loic",
      "properties": "",
      "OS-EXT-AZ:availability_zone": "nova",
      "name": "mrdarkdragon",
      "created": "2015-08-17T12:21:31Z",
      "os-extended-volumes:volumes_attached": []
    }
    """
    @classmethod
    def setup_class(self):
        # NOTE(review): the first argument of a classmethod is
        # conventionally named cls, not self.
        if 'OS_AUTH_URL' not in os.environ:
            pytest.skip('no OS_AUTH_URL environment variable')
    def test_init(self):
        """Server info is fetched by name unless a dict is provided."""
        with patch.multiple(
            misc,
            sh=lambda cmd: self.teuthology_instance,
        ):
            o = OpenStackInstance('NAME')
            assert o['id'] == 'f3ca32d7-212b-458b-a0d4-57d1085af953'
            # An explicit info dict short-circuits the `openstack` call.
            o = OpenStackInstance('NAME', {"id": "OTHER"})
            assert o['id'] == "OTHER"
    def test_get_created(self):
        """get_created() derives a positive age from the fixture."""
        with patch.multiple(
            misc,
            sh=lambda cmd: self.teuthology_instance,
        ):
            o = OpenStackInstance('NAME')
            assert o.get_created() > 0
    def test_exists(self):
        """exists() is true on valid JSON, false when `openstack` fails."""
        with patch.multiple(
            misc,
            sh=lambda cmd: self.teuthology_instance,
        ):
            o = OpenStackInstance('NAME')
            assert o.exists()
        def sh_raises(cmd):
            raise subprocess.CalledProcessError('FAIL', 'BAD')
        with patch.multiple(
            misc,
            sh=sh_raises,
        ):
            o = OpenStackInstance('NAME')
            assert not o.exists()
    def test_volumes(self):
        """The three attached volumes from the fixture are reported."""
        with patch.multiple(
            misc,
            sh=lambda cmd: self.teuthology_instance,
        ):
            o = OpenStackInstance('NAME')
            assert len(o.get_volumes()) == 3
    def test_get_addresses(self):
        """get_addresses() polls until addresses become available."""
        # First poll returns no addresses, second returns them.
        answers = [
            self.teuthology_instance_no_addresses,
            self.teuthology_instance,
        ]
        def sh(self):
            return answers.pop(0)
        with patch.multiple(
            misc,
            sh=sh,
        ):
            o = OpenStackInstance('NAME')
            assert o.get_addresses() == 'Ext-Net=167.114.233.32'
    def test_get_ip_neutron(self):
        """The IPv4 address is found via neutron subnet/port listings."""
        instance_id = '8e1fd70a-3065-46f8-9c30-84dc028c1834'
        ip = '10.10.10.4'
        # Fake neutron CLI: a subnet list with one IPv4 and one IPv6
        # subnet, and a port list where only one device_id matches.
        def sh(cmd):
            if 'neutron subnet-list' in cmd:
                return """
[
    {
        "ip_version": 6,
        "id": "c45b9661-b2ba-4817-9e3a-f8f63bf32989"
    },
    {
        "ip_version": 4,
        "id": "e03a3dbc-afc8-4b52-952e-7bf755397b50"
    }
]
            """
            elif 'neutron port-list' in cmd:
                return ("""
[
    {
        "device_id": "915504ad-368b-4cce-be7c-4f8a83902e28",
        "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.1\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc::1\\"}"
    },
    {
        "device_id": "{instance_id}",
        "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"{ip}\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe07:76c1\\"}"
    },
    {
        "device_id": "17e4a968-4caa-4cee-8e4b-f950683a02bd",
        "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.5\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe9c:37f0\\"}"
    }
]
            """.replace('{instance_id}', instance_id).
                replace('{ip}', ip))
            else:
                raise Exception("unexpected " + cmd)
        with patch.multiple(
            misc,
            sh=sh,
        ):
            assert ip == OpenStackInstance(
                instance_id,
                { 'id': instance_id },
            ).get_ip_neutron()
class TestOpenStack(TestOpenStackBase):
flavors = """[
{
"Name": "eg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "008f75de-c467-4d15-8f70-79c8fbe19538"
},
{
"Name": "hg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "0297d7ac-fe6f-4ff1-b6e7-0b8b0908c94f"
},
{
"Name": "win-sp-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "039e31f2-6541-46c8-85cf-7f47fab7ad78"
},
{
"Name": "win-sp-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "0417a0e6-f68a-4b8b-a642-ca5ecb9652f7"
},
{
"Name": "hg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "042aefc6-b713-4a7e-ada5-3ff81daa1960"
},
{
"Name": "win-sp-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "0609290c-ad2a-40f0-8c66-c755dd38fe3f"
},
{
"Name": "win-eg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "0651080f-5d07-44b1-a759-7ea4594b669e"
},
{
"Name": "win-sp-240",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "07885848-8831-486d-8525-91484c09cc7e"
},
{
"Name": "win-hg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "079aa0a2-5e48-4e58-8205-719bc962736e"
},
{
"Name": "eg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "090f8b8c-673c-4ab8-9a07-6e54a8776e7b"
},
{
"Name": "win-hg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "10e10c58-d29f-4ff6-a1fd-085c35a3bd9b"
},
{
"Name": "eg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "1340a920-0f2f-4c1b-8d74-e2502258da73"
},
{
"Name": "win-eg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "13e54752-fbd0-47a6-aa93-e5a67dfbc743"
},
{
"Name": "eg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "15c07a54-2dfb-41d9-aa73-6989fd8cafc2"
},
{
"Name": "win-eg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "15e0dfcc-10f4-4e70-8ac1-30bc323273e2"
},
{
"Name": "vps-ssd-1",
"RAM": 2000,
"Ephemeral": 0,
"VCPUs": 1,
"Is Public": true,
"Disk": 10,
"ID": "164fcc7e-7771-414f-a607-b388cb7b7aa0"
},
{
"Name": "win-sp-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "169415e1-0979-4527-94fb-638c885bbd8c"
},
{
"Name": "win-hg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "16f13d5b-be27-4b8b-88da-959d3904d3ba"
},
{
"Name": "win-sp-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "1788102b-ab80-4a0c-b819-541deaca7515"
},
{
"Name": "win-sp-240-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "17bcfa14-135f-442f-9397-a4dc25265560"
},
{
"Name": "win-eg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "194ca9ba-04af-4d86-ba37-d7da883a7eab"
},
{
"Name": "win-eg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "19ff8837-4751-4f6c-a82b-290bc53c83c1"
},
{
"Name": "win-eg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "1aaef5e5-4df9-4462-80d3-701683ab9ff0"
},
{
"Name": "eg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "1cd85b81-5e4d-477a-a127-eb496b1d75de"
},
{
"Name": "hg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "1f1efedf-ec91-4a42-acd7-f5cf64b02d3c"
},
{
"Name": "hg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "20347a07-a289-4c07-a645-93cb5e8e2d30"
},
{
"Name": "win-eg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "20689394-bd77-4f4d-900e-52cc8a86aeb4"
},
{
"Name": "win-sp-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "21104d99-ba7b-47a0-9133-7e884710089b"
},
{
"Name": "win-sp-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "23c21ecc-9ee8-4ad3-bd9f-aa17a3faf84e"
},
{
"Name": "win-hg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "24e293ed-bc54-4f26-8fb7-7b9457d08e66"
},
{
"Name": "eg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "25f3534a-89e5-489d-aa8b-63f62e76875b"
},
{
"Name": "win-eg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "291173f1-ea1d-410b-8045-667361a4addb"
},
{
"Name": "sp-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "2b646463-2efa-428b-94ed-4059923c3636"
},
{
"Name": "win-eg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "2c74df82-29d2-4b1a-a32c-d5633e7359b4"
},
{
"Name": "win-eg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "2fe4344f-d701-4bc4-8dcd-6d0b5d83fa13"
},
{
"Name": "sp-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "31487b30-eeb6-472f-a9b6-38ace6587ebc"
},
{
"Name": "win-sp-240-ssd",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "325b602f-ecc4-4444-90bd-5a2cf4e0da53"
},
{
"Name": "win-hg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "377ded36-491f-4ad7-9eb4-876798b2aea9"
},
{
"Name": "sp-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "382f2831-4dba-40c4-bb7a-6fadff71c4db"
},
{
"Name": "hg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "3c1d6170-0097-4b5c-a3b3-adff1b7a86e0"
},
{
"Name": "hg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "3c669730-b5cd-4e44-8bd2-bc8d9f984ab2"
},
{
"Name": "sp-240-ssd-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "3d66fea3-26f2-4195-97ab-fdea3b836099"
},
{
"Name": "sp-240-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "40c781f7-d7a7-4b0d-bcca-5304aeabbcd9"
},
{
"Name": "hg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "42730e52-147d-46b8-9546-18e31e5ac8a9"
},
{
"Name": "eg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "463f30e9-7d7a-4693-944f-142067cf553b"
},
{
"Name": "hg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "534f07c6-91af-44c8-9e62-156360fe8359"
},
{
"Name": "win-sp-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "55533fdf-ad57-4aa7-a2c6-ee31bb94e77b"
},
{
"Name": "win-hg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "58b24234-3804-4c4f-9eb6-5406a3a13758"
},
{
"Name": "hg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "596c1276-8e53-40a0-b183-cdd9e9b1907d"
},
{
"Name": "win-hg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "5c54dc08-28b9-4860-9f24-a2451b2a28ec"
},
{
"Name": "eg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "5e409dbc-3f4b-46e8-a629-a418c8497922"
},
{
"Name": "hg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "656423ea-0551-48c6-9e0f-ec6e15952029"
},
{
"Name": "hg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "675558ea-04fe-47a2-83de-40be9b2eacd4"
},
{
"Name": "eg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "68a8e4e1-d291-46e8-a724-fbb1c4b9b051"
},
{
"Name": "hg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "6ab72807-e0a5-4e9f-bbb9-7cbbf0038b26"
},
{
"Name": "win-hg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "6e12cae3-0492-483c-aa39-54a0dcaf86dd"
},
{
"Name": "win-hg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "6ead771c-e8b9-424c-afa0-671280416422"
},
{
"Name": "win-hg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "70ded741-8f58-4bb9-8cfd-5e838b66b5f3"
},
{
"Name": "win-sp-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "7284d104-a260-421d-8cee-6dc905107b25"
},
{
"Name": "win-eg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "72c0b262-855d-40bb-a3e9-fd989a1bc466"
},
{
"Name": "win-hg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "73961591-c5f1-436f-b641-1a506eddaef4"
},
{
"Name": "sp-240-ssd",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "7568d834-3b16-42ce-a2c1-0654e0781160"
},
{
"Name": "win-eg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "75f7fe5c-f87a-41d8-a961-a0169d02c268"
},
{
"Name": "eg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "77e1db73-0b36-4e37-8e47-32c2d2437ca9"
},
{
"Name": "eg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "78df4e30-98ca-4362-af68-037d958edaf0"
},
{
"Name": "vps-ssd-2",
"RAM": 4000,
"Ephemeral": 0,
"VCPUs": 1,
"Is Public": true,
"Disk": 20,
"ID": "7939cc5c-79b1-45c0-be2d-aa935d92faa1"
},
{
"Name": "sp-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "80d8510a-79cc-4307-8db7-d1965c9e8ddb"
},
{
"Name": "win-hg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "835e734a-46b6-4cb2-be68-e8678fd71059"
},
{
"Name": "win-eg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "84869b00-b43a-4523-babd-d47d206694e9"
},
{
"Name": "win-eg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "852308f8-b8bf-44a4-af41-cbc27437b275"
},
{
"Name": "win-sp-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "8be9dc29-3eca-499b-ae2d-e3c99699131a"
},
{
"Name": "win-hg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "8d704cfd-05b2-4d4a-add2-e2868bcc081f"
},
{
"Name": "eg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "901f77c2-73f6-4fae-b28a-18b829b55a17"
},
{
"Name": "sp-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "944b92fb-9a0c-406d-bb9f-a1d93cda9f01"
},
{
"Name": "eg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "965472c7-eb54-4d4d-bd6e-01ebb694a631"
},
{
"Name": "sp-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "97824a8c-e683-49a8-a70a-ead64240395c"
},
{
"Name": "hg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "9831d7f1-3e79-483d-8958-88e3952c7ea2"
},
{
"Name": "eg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "9e1f13d0-4fcc-4abc-a9e6-9c76d662c92d"
},
{
"Name": "win-eg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "9e6b85fa-6f37-45ce-a3d6-11ab40a28fad"
},
{
"Name": "hg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "9ed787cc-a0db-400b-8cc1-49b6384a1000"
},
{
"Name": "sp-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "9f3cfdf7-b850-47cc-92be-33aefbfd2b05"
},
{
"Name": "hg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "a37bdf17-e1b1-41cc-a67f-ed665a120446"
},
{
"Name": "win-hg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "aa753e73-dadb-4528-9c4a-24e36fc41bf4"
},
{
"Name": "win-sp-240-ssd-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "abc007b8-cc44-4b6b-9606-fd647b03e101"
},
{
"Name": "sp-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "ac74cb45-d895-47dd-b9cf-c17778033d83"
},
{
"Name": "win-hg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "ae900175-72bd-4fbc-8ab2-4673b468aa5b"
},
{
"Name": "win-eg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "aeb37dbf-d7c9-4fd7-93f1-f3818e488ede"
},
{
"Name": "hg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "b1dc776c-b6e3-4a96-b230-850f570db3d5"
},
{
"Name": "sp-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "b24df495-10f3-466e-95ab-26f0f6839a2f"
},
{
"Name": "win-hg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "b798e44e-bf71-488c-9335-f20bf5976547"
},
{
"Name": "eg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "b94e6623-913d-4147-b2a3-34ccf6fe7a5e"
},
{
"Name": "eg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "bb5fdda8-34ec-40c8-a4e3-308b9e2c9ee2"
},
{
"Name": "win-eg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "c65384f6-4665-461a-a292-2f3f5a016244"
},
{
"Name": "eg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "c678f1a8-6542-4f9d-89af-ffc98715d674"
},
{
"Name": "hg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "d147a094-b653-41e7-9250-8d4da3044334"
},
{
"Name": "sp-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "d1acf88d-6f55-4c5c-a914-4ecbdbd50d6b"
},
{
"Name": "sp-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "d2d33e8e-58b1-4661-8141-826c47f82166"
},
{
"Name": "hg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "d7322c37-9881-4a57-9b40-2499fe2e8f42"
},
{
"Name": "win-hg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "daf597ea-fbbc-4c71-a35e-5b41d33ccc6c"
},
{
"Name": "win-hg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "dcfd834c-3932-47a3-8b4b-cdfeecdfde2c"
},
{
"Name": "win-hg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "def75cbd-a4b1-4f82-9152-90c65df9587b"
},
{
"Name": "eg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "e04c7ad6-a5de-45f5-93c9-f3343bdfe8d1"
},
{
"Name": "vps-ssd-3",
"RAM": 8000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 40,
"ID": "e43d7458-6b82-4a78-a712-3a4dc6748cf4"
},
{
"Name": "win-eg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "e8bd3402-7310-4a0f-8b99-d9212359c957"
},
{
"Name": "win-eg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "ebf7a997-e2f8-42f4-84f7-33a3d53d1af9"
},
{
"Name": "eg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "ec852ed3-1e42-4c59-abc3-12bcd26abec8"
},
{
"Name": "sp-240",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "ed286e2c-769f-4c47-ac52-b8de7a4891f6"
},
{
"Name": "win-sp-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "ed835a73-d9a0-43ee-bd89-999c51d8426d"
},
{
"Name": "win-eg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "f06056c1-a2d4-40e7-a7d8-e5bfabada72e"
},
{
"Name": "win-sp-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "f247dc56-395b-49de-9a62-93ccc4fff4ed"
},
{
"Name": "eg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "f476f959-ffa6-46f2-94d8-72293570604d"
},
{
"Name": "sp-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "f52db47a-315f-49d4-bc5c-67dd118e7ac0"
},
{
"Name": "win-hg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "f6cb8144-5d98-4057-b44f-46da342fb571"
},
{
"Name": "hg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "fa3cc551-0358-4170-be64-56ea432b064c"
},
{
"Name": "hg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "ff48c2cf-c17f-4682-aaf6-31d66786f808"
}
]"""
@classmethod
def setup_class(self):
if 'OS_AUTH_URL' not in os.environ:
pytest.skip('no OS_AUTH_URL environment variable')
@patch('teuthology.misc.sh')
def test_sorted_flavors(self, m_sh):
o = OpenStack()
select = '^(vps|hg)-.*ssd'
m_sh.return_value = TestOpenStack.flavors
flavors = o.get_sorted_flavors('arch', select)
assert [u'vps-ssd-1',
u'vps-ssd-2',
u'hg-7-ssd-flex',
u'hg-7-ssd',
u'vps-ssd-3',
u'hg-15-ssd-flex',
u'hg-15-ssd',
u'hg-30-ssd-flex',
u'hg-30-ssd',
u'hg-60-ssd-flex',
u'hg-60-ssd',
u'hg-120-ssd-flex',
u'hg-120-ssd',
] == [ f['Name'] for f in flavors ]
m_sh.assert_called_with("openstack --quiet flavor list -f json")
def test_flavor(self):
def get_sorted_flavors(self, arch, select):
return [
{
'Name': 'too_small',
'RAM': 2048,
'Disk': 50,
'VCPUs': 1,
},
]
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
with pytest.raises(NoFlavorException):
hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
OpenStack().flavor(hint, 'arch')
flavor = 'good-flavor'
def get_sorted_flavors(self, arch, select):
return [
{
'Name': flavor,
'RAM': 2048,
'Disk': 50,
'VCPUs': 2,
},
]
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
assert flavor == OpenStack().flavor(hint, 'arch')
def test_flavor_range(self):
flavors = [
{
'Name': 'too_small',
'RAM': 2048,
'Disk': 50,
'VCPUs': 1,
},
]
def get_sorted_flavors(self, arch, select):
return flavors
min = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
good = { 'ram': 4000, 'disk': 40, 'cpus': 2 }
#
# there are no flavors in the required range
#
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
with pytest.raises(NoFlavorException):
OpenStack().flavor_range(min, good, 'arch')
#
# there is one flavor in the required range
#
flavors.append({
'Name': 'min',
'RAM': 2048,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'min' == OpenStack().flavor_range(min, good, 'arch')
#
# out of the two flavors in the required range, get the bigger one
#
flavors.append({
'Name': 'good',
'RAM': 3000,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'good' == OpenStack().flavor_range(min, good, 'arch')
#
# there is one flavor bigger or equal to good, get this one
#
flavors.append({
'Name': 'best',
'RAM': 4000,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'best' == OpenStack().flavor_range(min, good, 'arch')
#
# there are two flavors bigger or equal to good, get the smallest one
#
flavors.append({
'Name': 'too_big',
'RAM': 30000,
'Disk': 400,
'VCPUs': 20,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'best' == OpenStack().flavor_range(min, good, 'arch')
def test_interpret_hints(self):
defaults = {
'machine': {
'ram': 0,
'disk': 0,
'cpus': 0,
},
'volumes': {
'count': 0,
'size': 0,
},
}
expected_disk = 10 # first hint larger than the second
expected_ram = 20 # second hint larger than the first
expected_cpus = 0 # not set, hence zero by default
expected_count = 30 # second hint larger than the first
expected_size = 40 # does not exist in the first hint
hints = [
{
'machine': {
'ram': 2,
'disk': expected_disk,
},
'volumes': {
'count': 9,
'size': expected_size,
},
},
{
'machine': {
'ram': expected_ram,
'disk': 3,
},
'volumes': {
'count': expected_count,
},
},
]
hint = OpenStack().interpret_hints(defaults, hints)
assert hint == {
'machine': {
'ram': expected_ram,
'disk': expected_disk,
'cpus': expected_cpus,
},
'volumes': {
'count': expected_count,
'size': expected_size,
}
}
assert defaults == OpenStack().interpret_hints(defaults, None)
def test_get_provider(self):
auth = os.environ.get('OS_AUTH_URL', None)
os.environ['OS_AUTH_URL'] = 'cloud.ovh.net'
assert OpenStack().get_provider() == 'ovh'
if auth != None:
os.environ['OS_AUTH_URL'] = auth
else:
del os.environ['OS_AUTH_URL']
def test_get_os_url(self):
o = OpenStack()
#
# Only for OVH
#
o.provider = 'something'
assert "" == o.get_os_url("server ")
o.provider = 'ovh'
assert "" == o.get_os_url("unknown ")
type2cmd = {
'compute': ('server', 'flavor'),
'network': ('ip', 'security', 'network'),
'image': ('image',),
'volume': ('volume',),
}
os.environ['OS_REGION_NAME'] = 'REGION'
os.environ['OS_TENANT_ID'] = 'TENANT'
for (type, cmds) in type2cmd.items():
for cmd in cmds:
assert ("//" + type) in o.get_os_url(cmd + " ")
for type in type2cmd.keys():
assert ("//" + type) in o.get_os_url("whatever ", type=type)
@patch('teuthology.misc.sh')
def test_cache_token(self, m_sh):
token = 'TOKEN VALUE'
m_sh.return_value = token
OpenStack.token = None
o = OpenStack()
#
# Only for OVH
#
o.provider = 'something'
assert False == o.cache_token()
o.provider = 'ovh'
#
# Set the environment with the token
#
assert 'OS_TOKEN_VALUE' not in os.environ
assert 'OS_TOKEN_EXPIRES' not in os.environ
assert True == o.cache_token()
m_sh.assert_called_with('openstack -q token issue -c id -f value')
assert token == os.environ['OS_TOKEN_VALUE']
assert token == OpenStack.token
assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
assert time.time() < OpenStack.token_expires
#
# Reset after it expires
#
token_expires = int(time.time()) - 2000
OpenStack.token_expires = token_expires
assert True == o.cache_token()
assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
assert time.time() < OpenStack.token_expires
@patch('teuthology.misc.sh')
def test_cache_token_from_environment(self, m_sh):
OpenStack.token = None
o = OpenStack()
o.provider = 'ovh'
token = 'TOKEN VALUE'
os.environ['OS_TOKEN_VALUE'] = token
token_expires = int(time.time()) + OpenStack.token_cache_duration
os.environ['OS_TOKEN_EXPIRES'] = str(token_expires)
assert True == o.cache_token()
assert token == OpenStack.token
assert token_expires == OpenStack.token_expires
m_sh.assert_not_called()
    @patch('teuthology.misc.sh')
    def test_cache_token_expired_environment(self, m_sh):
        """An expired token in the environment is discarded: a new token is
        fetched via the CLI and the environment is refreshed."""
        token = 'TOKEN VALUE'
        m_sh.return_value = token
        OpenStack.token = None
        o = OpenStack()
        o.provider = 'ovh'
        os.environ['OS_TOKEN_VALUE'] = token
        token_expires = int(time.time()) - 2000
        os.environ['OS_TOKEN_EXPIRES'] = str(token_expires)
        assert True == o.cache_token()
        m_sh.assert_called_with('openstack -q token issue -c id -f value')
        assert token == os.environ['OS_TOKEN_VALUE']
        assert token == OpenStack.token
        assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
        assert time.time() < OpenStack.token_expires
class TestTeuthologyOpenStack(TestOpenStackBase):
    """Integration tests driving a real OpenStack tenant.

    Skipped entirely unless the OS_AUTH_URL environment variable is set.
    """
    @classmethod
    def setup_class(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named 'cls'; kept as 'self' to leave the original code untouched.
        if 'OS_AUTH_URL' not in os.environ:
            pytest.skip('no OS_AUTH_URL environment variable')
        teuthology.log.setLevel(logging.DEBUG)
        set_config_attr(argparse.Namespace())
        # Probe once whether this tenant can allocate floating IPs and
        # remember the answer for the individual tests.
        ip = TeuthologyOpenStack.create_floating_ip()
        if ip:
            ip_id = TeuthologyOpenStack.get_floating_ip_id(ip)
            OpenStack().run("ip floating delete " + ip_id)
            self.can_create_floating_ips = True
        else:
            self.can_create_floating_ips = False
    def setup(self):
        """Create a throwaway keypair and remember the teuthology CLI options."""
        super(TestTeuthologyOpenStack, self).setup()
        self.key_filename = tempfile.mktemp()
        self.key_name = 'teuthology-test'
        self.name = 'teuthology-test'
        self.clobber()
        misc.sh("""
        openstack keypair create {key_name} > {key_filename}
        chmod 600 {key_filename}
        """.format(key_filename=self.key_filename,
                   key_name=self.key_name))
        self.options = ['--key-name', self.key_name,
                        '--key-filename', self.key_filename,
                        '--name', self.name,
                        '--verbose']
    def teardown(self):
        """Remove the server/keypair and the temporary private key file."""
        super(TestTeuthologyOpenStack, self).teardown()
        self.clobber()
        os.unlink(self.key_filename)
    def clobber(self):
        """Best-effort removal of the test server and keypair."""
        misc.sh("""
        openstack server delete {name} --wait || true
        openstack keypair delete {key_name} || true
        """.format(key_name=self.key_name,
                   name=self.name))
    def test_create(self, caplog):
        """End-to-end: boot the teuthology cluster and verify its log output."""
        teuthology_argv = [
            '--suite', 'upgrade/hammer',
            '--dry-run',
            '--ceph', 'master',
            '--kernel', 'distro',
            '--flavor', 'gcov',
            '--distro', 'ubuntu',
            '--suite-branch', 'hammer',
            '--email', 'loic@dachary.org',
            '--num', '10',
            '--limit', '23',
            '--subset', '1/2',
            '--priority', '101',
            '--timeout', '234',
            '--filter', 'trasher',
            '--filter-out', 'erasure-code',
            '--throttle', '3',
        ]
        archive_upload = 'user@archive:/tmp'
        argv = (self.options +
                ['--teuthology-git-url', 'TEUTHOLOGY_URL',
                 '--teuthology-branch', 'TEUTHOLOGY_BRANCH',
                 '--ceph-workbench-git-url', 'CEPH_WORKBENCH_URL',
                 '--ceph-workbench-branch', 'CEPH_WORKBENCH_BRANCH',
                 '--upload',
                 '--archive-upload', archive_upload] +
                teuthology_argv)
        args = scripts.openstack.parse_args(argv)
        # The archive-upload options are expected to be forwarded verbatim
        # to the teuthology command line.
        teuthology_argv.extend([
            '--archive-upload', archive_upload,
            '--archive-upload-url', args.archive_upload_url,
        ])
        teuthology = TeuthologyOpenStack(args, None, argv)
        teuthology.user_data = 'teuthology/openstack/test/user-data-test1.txt'
        teuthology.teuthology_suite = 'echo --'
        teuthology.main()
        assert 0 == teuthology.ssh("lsb_release -a")
        assert 0 == teuthology.ssh("grep 'substituded variables' /var/log/cloud-init.log")
        l = caplog.text
        assert 'Ubuntu 14.04' in l
        assert "nworkers=" + str(args.simultaneous_jobs) in l
        assert "username=" + teuthology.username in l
        assert "upload=--archive-upload user@archive:/tmp" in l
        assert ("ceph_workbench="
                " --ceph-workbench-branch CEPH_WORKBENCH_BRANCH"
                " --ceph-workbench-git-url CEPH_WORKBENCH_URL") in l
        assert "clone=git clone -b TEUTHOLOGY_BRANCH TEUTHOLOGY_URL" in l
        assert os.environ['OS_AUTH_URL'] in l
        assert " ".join(teuthology_argv) in l
        if self.can_create_floating_ips:
            ip = teuthology.get_floating_ip(self.name)
        teuthology.teardown()
        if self.can_create_floating_ips:
            assert teuthology.get_floating_ip_id(ip) == None
    def test_floating_ip(self):
        """A freshly created floating IP is found unassociated, then deleted."""
        if not self.can_create_floating_ips:
            pytest.skip('unable to create floating ips')
        expected = TeuthologyOpenStack.create_floating_ip()
        ip = TeuthologyOpenStack.get_unassociated_floating_ip()
        assert expected == ip
        ip_id = TeuthologyOpenStack.get_floating_ip_id(ip)
        OpenStack().run("ip floating delete " + ip_id)
| |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
def flake8ext(f):
    """Mark *f* as a flake8 extension check.

    Borrowed from hacking.core.flake8ext(); at the moment only the unit
    tests use these attributes to discover the watcher flake8 extensions.
    """
    attributes = {'name': __name__, 'version': '0.0.1', 'skip_on_py3': False}
    for attr, value in attributes.items():
        setattr(f, attr, value)
    return f
# Guidelines for writing new hacking checks
#
# - Use only for Watcher specific tests. OpenStack general tests
# should be submitted to the common 'hacking' module.
# - Pick numbers in the range N3xx. Find the current test with
# the highest allocated number and then pick the next value.
# - Keep the test method code in the source file ordered based
# on the N3xx value.
# - List the new rule in the top level HACKING.rst file
# Map log level -> the i18n hint function expected for that level.
_all_log_levels = {
    'reserved': '_',  # this should never be used with a log unless
                      # it is a variable used for a log message and
                      # a exception
    'error': '_LE',
    'info': '_LI',
    'warning': '_LW',
    'critical': '_LC',
    'exception': '_LE',
}
# Every known i18n hint function name.
_all_hints = set(_all_log_levels.values())
def _regex_for_level(level, hint):
    """Build a regex matching LOG.<level>( calls that use the wrong hint."""
    return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
        'level': level,
        'wrong_hints': '|'.join(_all_hints - set([hint])),
    }
# Pre-compiled patterns used by the checks below (some consumers may be
# outside this chunk of the file).
log_warn = re.compile(
    r"(.)*LOG\.(warn)\(\s*('|\"|_)")
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
re_redundant_import_alias = re.compile(r".*import (.+) as \1$")
@flake8ext
def use_jsonutils(logical_line, filename):
    """N321 - oslo jsonutils must be used instead of the stdlib json module."""
    msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    # Skip list is currently empty.
    json_check_skipped_patterns = []
    if any(pattern in filename for pattern in json_check_skipped_patterns):
        return
    if "json." not in logical_line:
        return
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        pos = logical_line.find('json.%s' % func)
        if pos != -1:
            yield (pos, msg % {'fun': func[:-1]})
@flake8ext
def no_translate_debug_logs(logical_line, filename):
    """N319 - debug level logs must not be translated.

    Flags 'LOG.debug(_(' and 'LOG.debug(_Lx(' per the translation policy:
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    This check assumes that 'LOG' is a logger.
    """
    if any(logical_line.startswith("LOG.debug(%s(" % hint)
           for hint in _all_hints):
        yield (0, "N319 Don't translate debug level logs")
@flake8ext
def check_assert_called_once_with(logical_line, filename):
    """N322 - detect calls of nonexistent mock methods.

    Catches typos such as assert_called_once, assertCalledOnceWith,
    assert_has_called and called_once_with, which mock accepts silently
    as no-ops.
    """
    if 'watcher/tests/' not in filename:
        return
    if '.assert_called_once_with(' in logical_line:
        return
    uncased_line = logical_line.lower().replace('_', '')
    if any(call in uncased_line
           for call in ('.assertcalledonce', '.calledoncewith')):
        yield (0, "N322: Possible use of no-op mock method. "
                  "please use assert_called_once_with.")
    if '.asserthascalled' in uncased_line:
        yield (0, "N322: Possible use of no-op mock method. "
                  "please use assert_has_calls.")
@flake8ext
def check_python3_xrange(logical_line):
    """N325 - xrange does not exist on Python 3."""
    if re.search(r"\bxrange\s*\(", logical_line) is None:
        return
    yield (0, "N325: Do not use xrange. Use range, or six.moves.range for "
              "large loops.")
@flake8ext
def check_no_basestring(logical_line):
    """N326 - basestring does not exist on Python 3."""
    if re.search(r"\bbasestring\b", logical_line):
        yield (0, "N326: basestring is not Python3-compatible, use "
                  "six.string_types instead.")
@flake8ext
def check_python3_no_iteritems(logical_line):
    """N327 - dict.iteritems() does not exist on Python 3."""
    if re.search(r".*\.iteritems\(\)", logical_line):
        yield (0, "N327: Use six.iteritems() instead of dict.iteritems().")
@flake8ext
def check_asserttrue(logical_line, filename):
    """N328 - prefer assertTrue(observed) over assertEqual(True, observed)."""
    if 'watcher/tests/' not in filename:
        return
    # True may appear as either the first or the second argument.
    patterns = (r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)",
                r"assertEqual\([^,]*,\s*True(,[^,]*)?\)")
    for pattern in patterns:
        if re.search(pattern, logical_line):
            yield (0, "N328: Use assertTrue(observed) instead of "
                      "assertEqual(True, observed)")
@flake8ext
def check_assertfalse(logical_line, filename):
    """N329 - prefer assertFalse(observed) over assertEqual(False, observed)."""
    if 'watcher/tests/' not in filename:
        return
    # False may appear as either the first or the second argument.
    patterns = (r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)",
                r"assertEqual\([^,]*,\s*False(,[^,]*)?\)")
    for pattern in patterns:
        if re.search(pattern, logical_line):
            yield (0, "N329: Use assertFalse(observed) instead of "
                      "assertEqual(False, observed)")
@flake8ext
def check_assertempty(logical_line, filename):
    """N330 - in assertEqual the empty literal goes first, not last."""
    if 'watcher/tests/' not in filename:
        return
    empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
    if re.search(r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties,
                 logical_line):
        yield (0, "N330: Use assertEqual(*empty*, observed) instead of "
                  "assertEqual(observed, *empty*). *empty* contains "
                  "{}, [], (), set(), '', \"\"")
@flake8ext
def check_assertisinstance(logical_line, filename):
    """N331 - prefer assertIsInstance over assertTrue(isinstance(...))."""
    if 'watcher/tests/' not in filename:
        return
    if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
                 logical_line):
        yield (0, "N331: Use assertIsInstance(observed, type) instead "
                  "of assertTrue(isinstance(observed, type))")
@flake8ext
def check_assertequal_for_httpcode(logical_line, filename):
    """N332 - the expected HTTP code goes first in assertEqual."""
    if 'watcher/tests/' not in filename:
        return
    if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)",
                 logical_line):
        yield (0, "N332: Use assertEqual(expected_http_code, observed_http_code) "
                  "instead of assertEqual(observed_http_code, expected_http_code)")
@flake8ext
def check_log_warn_deprecated(logical_line, filename):
    """N333 - LOG.warn is deprecated; LOG.warning works on py2 and py3."""
    if log_warn.match(logical_line):
        yield (0, "N333: Use LOG.warning due to compatibility with py3")
@flake8ext
def check_oslo_i18n_wrapper(logical_line, filename, noqa):
    """N340 - check for watcher.i18n usage.

    N340(watcher/foo/bar.py): from watcher.i18n import _
    Okay(watcher/foo/bar.py): from watcher.i18n import _  # noqa
    """
    if noqa:
        return
    split_line = logical_line.split()
    if len(split_line) < 2 or split_line[0] not in ('import', 'from'):
        return
    modulename = os.path.normpath(filename).split('/')[0]
    bad_i18n_module = '%s.i18n' % modulename
    imported = split_line[1]
    is_bad = (imported == bad_i18n_module or
              (modulename != 'watcher' and
               imported in ('watcher.i18n', 'watcher._i18n')))
    if is_bad:
        yield (0, "N340: %(found)s is found. Use %(module)s._i18n instead."
                  % {'found': imported, 'module': modulename})
@flake8ext
def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
    """N341 - check usage of the builtins gettext _().

    N341(watcher/foo.py): _('foo')
    Okay(watcher/i18n.py): _('foo')
    Okay(watcher/_i18n.py): _('foo')
    Okay(watcher/foo.py): _('foo')  # noqa
    """
    if noqa:
        return
    modulename = os.path.normpath(filename).split('/')[0]
    # Tests and the i18n wrapper modules themselves are exempt.
    if '%s/tests' % modulename in filename:
        return
    if os.path.basename(filename) in ('i18n.py', '_i18n.py'):
        return
    token_values = {tok[1] for tok in tokens}
    if '_' not in token_values:
        return
    i18n_wrapper = '%s._i18n' % modulename
    # Accept the line if _ was imported from the project's i18n wrapper.
    for line in lines:
        words = [word.rstrip(',') for word in line.split()]
        if (len(words) > 1 and words[0] == 'from' and
                words[1] == i18n_wrapper and '_' in words):
            return
    yield (0, "N341: _ from python builtins module is used. "
              "Use _ from %s instead." % i18n_wrapper)
@flake8ext
def no_redundant_import_alias(logical_line):
    """N342 - no redundant import alias.

    https://bugs.launchpad.net/watcher/+bug/1745527
    """
    if re.match(re_redundant_import_alias, logical_line) is not None:
        yield (0, "N342: No redundant import alias.")
@flake8ext
def import_stock_mock(logical_line):
    """N366 - use python's mock, not the third-party mock library.

    Since we `dropped support for python 2`__ the mock library (a backport
    of py3 functionality to py2) is no longer needed.  Say::

        from unittest import mock

    because ``import mock`` always imports the third-party library, never
    the standard library module.  This check can be removed in the future
    (and ``import mock`` allowed again) once the transitive dependencies
    on the mock library are purged.

    .. __: https://review.opendev.org/#/c/717540
    """
    if logical_line != 'import mock':
        return
    yield (0, "N366: You must explicitly import python's mock: "
              "``from unittest import mock``")
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in array_ops.py."""
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.python import pywrap_tfe
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices as indexed_slices_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
  """Gradient for pack op: unstack the gradient along the packed axis."""
  num = op.get_attr("N")
  axis = op.get_attr("axis")
  return array_ops.unstack(grad, num=num, axis=axis)
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
  """Gradient for unpack op: stack the per-output gradients back together."""
  axis = op.get_attr("axis")
  return array_ops.stack(grads, axis=axis)
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.
  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect to
      each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.
  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.
  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """
  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat([
        array_ops.zeros(
            array_ops.expand_dims(concat_dim, 0), dtype=dtypes.int32), [1],
        array_ops.zeros(shape_of_shape - concat_dim - 1, dtype=dtypes.int32)
    ], 0)
    begin = array_ops.zeros(shape_of_shape, dtype=dtypes.int32)
    return mask, begin
  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    if context.executing_eagerly():
      return array_ops.shape_n(inputs)
    sizes = []
    fully_known = True
    # Use the statically-known Const shapes when every input has one;
    # otherwise fall back to a single shape_n op.
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      sizes.append(input_shape)
    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)
  # Degenerate concatenation, just return grad.
  # (Two inputs means a single value plus the concat_dim/axis input.)
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad
  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    if context.executing_eagerly() or isinstance(concat_dim, ops.EagerTensor):
      # Using mod here for convenience since concat_dim is already verified
      # in concat implementation to be within the allowed [-rank, rank) range.
      non_neg_concat_dim = (
          concat_dim._numpy().item(0) % input_values[0]._rank())  # pylint: disable=protected-access
      # All inputs are guaranteed to be EagerTensors in eager mode
      sizes = pywrap_tfe.TFE_Py_TensorShapeSlice(input_values,
                                                 non_neg_concat_dim)
      out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
    else:
      if constant_op.is_constant(concat_dim):
        # If concat_dim is a constant defined in a different context,
        # then we duplicate it in the current context to avoid passing it
        # through an Enter node.
        # This is a small optimization in general, but it is required when
        # compiling with XLA, as XLA needs the concat input to be folded into a
        # constant.
        grad_context = control_flow_util.GetOutputContext(grad.op)
        dim_context = control_flow_util.GetOutputContext(concat_dim.op)
        if dim_context != grad_context:
          value = tensor_util.constant_value(concat_dim)
          concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype)
      # Using mod here for convenience since concat_dim is already verified
      # in concat implementation to be within the allowed [-rank, rank) range.
      non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
      # Get the inputs' tensor shapes
      sizes = _ExtractInputShapes(input_values)
      # The magic number of 16 was found through benchmarking a range of sizes
      # on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
      # cases when switching implementations at N=16, but it is possible that
      # there will be a small number of performance regressions.
      if len(sizes) > 16:
        # extract the size of each input along the concat dimension
        sizes = array_ops.squeeze(
            array_ops.slice(
                array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0],
                [1, -1]))
        out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
      else:
        offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes)
        for (begin, size) in zip(offset, sizes):
          out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, indexed_slices_lib.IndexedSlices):
    # Using mod here for convenience since concat_dim is already verified
    # in concat implementation to be within the allowed [-rank, rank) range.
    non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    if concat_dim_static < 0:
      rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
      if rank is None:
        raise ValueError("Can only compute IndexedSlices gradient with "
                         "negative concat_dim when first value rank is "
                         "statically-known.")
      concat_dim_static %= rank
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
      # gradients with all the indices, but with grad.values sliced accordingly.
      # This is like the Tensor case, except shape(grad.values)[0] is not equal
      # to shape(sizes[i])[0], since only a subset of the dim-0 values are
      # stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values, begin,
            array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
        out_grads.append(
            indexed_slices_lib.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(
              size_concat_dim, dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(
                math_ops.logical_and(grad.indices >= start,
                                     grad.indices < end)),
            axis=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(
            indexed_slices_lib.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
  return (out_grads + [None] if end_value_index <= dim_index else [None] +
          out_grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
  """Gradient for Concat: concat_dim is input 0, the values follow it."""
  num_inputs = len(op.inputs)
  return _ConcatGradHelper(
      op, grad, start_value_index=1, end_value_index=num_inputs, dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
  """Gradient for ConcatV2: the values come first, axis is the last input."""
  return _ConcatGradHelper(
      op, grad, start_value_index=0, end_value_index=-1, dim_index=-1)
# The helper op computing slice offsets for concat has no gradient.
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])
  # Inside an XLA context, scatter the gradient into a zero tensor directly
  # instead of going through Pad.
  if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    return gen_xla_ops.xla_dynamic_update_slice(array_ops.zeros_like(input_vec),
                                                grad, begin_vec), None, None
  shape = array_ops.stack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat([before_pad, after_pad], 1)
  # No gradient for the begin and size inputs.
  return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
  """Gradient for StridedSlice op."""
  begin = op.inputs[1]
  end = op.inputs[2]
  strides = op.inputs[3]
  # StridedSliceGrad requires `x`, `begin`, `end` and `strides` to be of the
  # same dtype so we build a shape of the same type as other args.
  # Note that the choice of `begin` for specifying `out_type` is arbitrary.
  # We could choose any of {begin|end|strides}.dtype since they are required to
  # be the same.
  x = array_ops.shape(op.inputs[0], out_type=begin.dtype)
  # Fold each argument to its static value when known so the grad op gets
  # constants instead of tensors.
  x_static = tensor_util.constant_value(x)
  x = x_static if x_static is not None else x
  begin_static = tensor_util.constant_value(begin)
  begin = begin_static if begin_static is not None else begin
  end_static = tensor_util.constant_value(end)
  end = end_static if end_static is not None else end
  strides_static = tensor_util.constant_value(strides)
  strides = strides_static if strides_static is not None else strides
  # No gradients for begin/end/strides.
  return array_ops.strided_slice_grad(
      x,
      begin,
      end,
      strides,
      grad,
      begin_mask=op.get_attr("begin_mask"),
      end_mask=op.get_attr("end_mask"),
      ellipsis_mask=op.get_attr("ellipsis_mask"),
      new_axis_mask=op.get_attr("new_axis_mask"),
      shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
  """Gradient for StridedSliceGrad op: re-slice with the same parameters."""
  begin = op.inputs[1]
  end = op.inputs[2]
  strides = op.inputs[3]
  # Only the incoming grad (input 4) gets a gradient.
  return None, None, None, None, array_ops.strided_slice(
      grad,
      begin,
      end,
      strides,
      begin_mask=op.get_attr("begin_mask"),
      end_mask=op.get_attr("end_mask"),
      ellipsis_mask=op.get_attr("ellipsis_mask"),
      new_axis_mask=op.get_attr("new_axis_mask"),
      shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("TensorStridedSliceUpdate")
def _TensorStridedSliceUpdateGrad(op, grad):
  """Gradient for TensorStridedSliceUpdate op."""
  begin = op.inputs[1]
  end = op.inputs[2]
  strides = op.inputs[3]
  begin_mask = op.get_attr("begin_mask")
  end_mask = op.get_attr("end_mask")
  ellipsis_mask = op.get_attr("ellipsis_mask")
  new_axis_mask = op.get_attr("new_axis_mask")
  shrink_axis_mask = op.get_attr("shrink_axis_mask")
  def Apply(f, *args):
    # Forward the slicing attributes of the original op to f.
    return f(*args,
             begin_mask=begin_mask,
             end_mask=end_mask,
             shrink_axis_mask=shrink_axis_mask,
             new_axis_mask=new_axis_mask,
             ellipsis_mask=ellipsis_mask)
  # dy: the slice of grad covered by the update.
  dy = Apply(array_ops.strided_slice,
             grad, begin, end, strides)
  # dx: grad with the updated region zeroed out.
  dx = Apply(array_ops.tensor_strided_slice_update,
             grad, begin, end, strides, array_ops.zeros_like(dy))
  return dx, None, None, None, dy
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
  """Gradient for Split: concat the grads along the split axis (input 0)."""
  return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
  """Gradient for SplitV: concat grads; size_splits and axis get None."""
  merged = array_ops.concat(list(grads), op.inputs[2])
  return [merged] + [None] * (len(op.inputs) - 1)
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
  """Gradient for Diag: take the diagonal part of the incoming gradient."""
  return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
  """Gradient for DiagPart: embed the gradient back on the diagonal."""
  return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
  """Gradient for MatrixDiag: extract the diagonal of the gradient."""
  return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagV2")
def _MatrixDiagV2Grad(op, grad):
  """Gradient for MatrixDiagV2: extract band k; other inputs get None."""
  return array_ops.matrix_diag_part(
      grad, k=op.inputs[1]), None, None, None, None
@ops.RegisterGradient("MatrixDiagV3")
def _MatrixDiagV3Grad(op, grad):
  """Gradient for MatrixDiagV3: like V2 but honoring the align attribute."""
  return array_ops.matrix_diag_part(
      grad, k=op.inputs[1], align=op.get_attr("align")), None, None, None, None
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
  """Gradient for MatrixDiagPart."""
  matrix_shape = op.inputs[0].get_shape()[-2:]
  if not matrix_shape.is_fully_defined() or matrix_shape[0] != matrix_shape[1]:
    # Non-square (or statically unknown) input: scatter the gradient onto
    # the diagonal of a zero matrix shaped like the input.
    return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
  # Square input: the gradient is just the diagonal matrix of grad.
  return array_ops.matrix_diag(grad)
@ops.RegisterGradient("MatrixDiagPartV2")
def _MatrixDiagPartV2Grad(op, grad):
  """Gradient for MatrixDiagPartV2."""
  matrix_shape = op.inputs[0].get_shape()[-2:]
  if matrix_shape.is_fully_defined():
    # Static matrix shape: build the diag matrix with explicit dimensions.
    return array_ops.matrix_diag(
        grad,
        k=op.inputs[1],
        num_rows=matrix_shape[0],
        num_cols=matrix_shape[1]), None, None
  else:
    # Unknown shape: scatter onto a zero matrix shaped like the input.
    return array_ops.matrix_set_diag(
        array_ops.zeros_like(op.inputs[0]), grad, k=op.inputs[1]), None, None
@ops.RegisterGradient("MatrixDiagPartV3")
def _MatrixDiagPartV3Grad(op, grad):
  """Gradient for MatrixDiagPartV3."""
  matrix_shape = op.inputs[0].get_shape()[-2:]
  align = op.get_attr("align")
  if matrix_shape.is_fully_defined():
    # Static matrix shape: build the diag matrix with explicit dimensions.
    return array_ops.matrix_diag(
        grad,
        k=op.inputs[1],
        num_rows=matrix_shape[0],
        num_cols=matrix_shape[1],
        align=align), None, None
  else:
    # Unknown shape: scatter onto a zero matrix shaped like the input.
    return array_ops.matrix_set_diag(
        array_ops.zeros_like(op.inputs[0]), grad, k=op.inputs[1],
        align=align), None, None
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
  """Gradient for MatrixSetDiag."""
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    # Static shapes: the diagonal length is min(rows, cols).
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    # Compute the diag shape dynamically from grad's runtime shape.
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  # grad w.r.t. the input: grad with its diagonal zeroed out.
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(diag_shape, dtype=grad.dtype))
  # grad w.r.t. the diagonal: the diagonal of grad.
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixSetDiagV2")
def _MatrixSetDiagGradV2(op, grad):
  """Gradient for MatrixSetDiagV2."""
  diag_shape = op.inputs[1].get_shape()
  if not diag_shape.is_fully_defined():
    # Need to know the values of `d_lower` and `d_upper` to infer diag_shape.
    grad_shape = array_ops.shape(grad)
    batch_shape = grad_shape[:-2]
    matrix_shape = grad_shape[-2:]
    diag_index = array_ops.reshape(op.inputs[2], [-1])  # Converts to vector.
    d_lower = diag_index[0]
    d_upper = diag_index[-1]  # Works both when len(diag_index) is 1 and 2.
    y_offset = control_flow_ops.cond(
        math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0)
    x_offset = control_flow_ops.cond(
        math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0)
    max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset,
                                    matrix_shape[1] + x_offset)
    # pylint: disable=g-long-lambda
    # pyformat: disable
    postfix = control_flow_ops.cond(
        math_ops.equal(d_lower, d_upper),
        lambda: ops.convert_to_tensor([max_diag_len]),
        lambda: ops.convert_to_tensor([d_upper - d_lower + 1,
                                       max_diag_len]))
    # pyformat: enable
    # pylint: enable=g-long-lambda
    diag_shape = array_ops.concat([batch_shape, postfix], 0)
  # grad w.r.t. the input: grad with the written band zeroed out.
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(diag_shape, dtype=grad.dtype), k=op.inputs[2])
  # grad w.r.t. the diagonal band; the k input gets no gradient.
  grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2])
  return (grad_input, grad_diag, None)
@ops.RegisterGradient("MatrixSetDiagV3")
def _MatrixSetDiagGradV3(op, grad):
  """Gradient for MatrixSetDiagV3: like V2, honoring the align attribute."""
  diag_shape = op.inputs[1].get_shape()
  align = op.get_attr("align")
  if not diag_shape.is_fully_defined():
    # Need to know the values of `d_lower` and `d_upper` to infer diag_shape.
    grad_shape = array_ops.shape(grad)
    batch_shape = grad_shape[:-2]
    matrix_shape = grad_shape[-2:]
    diag_index = array_ops.reshape(op.inputs[2], [-1])  # Converts to vector.
    d_lower = diag_index[0]
    d_upper = diag_index[-1]  # Works both when len(diag_index) is 1 and 2.
    y_offset = control_flow_ops.cond(
        math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0)
    x_offset = control_flow_ops.cond(
        math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0)
    max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset,
                                    matrix_shape[1] + x_offset)
    # pylint: disable=g-long-lambda
    # pyformat: disable
    postfix = control_flow_ops.cond(
        math_ops.equal(d_lower, d_upper),
        lambda: ops.convert_to_tensor([max_diag_len]),
        lambda: ops.convert_to_tensor([d_upper - d_lower + 1,
                                       max_diag_len]))
    # pyformat: enable
    # pylint: enable=g-long-lambda
    diag_shape = array_ops.concat([batch_shape, postfix], 0)
  # grad w.r.t. the input: grad with the written band zeroed out.
  grad_input = array_ops.matrix_set_diag(
      grad,
      array_ops.zeros(diag_shape, dtype=grad.dtype),
      k=op.inputs[2],
      align=align)
  # grad w.r.t. the diagonal band; the k input gets no gradient.
  grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2], align=align)
  return (grad_input, grad_diag, None)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
  """Gradient for MatrixBandPart: mask the gradient to the same band."""
  num_lower = op.inputs[1]
  num_upper = op.inputs[2]
  return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
  """Gradient for Fill: no grad for dims; sum of grad for the scalar value."""
  return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
ops.NotDifferentiable("OnesLike")
@ops.RegisterGradient("PreventGradient")
def _PreventGradientGrad(op, _):
  """Always raises: backprop through PreventGradient is forbidden."""
  raise LookupError("Gradient explicitly disabled. Reason: %s" %
                    op.get_attr("message"))
def _IndexedSlicesToTensorNoWarning(indexed_slices):
"""Converts an IndexedSlices to a Tensor without sparse->dense warnings."""
if not isinstance(indexed_slices, indexed_slices_lib.IndexedSlices):
# If it is not IndexedSlices, it's better be a tensor.
return indexed_slices
if indexed_slices.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(indexed_slices))
return math_ops.unsorted_segment_sum(indexed_slices.values,
indexed_slices.indices,
indexed_slices.dense_shape[0])
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad), values_shape)
indices = array_ops.reshape(indices, size)
return [indexed_slices_lib.IndexedSlices(values, indices, params_shape), None]
def _GetBatchIndices(params_shape, indices, batch_dims):
"""Addds the batch offsets to the given indices and returns the results."""
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
casted_params_shape = math_ops.cast(params_shape, indices_dtype)
accum_dim_value = array_ops.ones((), dtype=indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = array_ops.zeros((), dtype=indices_dtype)
step = array_ops.ones((), dtype=indices_dtype)
dim_indices = math_ops.range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = array_ops.concat([
array_ops.tile([1], [dim - 1]), [dim_value],
array_ops.tile([1], [array_ops.rank(indices) - dim])
], axis=0)
batch_indices += array_ops.reshape(dim_indices, dim_shape)
return batch_indices
def _BatchGatherGrad(params_shape, values, indices, batch_dims,
gather_dim_size):
"""Returns the gradient of GatherV2 with batch dimensions."""
# Axis is the first non-batch dimension.
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
if batch_dims:
values_shape = array_ops.shape(values)
# Add the batch offsets to indices and flatten the batch dimensions.
outer_shape = values_shape[:batch_dims]
inner_shape = values_shape[batch_dims:][1:]
batch_size = gen_math_ops.prod(outer_shape, [0], False)
flat_values_shape = array_ops.concat([[-1], inner_shape], 0)
gather_dim_size *= batch_size
indices = _GetBatchIndices(params_shape, indices, batch_dims)
values = array_ops.reshape(
_IndexedSlicesToTensorNoWarning(values), flat_values_shape)
indices = array_ops.reshape(indices, indices_size)
params_grad = math_ops.unsorted_segment_sum(values, indices, gather_dim_size)
if batch_dims:
# Put back the batch dimensions.
params_grad = array_ops.reshape(
params_grad, array_ops.concat([outer_shape, flat_values_shape], 0))
return params_grad
@ops.RegisterGradient("GatherV2")
def _GatherV2Grad(op, grad):
"""Gradient for GatherV2 op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.cast(params_shape, dtypes.int32)
indices = op.inputs[1]
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
axis = op.inputs[2]
axis_static = tensor_util.constant_value(axis)
batch_dims = int(op.get_attr("batch_dims"))
if batch_dims < 0:
if indices.shape.ndims is None:
raise ValueError(
f"Currently, it is unsupported to take the gradient of tf.gather "
f"when batch_dims < 0 and the rank of the indices is unknown. Please "
f"pass a positive batch_dims or use tf.ensure_shape to update the "
f"shape of indices when calling tf.gather. Got "
f"batch_dims={batch_dims} and indices={indices}")
batch_dims += indices.shape.ndims
# For axis 0 gathers, build an appropriately shaped IndexedSlices.
if axis_static == 0:
if context.executing_eagerly():
with ops.device(indices_size.device):
params_tail_shape = array_ops.identity(params_shape)[1:]
else:
params_tail_shape = params_shape[1:]
values_shape = array_ops.concat([indices_size, params_tail_shape], 0)
values = array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad), values_shape)
indices = array_ops.reshape(indices, indices_size)
params_grad = indexed_slices_lib.IndexedSlices(values, indices,
params_shape)
else:
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, compute the gradient and transpose the result back.
outer_shape = params_shape[:axis]
inner_shape = params_shape[axis:][1:]
values_shape = array_ops.concat([outer_shape, [-1], inner_shape], 0)
values_dims = array_ops.size(values_shape)
axis_dims = array_ops.size(outer_shape)
outer_batches_indices = math_ops.range(batch_dims)
batch_axis_indices = math_ops.range(batch_dims, axis_dims)
inner_axes_indices = math_ops.range(axis_dims + 1, values_dims)
values = array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad), values_shape)
# Move values[axis] up to values[batch_dims]
transpose_dims = array_ops.concat([
outer_batches_indices, [axis_dims], batch_axis_indices,
inner_axes_indices
], 0)
values_transpose = array_ops.transpose(values, transpose_dims)
params_shape_transpose = array_ops.gather(params_shape, transpose_dims)
params_grad = _BatchGatherGrad(params_shape_transpose, values_transpose,
indices, batch_dims, params_shape[axis])
# Inverts the above transpose by moving dimension batch_dims back to its
# original position.
invert_transpose_dims = array_ops.concat([
outer_batches_indices, batch_axis_indices + 1, [batch_dims],
inner_axes_indices
], 0)
params_grad = array_ops.transpose(params_grad, invert_transpose_dims)
return [params_grad, None, None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = array_ops.shape(ref, out_type=indices.dtype)
if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1:
ref_grad = indexed_slices_lib.IndexedSlices(
grad, array_ops.squeeze(indices, axis=-1), ref_shape)
else:
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("ResourceGatherNd")
def _ResourceGatherNdGrad(op, grad): # pylint: disable=missing-docstring
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = gen_resource_variable_ops.variable_shape(ref, indices.dtype)
if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1:
ref_grad = indexed_slices_lib.IndexedSlices(
grad, array_ops.squeeze(indices, axis=-1), ref_shape)
else:
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(op, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad,
"Not a number (NaN) or infinity (Inf) values detected in gradient. %s" %
op.get_attr("message"))
@ops.RegisterGradient("CheckNumericsV2")
def _CheckNumericsV2Grad(op, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics_v2(
grad,
"Not a number (NaN) or infinity (Inf) values detected in gradient. %s" %
op.get_attr("message"))
@ops.RegisterGradient("PlaceholderWithDefault")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("_EagerConst")
def _EagerConstGrad(_, grad):
raise AssertionError(
"This op should never interact with gradient APIs. Please file a bug.")
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
@ops.RegisterGradient("IdentityN")
def _IdNGrad(_, *grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [
array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])),
None
]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
@ops.RegisterGradient("ConjugateTranspose")
def _ConjugateTransposeGrad(op, grad):
"""Returns conj(unshuffle(grad))."""
p = op.inputs[1]
return [
array_ops.transpose(
grad, array_ops.invert_permutation(p), conjugate=True), None
]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
input_shape = array_ops.shape(op.inputs[0], out_type=op.inputs[1].dtype)
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
# Sum reduces grad along the first dimension for IndexedSlices
if isinstance(grad, indexed_slices_lib.IndexedSlices):
input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype)
grad = math_ops.unsorted_segment_sum(
grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0)
split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
if not context.executing_eagerly():
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
# slices the original shape out of the gradient."""
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.stack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x, out_type=begin.dtype)
x_grad = array_ops.slice(grad, begin, sizes)
if len(op.inputs) == 3:
return x_grad, None, None
else:
return x_grad, None
ops.RegisterGradient("Pad")(_PadGrad)
ops.RegisterGradient("PadV2")(_PadGrad)
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [
array_ops.reverse_sequence(
grad,
batch_axis=op.get_attr("batch_dim"),
seq_axis=op.get_attr("seq_dim"),
seq_lengths=seq_lengths), None
]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
return gen_array_ops.reverse(grad, reverse_dims), None
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [
array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [
array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [
array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [
array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.depth_to_space(grad, block_size, data_format=data_format)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.space_to_depth(grad, block_size, data_format=data_format)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
return [gen_array_ops.mirror_pad(grad, op.inputs[1], mode=mode), None]
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("QuantizeAndDequantizeV2")
def _QuantizeAndDequantizeV2Grad(_, grad):
return [grad, None, None]
@ops.RegisterGradient("QuantizeAndDequantizeV3")
def _QuantizeAndDequantizeV3Grad(_, grad):
# Only propagate the gradient for the unquantized input.
return [grad, None, None, None]
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
input_bhwc = array_ops.shape(op.inputs[0], out_type=dtypes.int64)
batch_size, rows_in, cols_in, channels = input_bhwc[0], input_bhwc[1], \
input_bhwc[2], input_bhwc[3]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + rows_in * cols_in
input_idx = array_ops.reshape(
math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64),
(1, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_image_patches(
input_idx, op.get_attr("ksizes"), op.get_attr("strides"),
op.get_attr("rates"), op.get_attr("padding"))
# Create indices matrix for output tensor.
output_bhwc = array_ops.shape(op.outputs[0], out_type=dtypes.int64)
rows_out, cols_out = output_bhwc[1], output_bhwc[2]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
output_indices_num = rows_out * cols_out * ksize_r * ksize_c
output_idx = array_ops.reshape(
math_ops.range(output_indices_num, dtype=ops.dtypes.int64),
(1, rows_out, cols_out, ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat([
array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)
],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad),
(batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ExtractVolumePatches")
def _ExtractVolumePatchesGrad(op, grad):
batch_size, planes_in, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].shape.dims
]
input_bphwc = array_ops.shape(op.inputs[0])
batch_size = input_bphwc[0]
channels = input_bphwc[4]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + planes_in * rows_in * cols_in
input_idx = array_ops.reshape(
math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64),
(1, planes_in, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_volume_patches(
input_idx, op.get_attr("ksizes"), op.get_attr("strides"),
op.get_attr("padding"))
# Create indices matrix for output tensor.
_, planes_out, rows_out, cols_out, _ = [
dim.value for dim in op.outputs[0].shape.dims
]
_, ksize_p, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
prc_indices_num = planes_out * rows_out * cols_out
output_indices_num = prc_indices_num * ksize_p * ksize_r * ksize_c
output_idx = array_ops.reshape(
math_ops.range(output_indices_num, dtype=ops.dtypes.int64),
(1, planes_out, rows_out, cols_out, ksize_p * ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat([
array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)
],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(
_IndexedSlicesToTensorNoWarning(grad),
(batch_size, planes_out, rows_out, cols_out, ksize_p, ksize_r,
ksize_c, channels)), (1, 2, 3, 4, 5, 6, 0, 7))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(
jac, (planes_in, rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (3, 0, 1, 2, 4))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
@ops.RegisterGradient("TensorScatterUpdate")
def _TensorScatterUpdateGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.tensor_scatter_update(
array_ops.identity(grad), indices,
array_ops.zeros_like(op.inputs[2], dtype=grad.dtype))
return [tensor_grad, None, updates_grad]
@ops.RegisterGradient("TensorScatterAdd")
def _TensorScatterAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.identity(grad)
return [tensor_grad, None, updates_grad]
def _TensorScatterMinOrMaxGrad(op, grad):
"""Gradient for TensorScatterMin and TensorScatterMax."""
indices = op.inputs[1]
x = op.inputs[0]
y = op.inputs[2]
output = op.outputs[0]
x_indicators = math_ops.cast(math_ops.equal(x, output), grad.dtype)
y_output = array_ops.gather_nd(output, indices)
y_indicators = math_ops.cast(math_ops.equal(y, y_output), grad.dtype)
ys_indicators = array_ops.scatter_nd(indices, y_indicators,
array_ops.shape(x))
indicators = x_indicators + ys_indicators # All elements are >= 1.
# If there are multiple minimum or maximum elements then the gradient will be
# divided between them.
x_grad = grad * x_indicators / indicators
y_grad = array_ops.gather_nd(grad / indicators, indices) * y_indicators
return [x_grad, None, y_grad]
@ops.RegisterGradient("TensorScatterMax")
def _TensorScatterMaxGrad(op, grad):
"""Gradient for TensorScatterMax op."""
return _TensorScatterMinOrMaxGrad(op, grad)
@ops.RegisterGradient("TensorScatterMin")
def _TensorScatterMinGrad(op, grad):
"""Gradient for TensorScatterMin op."""
return _TensorScatterMinOrMaxGrad(op, grad)
@ops.RegisterGradient("TensorScatterSub")
def _TensorScatterSubGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.identity(grad)
return [tensor_grad, None, -updates_grad]
@ops.RegisterGradient("ScatterNdNonAliasingAdd")
def _ScatterNdNonAliasingAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
return [grad, None, updates_grad]
@ops.RegisterGradient("BroadcastTo")
def _BroadcastToGrad(op, grad):
input_value = op.inputs[0]
broadcast_shape = op.inputs[1]
shape_dtype = dtypes.int32
if isinstance(broadcast_shape, ops.Tensor):
shape_dtype = broadcast_shape.dtype
input_value_shape = array_ops.shape(input_value, out_type=shape_dtype)
if not isinstance(broadcast_shape, ops.EagerTensor):
broadcast_shape_static = tensor_shape.TensorShape(
pywrap_tf_session.TF_TryEvaluateConstant_wrapper(
broadcast_shape.graph._c_graph, broadcast_shape._as_tf_output())) # pylint: disable=protected-access
if broadcast_shape_static.is_fully_defined():
broadcast_shape = constant_op.constant(
broadcast_shape_static.as_list(), dtype=shape_dtype)
_, reduction_axes = gen_array_ops.broadcast_gradient_args(
broadcast_shape, input_value_shape)
updates_grad_reshaped = math_ops.reduce_sum(
grad, axis=reduction_axes, keepdims=True)
updates_grad = array_ops.reshape(updates_grad_reshaped, input_value_shape)
return [updates_grad, None]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/kubeEnvironments')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class KubeEnvironmentsOperations(object):
    """KubeEnvironmentsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2021_01_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, (de)serializers, and client configuration are
        # injected by the generated service client that owns this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> Iterable["_models.KubeEnvironmentCollection"]:
        """Get all Kubernetes Environments for a subscription.
        Description for Get all Kubernetes Environments for a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either KubeEnvironmentCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.KubeEnvironmentCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironmentCollection"]
        # Map well-known ARM error codes to typed exceptions; callers may
        # extend/override via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # re-use the service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_subscription.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always fetched with GET.
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items-iterator).
            deserialized = self._deserialize("KubeEnvironmentCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Run one page request through the pipeline, raising on non-200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/kubeEnvironments'}  # type: ignore

    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.KubeEnvironmentCollection"]:
        """Get all the Kubernetes Environments in a resource group.
        Description for Get all the Kubernetes Environments in a resource group.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either KubeEnvironmentCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.KubeEnvironmentCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironmentCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page from the URL template; later pages from next_link.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always fetched with GET.
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items-iterator).
            deserialized = self._deserialize("KubeEnvironmentCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.KubeEnvironment":
        """Get the properties of a Kubernetes Environment.
        Description for Get the properties of a Kubernetes Environment.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the Kubernetes Environment.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: KubeEnvironment, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.KubeEnvironment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('KubeEnvironment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        name: str,
        kube_environment_envelope: "_models.KubeEnvironment",
        **kwargs: Any
    ) -> "_models.KubeEnvironment":
        """Send the initial PUT of the create-or-update long-running
        operation and return its immediate (200/201) payload; polling is
        handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(kube_environment_envelope, 'KubeEnvironment')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('KubeEnvironment', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('KubeEnvironment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        name: str,
        kube_environment_envelope: "_models.KubeEnvironment",
        **kwargs: Any
    ) -> LROPoller["_models.KubeEnvironment"]:
        """Creates or updates a Kubernetes Environment.
        Description for Creates or updates a Kubernetes Environment.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the Kubernetes Environment.
        :type name: str
        :param kube_environment_envelope: Configuration details of the Kubernetes Environment.
        :type kube_environment_envelope: ~azure.mgmt.web.v2021_01_01.models.KubeEnvironment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either KubeEnvironment or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2021_01_01.models.KubeEnvironment]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironment"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when we are not resuming from a saved
        # poller state; cls=lambda keeps the raw pipeline response for the LRO.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                name=name,
                kube_environment_envelope=kube_environment_envelope,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            response = pipeline_response.http_response
            deserialized = self._deserialize('KubeEnvironment', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete long-running operation;
        polling is handled by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 = accepted/in-progress, 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore

    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Delete a Kubernetes Environment.
        Description for Delete a Kubernetes Environment.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the Kubernetes Environment.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                name=name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only invoke the caller's cls.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore

    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        name: str,
        kube_environment_envelope: "_models.KubeEnvironmentPatchResource",
        **kwargs: Any
    ) -> "_models.KubeEnvironment":
        """Creates or updates a Kubernetes Environment.
        Description for Creates or updates a Kubernetes Environment.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the Kubernetes Environment.
        :type name: str
        :param kube_environment_envelope: Configuration details of the Kubernetes Environment.
        :type kube_environment_envelope:
         ~azure.mgmt.web.v2021_01_01.models.KubeEnvironmentPatchResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: KubeEnvironment, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.KubeEnvironment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KubeEnvironment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(kube_environment_envelope, 'KubeEnvironmentPatchResource')

        request = build_update_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 and 201 both carry a KubeEnvironment body.
        if response.status_code == 200:
            deserialized = self._deserialize('KubeEnvironment', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('KubeEnvironment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/kubeEnvironments/{name}'}  # type: ignore
| |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 Server action implementations"""
import argparse
import getpass
import io
import logging
import os
import six
import sys
from cliff import command
from cliff import lister
from cliff import show
try:
from novaclient.v2 import servers
except ImportError:
from novaclient.v1_1 import servers
from openstackclient.common import exceptions
from openstackclient.common import parseractions
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
from openstackclient.identity import common as identity_common
from openstackclient.network import common
def _format_servers_list_networks(networks):
"""Return a formatted string of a server's networks
:param server: a Server.networks field
:rtype: a string of formatted network addresses
"""
output = []
for (network, addresses) in networks.items():
if not addresses:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
def _get_ip_address(addresses, address_type, ip_address_family):
# Old style addresses
if address_type in addresses:
for addy in addresses[address_type]:
if int(addy['version']) in ip_address_family:
return addy['addr']
# New style addresses
new_address_type = address_type
if address_type == 'public':
new_address_type = 'floating'
if address_type == 'private':
new_address_type = 'fixed'
for network in addresses:
for addy in addresses[network]:
# Case where it is list of strings
if isinstance(addy, six.string_types):
if new_address_type == 'fixed':
return addresses[network][0]
else:
return addresses[network][-1]
# Case where it is a dict
if 'OS-EXT-IPS:type' not in addy:
continue
if addy['OS-EXT-IPS:type'] == new_address_type:
if int(addy['version']) in ip_address_family:
return addy['addr']
raise exceptions.CommandError(
"ERROR: No %s IP version %s address found" %
(address_type, ip_address_family)
)
def _prep_server_detail(compute_client, server):
    """Prepare the detailed server dict for printing

    :param compute_client: a compute client instance
    :param server: a Server resource
    :rtype: a dict of server details
    """
    info = server._info.copy()
    # findall(name=...) and REST /details do not return flavor and image
    # information, so re-fetch the full server record by id.
    server = compute_client.servers.get(info['id'])
    info.update(server._info)

    # Render the image blob as "name (id)" when the image can be resolved;
    # otherwise fall back to the bare id.
    image_info = info.get('image', {})
    if image_info:
        image_id = image_info.get('id', '')
        try:
            image = utils.find_resource(compute_client.images, image_id)
            info['image'] = "%s (%s)" % (image.name, image_id)
        except Exception:
            info['image'] = image_id

    # Same treatment for the flavor blob.
    flavor_id = info.get('flavor', {}).get('id', '')
    try:
        flavor = utils.find_resource(compute_client.flavors, flavor_id)
        info['flavor'] = "%s (%s)" % (flavor.name, flavor_id)
    except Exception:
        info['flavor'] = flavor_id

    # NOTE(dtroyer): novaclient splits these into separate entries...
    # Collapse the per-network address lists into one readable string.
    info['addresses'] = _format_servers_list_networks(server.networks)
    # Rename 'metadata' to 'properties' and flatten it for display.
    info.update(
        {'properties': utils.format_dict(info.pop('metadata'))}
    )
    # Prefer the project_id naming over the legacy tenant_id.
    if 'tenant_id' in info:
        info['project_id'] = info.pop('tenant_id')
    # Drop the long, not-too-useful hypermedia links.
    info.pop('links', None)
    return info
def _show_progress(progress):
if progress:
sys.stdout.write('\rProgress: %s' % progress)
sys.stdout.flush()
class AddServerSecurityGroup(command.Command):
    """Add security group to server"""

    log = logging.getLogger(__name__ + '.AddServerSecurityGroup')

    def get_parser(self, prog_name):
        parser = super(AddServerSecurityGroup, self).get_parser(prog_name)
        # Two positionals: the target server, then the group to attach.
        for dest, help_text in (
                ('server', _('Server (name or ID)')),
                ('group', _('Security group to add (name or ID)'))):
            parser.add_argument(dest, metavar='<%s>' % dest, help=help_text)
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)

        compute_client = self.app.client_manager.compute
        # Resolve both names to real resources before mutating anything.
        server = utils.find_resource(
            compute_client.servers, parsed_args.server)
        group = utils.find_resource(
            compute_client.security_groups, parsed_args.group)

        server.add_security_group(group.name)
        return
class AddServerVolume(command.Command):
    """Add volume to server"""

    log = logging.getLogger(__name__ + '.AddServerVolume')

    def get_parser(self, prog_name):
        parser = super(AddServerVolume, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to add (name or ID)'),
        )
        # Optional guest-visible device name (e.g. /dev/vdb).
        parser.add_argument(
            '--device',
            metavar='<device>',
            help=_('Server internal device name for volume'),
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)

        compute = self.app.client_manager.compute
        cinder = self.app.client_manager.volume

        # Resolve both endpoints of the attachment.
        server = utils.find_resource(compute.servers, parsed_args.server)
        volume = utils.find_resource(cinder.volumes, parsed_args.volume)

        compute.volumes.create_server_volume(
            server.id,
            volume.id,
            parsed_args.device,
        )
class CreateServer(show.ShowOne):
    """Create a new server"""

    log = logging.getLogger(__name__ + '.CreateServer')

    def _is_neutron_enabled(self):
        # The 'network' service is only present in the catalog when neutron
        # is deployed; nova-network deployments do not register it.
        service_catalog = self.app.client_manager.auth_ref.service_catalog
        return 'network' in service_catalog.get_endpoints()

    def get_parser(self, prog_name):
        parser = super(CreateServer, self).get_parser(prog_name)
        parser.add_argument(
            'server_name',
            metavar='<server-name>',
            help=_('New server name'),
        )
        # Boot source: exactly one of --image or --volume is required.
        disk_group = parser.add_mutually_exclusive_group(
            required=True,
        )
        disk_group.add_argument(
            '--image',
            metavar='<image>',
            help=_('Create server from this image'),
        )
        disk_group.add_argument(
            '--volume',
            metavar='<volume>',
            help=_('Create server from this volume'),
        )
        parser.add_argument(
            '--flavor',
            metavar='<flavor>',
            required=True,
            help=_('Create server with this flavor'),
        )
        parser.add_argument(
            '--security-group',
            metavar='<security-group-name>',
            action='append',
            default=[],
            help=_('Security group to assign to this server '
                   '(repeat for multiple groups)'),
        )
        parser.add_argument(
            '--key-name',
            metavar='<key-name>',
            help=_('Keypair to inject into this server (optional extension)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Set a property on this server '
                   '(repeat for multiple values)'),
        )
        parser.add_argument(
            '--file',
            metavar='<dest-filename=source-filename>',
            action='append',
            default=[],
            help=_('File to inject into image before boot '
                   '(repeat for multiple files)'),
        )
        parser.add_argument(
            '--user-data',
            metavar='<user-data>',
            help=_('User data file to serve from the metadata server'),
        )
        parser.add_argument(
            '--availability-zone',
            metavar='<zone-name>',
            help=_('Select an availability zone for the server'),
        )
        parser.add_argument(
            '--block-device-mapping',
            metavar='<dev-name=mapping>',
            action='append',
            default=[],
            help=_('Map block devices; map is '
                   '<id>:<type>:<size(GB)>:<delete_on_terminate> '
                   '(optional extension)'),
        )
        parser.add_argument(
            '--nic',
            metavar="<net-id=net-uuid,v4-fixed-ip=ip-addr,v6-fixed-ip=ip-addr,"
                    "port-id=port-uuid>",
            action='append',
            default=[],
            help=_("Create a NIC on the server. "
                   "Specify option multiple times to create multiple NICs. "
                   "Either net-id or port-id must be provided, but not both. "
                   "net-id: attach NIC to network with this UUID, "
                   "port-id: attach NIC to port with this UUID, "
                   "v4-fixed-ip: IPv4 fixed address for NIC (optional), "
                   "v6-fixed-ip: IPv6 fixed address for NIC (optional)."),
        )
        parser.add_argument(
            '--hint',
            metavar='<key=value>',
            action='append',
            default=[],
            help=_('Hints for the scheduler (optional extension)'),
        )
        parser.add_argument(
            '--config-drive',
            metavar='<config-drive-volume>|True',
            default=False,
            help=_('Use specified volume as the config drive, '
                   'or \'True\' to use an ephemeral drive'),
        )
        parser.add_argument(
            '--min',
            metavar='<count>',
            type=int,
            default=1,
            help=_('Minimum number of servers to launch (default=1)'),
        )
        parser.add_argument(
            '--max',
            metavar='<count>',
            type=int,
            default=1,
            help=_('Maximum number of servers to launch (default=1)'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for build to complete'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Boot a server and return its details as (columns, values)."""
        compute_client = self.app.client_manager.compute
        volume_client = self.app.client_manager.volume

        # Lookup parsed_args.image
        image = None
        if parsed_args.image:
            image = utils.find_resource(
                compute_client.images,
                parsed_args.image,
            )

        # Lookup parsed_args.volume
        volume = None
        if parsed_args.volume:
            volume = utils.find_resource(
                volume_client.volumes,
                parsed_args.volume,
            ).id

        # Lookup parsed_args.flavor
        flavor = utils.find_resource(compute_client.flavors,
                                     parsed_args.flavor)

        boot_args = [parsed_args.server_name, image, flavor]

        # Open every file to be injected; values are open file handles.
        files = {}
        for f in parsed_args.file:
            dst, src = f.split('=', 1)
            try:
                files[dst] = io.open(src, 'rb')
            except IOError as e:
                raise exceptions.CommandError("Can't open '%s': %s" % (src, e))

        # Validate the instance-count range before contacting the service.
        if parsed_args.min > parsed_args.max:
            msg = _("min instances should be <= max instances")
            raise exceptions.CommandError(msg)
        if parsed_args.min < 1:
            msg = _("min instances should be > 0")
            raise exceptions.CommandError(msg)
        if parsed_args.max < 1:
            msg = _("max instances should be > 0")
            raise exceptions.CommandError(msg)

        userdata = None
        if parsed_args.user_data:
            try:
                userdata = io.open(parsed_args.user_data)
            except IOError as e:
                msg = "Can't open '%s': %s"
                raise exceptions.CommandError(msg % (parsed_args.user_data, e))

        block_device_mapping = {}
        if volume:
            # When booting from volume, for now assume no other mappings
            # This device value is likely KVM-specific
            block_device_mapping = {'vda': volume}
        else:
            for dev_map in parsed_args.block_device_mapping:
                dev_key, dev_vol = dev_map.split('=', 1)
                block_volume = None
                if dev_vol:
                    block_volume = utils.find_resource(
                        volume_client.volumes,
                        dev_vol,
                    ).id
                block_device_mapping.update({dev_key: block_volume})

        nics = []
        if parsed_args.nic:
            neutron_enabled = self._is_neutron_enabled()
            for nic_str in parsed_args.nic:
                nic_info = {"net-id": "", "v4-fixed-ip": "",
                            "v6-fixed-ip": "", "port-id": ""}
                nic_info.update(dict(kv_str.split("=", 1)
                                for kv_str in nic_str.split(",")))
                # Exactly one of net-id / port-id must be given per NIC.
                if bool(nic_info["net-id"]) == bool(nic_info["port-id"]):
                    msg = _("either net-id or port-id should be specified "
                            "but not both")
                    raise exceptions.CommandError(msg)
                if neutron_enabled:
                    network_client = self.app.client_manager.network
                    if nic_info["net-id"]:
                        nic_info["net-id"] = common.find(network_client,
                                                         'network',
                                                         'networks',
                                                         nic_info["net-id"])
                    if nic_info["port-id"]:
                        nic_info["port-id"] = common.find(network_client,
                                                          'port',
                                                          'ports',
                                                          nic_info["port-id"])
                else:
                    if nic_info["net-id"]:
                        nic_info["net-id"] = utils.find_resource(
                            compute_client.networks,
                            nic_info["net-id"]
                        ).id
                    if nic_info["port-id"]:
                        msg = _("can't create server with port specified "
                                "since neutron not enabled")
                        raise exceptions.CommandError(msg)
                nics.append(nic_info)

        hints = {}
        for hint in parsed_args.hint:
            key, _sep, value = hint.partition('=')
            # NOTE(vish): multiple copies of the same hint will
            #             result in a list of values
            if key in hints:
                if isinstance(hints[key], six.string_types):
                    hints[key] = [hints[key]]
                hints[key] += [value]
            else:
                hints[key] = value

        # What does a non-boolean value for config-drive do?
        # --config-drive argument is either a volume id or
        # 'True' (or '1') to use an ephemeral volume
        if str(parsed_args.config_drive).lower() in ("true", "1"):
            config_drive = True
        elif str(parsed_args.config_drive).lower() in ("false", "0",
                                                       "", "none"):
            config_drive = None
        else:
            config_drive = parsed_args.config_drive

        boot_kwargs = dict(
            meta=parsed_args.property,
            files=files,
            reservation_id=None,
            min_count=parsed_args.min,
            max_count=parsed_args.max,
            security_groups=parsed_args.security_group,
            userdata=userdata,
            key_name=parsed_args.key_name,
            availability_zone=parsed_args.availability_zone,
            block_device_mapping=block_device_mapping,
            nics=nics,
            scheduler_hints=hints,
            config_drive=config_drive)

        self.log.debug('boot_args: %s', boot_args)
        self.log.debug('boot_kwargs: %s', boot_kwargs)

        # Wrap the call to catch exceptions in order to close files
        try:
            server = compute_client.servers.create(*boot_args, **boot_kwargs)
        finally:
            # Bug fix: iterating 'files' directly yields the destination-path
            # keys (strings), so the opened file handles were never closed.
            # Iterate the values, which are the actual file objects.
            for f in files.values():
                if hasattr(f, 'close'):
                    f.close()
            if hasattr(userdata, 'close'):
                userdata.close()

        if parsed_args.wait:
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                callback=_show_progress,
            ):
                sys.stdout.write('\n')
            else:
                self.log.error(_('Error creating server: %s'),
                               parsed_args.server_name)
                sys.stdout.write(_('\nError creating server'))
                raise SystemExit

        details = _prep_server_detail(compute_client, server)
        return zip(*sorted(six.iteritems(details)))
class CreateServerImage(show.ShowOne):
    """Create a new disk image from a running server"""

    log = logging.getLogger(__name__ + '.CreateServerImage')

    def get_parser(self, prog_name):
        parser = super(CreateServerImage, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<image-name>',
            help=_('Name of new image (default is server name)'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for image create to complete'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Snapshot a server to an image and return its details."""
        compute_client = self.app.client_manager.compute
        image_client = self.app.client_manager.image

        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )

        # Default the image name to the server name when --name is omitted.
        if parsed_args.name:
            name = parsed_args.name
        else:
            name = server.name

        image_id = compute_client.servers.create_image(
            server,
            name,
        )

        if parsed_args.wait:
            if utils.wait_for_status(
                image_client.images.get,
                image_id,
                callback=_show_progress,
            ):
                sys.stdout.write('\n')
            else:
                # Bug fix: parsed_args has no 'image_name' attribute (the
                # option's dest is 'name'), so this error path raised
                # AttributeError instead of logging. Use the resolved name.
                self.log.error(_('Error creating server snapshot: %s'),
                               name)
                sys.stdout.write(_('\nError creating server snapshot'))
                raise SystemExit

        image = utils.find_resource(
            image_client.images,
            image_id,
        )

        return zip(*sorted(six.iteritems(image._info)))
class DeleteServer(command.Command):
    """Delete server(s)"""

    log = logging.getLogger(__name__ + '.DeleteServer')

    def get_parser(self, prog_name):
        parser = super(DeleteServer, self).get_parser(prog_name)
        parser.add_argument(
            'servers',
            metavar='<server>',
            nargs="+",
            help=_('Server(s) to delete (name or ID)'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for delete to complete'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        compute_client = self.app.client_manager.compute
        # Delete each requested server in turn, optionally blocking until
        # the deletion is confirmed.
        for name_or_id in parsed_args.servers:
            server_obj = utils.find_resource(
                compute_client.servers, name_or_id)
            compute_client.servers.delete(server_obj.id)
            if not parsed_args.wait:
                continue
            if utils.wait_for_delete(
                compute_client.servers,
                server_obj.id,
                callback=_show_progress,
            ):
                sys.stdout.write('\n')
            else:
                self.log.error(_('Error deleting server: %s'),
                               server_obj.id)
                sys.stdout.write(_('\nError deleting server'))
                raise SystemExit
        return
class ListServer(lister.Lister):
    """List servers"""

    log = logging.getLogger(__name__ + '.ListServer')

    def get_parser(self, prog_name):
        parser = super(ListServer, self).get_parser(prog_name)
        parser.add_argument(
            '--reservation-id',
            metavar='<reservation-id>',
            help=_('Only return instances that match the reservation'),
        )
        parser.add_argument(
            '--ip',
            metavar='<ip-address-regex>',
            help=_('Regular expression to match IP addresses'),
        )
        parser.add_argument(
            '--ip6',
            metavar='<ip-address-regex>',
            help=_('Regular expression to match IPv6 addresses'),
        )
        parser.add_argument(
            '--name',
            metavar='<name-regex>',
            help=_('Regular expression to match names'),
        )
        parser.add_argument(
            '--instance-name',
            metavar='<server-name>',
            help=_('Regular expression to match instance name (admin only)'),
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            # FIXME(dhellmann): Add choices?
            help=_('Search by server status'),
        )
        parser.add_argument(
            '--flavor',
            metavar='<flavor>',
            help=_('Search by flavor'),
        )
        parser.add_argument(
            '--image',
            metavar='<image>',
            help=_('Search by image'),
        )
        parser.add_argument(
            '--host',
            metavar='<hostname>',
            help=_('Search by hostname'),
        )
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=bool(int(os.environ.get("ALL_PROJECTS", 0))),
            help=_('Include all projects (admin only)'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help="Search by project (admin only) (name or ID)")
        identity_common.add_project_domain_option_to_parser(parser)
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Search by user (admin only) (name or ID)'),
        )
        identity_common.add_user_domain_option_to_parser(parser)
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Return (column_headers, rows) for the matching servers."""
        compute_client = self.app.client_manager.compute
        identity_client = self.app.client_manager.identity

        # Resolve the project name/ID to an ID for the compute API.
        project_id = None
        if parsed_args.project:
            project_id = identity_common.find_project(
                identity_client,
                parsed_args.project,
                parsed_args.project_domain,
            ).id
            # Filtering by another project only makes sense across projects.
            parsed_args.all_projects = True
        user_id = None
        if parsed_args.user:
            # BUG FIX: the user filter was resolved with find_project(),
            # which looks the name up against projects and returns the wrong
            # (or no) ID for a user name.  Resolve users with find_user().
            user_id = identity_common.find_user(
                identity_client,
                parsed_args.user,
                parsed_args.user_domain,
            ).id
        search_opts = {
            'reservation_id': parsed_args.reservation_id,
            'ip': parsed_args.ip,
            'ip6': parsed_args.ip6,
            'name': parsed_args.name,
            'instance_name': parsed_args.instance_name,
            'status': parsed_args.status,
            'flavor': parsed_args.flavor,
            'image': parsed_args.image,
            'host': parsed_args.host,
            'tenant_id': project_id,
            'all_tenants': parsed_args.all_projects,
            'user_id': user_id,
        }
        self.log.debug('search options: %s', search_opts)

        if parsed_args.long:
            columns = (
                'ID',
                'Name',
                'Status',
                'Networks',
                'OS-EXT-AZ:availability_zone',
                'OS-EXT-SRV-ATTR:host',
                'Metadata',
            )
            column_headers = (
                'ID',
                'Name',
                'Status',
                'Networks',
                'Availability Zone',
                'Host',
                'Properties',
            )
            # These attribute names must not be title-cased by the formatter.
            mixed_case_fields = [
                'OS-EXT-AZ:availability_zone',
                'OS-EXT-SRV-ATTR:host',
            ]
        else:
            columns = ('ID', 'Name', 'Status', 'Networks')
            column_headers = columns
            mixed_case_fields = []
        data = compute_client.servers.list(search_opts=search_opts)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    mixed_case_fields=mixed_case_fields,
                    formatters={
                        'Networks': _format_servers_list_networks,
                        'Metadata': utils.format_dict,
                    },
                ) for s in data))
class LockServer(command.Command):
    """Lock a server. A non-admin user will not be able to execute actions"""

    log = logging.getLogger(__name__ + '.LockServer')

    def get_parser(self, prog_name):
        parser = super(LockServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and lock it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.lock()
# FIXME(dtroyer): The desired CLI for migration is shown below; it is not
#                 clear how to express it with argparse/cliff:
# server migrate [--wait] \
#   [--live <hostname>
#     [--shared-migration | --block-migration]
#     [--disk-overcommit | --no-disk-overcommit]]
#   <server>
#
# live_parser = parser.add_argument_group(title='Live migration options')
# was tried, but nesting mutually exclusive groups inside an argument group
# does not seem to work.
class MigrateServer(command.Command):
    """Migrate server to different host"""

    log = logging.getLogger(__name__ + '.MigrateServer')

    def get_parser(self, prog_name):
        parser = super(MigrateServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--live',
            metavar='<hostname>',
            help=_('Target hostname'),
        )
        # --shared-migration and --block-migration both write the boolean
        # 'shared_migration'; shared is the default.
        migration_group = parser.add_mutually_exclusive_group()
        migration_group.add_argument(
            '--shared-migration',
            dest='shared_migration',
            action='store_true',
            default=True,
            help=_('Perform a shared live migration (default)'),
        )
        migration_group.add_argument(
            '--block-migration',
            dest='shared_migration',
            action='store_false',
            help=_('Perform a block live migration'),
        )
        disk_group = parser.add_mutually_exclusive_group()
        disk_group.add_argument(
            '--disk-overcommit',
            action='store_true',
            default=False,
            help=_('Allow disk over-commit on the destination host'),
        )
        disk_group.add_argument(
            '--no-disk-overcommit',
            dest='disk_overcommit',
            action='store_false',
            default=False,
            help=_('Do not over-commit disk on the'
                   ' destination host (default)'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            # BUG FIX: help text said 'resize' — copy/paste from
            # ResizeServer; this command migrates.
            help=_('Wait for migrate to complete'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Live- or cold-migrate the server; optionally wait for completion.

        Raises SystemExit if --wait is given and the migration fails.
        """
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        if parsed_args.live:
            server.live_migrate(
                parsed_args.live,
                parsed_args.shared_migration,
                parsed_args.disk_overcommit,
            )
        else:
            server.migrate()
        if parsed_args.wait:
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                callback=_show_progress,
            ):
                sys.stdout.write(_('Complete\n'))
            else:
                sys.stdout.write(_('\nError migrating server'))
                raise SystemExit
class PauseServer(command.Command):
    """Pause server"""

    log = logging.getLogger(__name__ + '.PauseServer')

    def get_parser(self, prog_name):
        parser = super(PauseServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and pause it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.pause()
class RebootServer(command.Command):
    """Perform a hard or soft server reboot"""

    log = logging.getLogger(__name__ + '.RebootServer')

    def get_parser(self, prog_name):
        parser = super(RebootServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        # --hard / --soft both store into 'reboot_type'; soft is the default.
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--hard',
            dest='reboot_type',
            action='store_const',
            const=servers.REBOOT_HARD,
            default=servers.REBOOT_SOFT,
            help=_('Perform a hard reboot'),
        )
        group.add_argument(
            '--soft',
            dest='reboot_type',
            action='store_const',
            const=servers.REBOOT_SOFT,
            default=servers.REBOOT_SOFT,
            help=_('Perform a soft reboot'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for reboot to complete'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Reboot the server; with --wait, block until it is active again."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers, parsed_args.server)
        server.reboot(parsed_args.reboot_type)
        if not parsed_args.wait:
            return
        ok = utils.wait_for_status(
            compute_client.servers.get,
            server.id,
            callback=_show_progress,
        )
        if ok:
            sys.stdout.write(_('\nReboot complete\n'))
        else:
            sys.stdout.write(_('\nError rebooting server\n'))
            raise SystemExit
class RebuildServer(show.ShowOne):
    """Rebuild server"""
    log = logging.getLogger(__name__ + '.RebuildServer')
    def get_parser(self, prog_name):
        parser = super(RebuildServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--image',
            metavar='<image>',
            required=True,
            help=_('Recreate server from this image'),
        )
        parser.add_argument(
            '--password',
            metavar='<password>',
            help="Set the password on the rebuilt instance",
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for rebuild to complete'),
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Rebuild the server from the given image, optionally waiting for
        # completion, and return (columns, values) of the new server detail.
        # Raises SystemExit if --wait is given and the rebuild fails.
        compute_client = self.app.client_manager.compute
        # Lookup parsed_args.image
        image = utils.find_resource(compute_client.images, parsed_args.image)
        server = utils.find_resource(
            compute_client.servers, parsed_args.server)
        # NOTE(review): parsed_args.password may be None here — presumably
        # rebuild() accepts that and keeps/randomizes the password; confirm
        # against the novaclient servers API.
        server = server.rebuild(image, parsed_args.password)
        if parsed_args.wait:
            if utils.wait_for_status(
                compute_client.servers.get,
                server.id,
                callback=_show_progress,
            ):
                sys.stdout.write(_('\nComplete\n'))
            else:
                sys.stdout.write(_('\nError rebuilding server'))
                raise SystemExit
        details = _prep_server_detail(compute_client, server)
        return zip(*sorted(six.iteritems(details)))
class RemoveServerSecurityGroup(command.Command):
    """Remove security group from server"""

    log = logging.getLogger(__name__ + '.RemoveServerSecurityGroup')

    def get_parser(self, prog_name):
        parser = super(RemoveServerSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>',
            help=_('Name or ID of server to use'))
        parser.add_argument(
            'group', metavar='<group>',
            help=_('Name or ID of security group to remove from server'))
        return parser

    def take_action(self, parsed_args):
        """Detach the named security group from the server."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        # Resolve both names/IDs, then issue the removal.
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        security_group = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        server.remove_security_group(security_group)
class RemoveServerVolume(command.Command):
    """Remove volume from server"""

    log = logging.getLogger(__name__ + '.RemoveServerVolume')

    def get_parser(self, prog_name):
        parser = super(RemoveServerVolume, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        parser.add_argument(
            'volume', metavar='<volume>',
            help=_('Volume to remove (name or ID)'))
        return parser

    def take_action(self, parsed_args):
        """Detach the volume from the server via the compute API."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        volume_client = self.app.client_manager.volume
        # The server is resolved through compute, the volume through the
        # volume service; the detach itself goes through compute.
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        volume = utils.find_resource(
            volume_client.volumes,
            parsed_args.volume,
        )
        compute_client.volumes.delete_server_volume(
            server.id,
            volume.id,
        )
class RescueServer(show.ShowOne):
    """Put server in rescue mode"""
    log = logging.getLogger(__name__ + '.RescueServer')
    def get_parser(self, prog_name):
        parser = super(RescueServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Rescue the server and return (columns, values) from the response
        # body.  NOTE(review): rescue() is assumed to return a
        # (response, body) pair whose body is a dict — confirm against the
        # novaclient API.
        _, body = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        ).rescue()
        return zip(*sorted(six.iteritems(body)))
class ResizeServer(command.Command):
    """Scale server to a new flavor"""
    log = logging.getLogger(__name__ + '.ResizeServer')
    def get_parser(self, prog_name):
        parser = super(ResizeServer, self).get_parser(prog_name)
        # --flavor / --confirm / --revert are the three mutually exclusive
        # phases of a resize operation.
        phase_group = parser.add_mutually_exclusive_group()
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        phase_group.add_argument(
            '--flavor',
            metavar='<flavor>',
            help=_('Resize server to specified flavor'),
        )
        phase_group.add_argument(
            '--confirm',
            action="store_true",
            help=_('Confirm server resize is complete'),
        )
        phase_group.add_argument(
            '--revert',
            action="store_true",
            help=_('Restore server state before resize'),
        )
        parser.add_argument(
            '--wait',
            action='store_true',
            help=_('Wait for resize to complete'),
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Start, confirm, or revert a resize depending on which phase flag
        # was given.  Raises SystemExit if --wait is given and the resize
        # does not reach an acceptable status.
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        if parsed_args.flavor:
            flavor = utils.find_resource(
                compute_client.flavors,
                parsed_args.flavor,
            )
            compute_client.servers.resize(server, flavor)
            if parsed_args.wait:
                # A resize that lands in 'verify_resize' still needs a
                # subsequent --confirm, but counts as success here.
                if utils.wait_for_status(
                    compute_client.servers.get,
                    server.id,
                    success_status=['active', 'verify_resize'],
                    callback=_show_progress,
                ):
                    sys.stdout.write(_('Complete\n'))
                else:
                    sys.stdout.write(_('\nError resizing server'))
                    raise SystemExit
        elif parsed_args.confirm:
            compute_client.servers.confirm_resize(server)
        elif parsed_args.revert:
            compute_client.servers.revert_resize(server)
class ResumeServer(command.Command):
    """Resume server"""

    log = logging.getLogger(__name__ + '.ResumeServer')

    def get_parser(self, prog_name):
        parser = super(ResumeServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and resume it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.resume()
class SetServer(command.Command):
    """Set server properties"""
    log = logging.getLogger(__name__ + '.SetServer')
    def get_parser(self, prog_name):
        parser = super(SetServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<new-name>',
            help=_('New server name'),
        )
        parser.add_argument(
            '--root-password',
            action="store_true",
            help=_('Set new root password (interactive only)'),
        )
        parser.add_argument(
            "--property",
            metavar="<key=value>",
            action=parseractions.KeyValueAction,
            help=_('Property to add/change for this server '
                   '(repeat option to set multiple properties)'),
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Apply each requested mutation independently: rename, metadata
        # update, then interactive root-password change.
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        if parsed_args.name:
            server.update(name=parsed_args.name)
        if parsed_args.property:
            compute_client.servers.set_meta(
                server,
                parsed_args.property,
            )
        if parsed_args.root_password:
            # Prompt twice and only change the password when both entries
            # match; otherwise fail with CommandError.
            p1 = getpass.getpass(_('New password: '))
            p2 = getpass.getpass(_('Retype new password: '))
            if p1 == p2:
                server.change_password(p1)
            else:
                msg = _("Passwords do not match, password unchanged")
                raise exceptions.CommandError(msg)
class ShowServer(show.ShowOne):
    """Show server details"""
    log = logging.getLogger(__name__ + '.ShowServer')
    def get_parser(self, prog_name):
        parser = super(ShowServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--diagnostics',
            action='store_true',
            default=False,
            help=_('Display server diagnostics information'),
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Return (columns, values) for either the server detail or, with
        # --diagnostics, the raw diagnostics payload.
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(compute_client.servers,
                                     parsed_args.server)
        if parsed_args.diagnostics:
            (resp, data) = server.diagnostics()
            # Any non-200 response is reported on stderr and rendered as an
            # empty table rather than raising.
            if not resp.status_code == 200:
                sys.stderr.write(_("Error retrieving diagnostics data"))
                return ({}, {})
        else:
            data = _prep_server_detail(compute_client, server)
        return zip(*sorted(six.iteritems(data)))
class SshServer(command.Command):
    """Ssh to server"""
    log = logging.getLogger(__name__ + '.SshServer')
    def get_parser(self, prog_name):
        # Each long option has a hidden short alias (SUPPRESS) mirroring the
        # corresponding ssh(1) flag.
        parser = super(SshServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--login',
            metavar='<login-name>',
            help=_('Login name (ssh -l option)'),
        )
        parser.add_argument(
            '-l',
            dest='login',
            metavar='<login-name>',
            help=argparse.SUPPRESS,
        )
        parser.add_argument(
            '--port',
            metavar='<port>',
            type=int,
            help=_('Destination port (ssh -p option)'),
        )
        parser.add_argument(
            '-p',
            metavar='<port>',
            dest='port',
            type=int,
            help=argparse.SUPPRESS,
        )
        parser.add_argument(
            '--identity',
            metavar='<keyfile>',
            help=_('Private key file (ssh -i option)'),
        )
        parser.add_argument(
            '-i',
            metavar='<filename>',
            dest='identity',
            help=argparse.SUPPRESS,
        )
        parser.add_argument(
            '--option',
            metavar='<config-options>',
            help=_('Options in ssh_config(5) format (ssh -o option)'),
        )
        parser.add_argument(
            '-o',
            metavar='<option>',
            dest='option',
            help=argparse.SUPPRESS,
        )
        ip_group = parser.add_mutually_exclusive_group()
        ip_group.add_argument(
            '-4',
            dest='ipv4',
            action='store_true',
            default=False,
            help=_('Use only IPv4 addresses'),
        )
        ip_group.add_argument(
            '-6',
            dest='ipv6',
            action='store_true',
            default=False,
            help=_('Use only IPv6 addresses'),
        )
        # --public / --private / --address-type all store into
        # 'address_type'; default is the public address.
        type_group = parser.add_mutually_exclusive_group()
        type_group.add_argument(
            '--public',
            dest='address_type',
            action='store_const',
            const='public',
            default='public',
            help=_('Use public IP address'),
        )
        type_group.add_argument(
            '--private',
            dest='address_type',
            action='store_const',
            const='private',
            default='public',
            help=_('Use private IP address'),
        )
        type_group.add_argument(
            '--address-type',
            metavar='<address-type>',
            dest='address_type',
            default='public',
            help=_('Use other IP address (public, private, etc)'),
        )
        parser.add_argument(
            '-v',
            dest='verbose',
            action='store_true',
            default=False,
            help=argparse.SUPPRESS,
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Build an ssh command line from the parsed options and exec it via
        # os.system().
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        # Build the command
        cmd = "ssh"
        ip_address_family = [4, 6]
        if parsed_args.ipv4:
            ip_address_family = [4]
            cmd += " -4"
        if parsed_args.ipv6:
            ip_address_family = [6]
            cmd += " -6"
        if parsed_args.port:
            cmd += " -p %d" % parsed_args.port
        if parsed_args.identity:
            cmd += " -i %s" % parsed_args.identity
        if parsed_args.option:
            cmd += " -o %s" % parsed_args.option
        if parsed_args.login:
            login = parsed_args.login
        else:
            # NOTE(review): falls back to the client manager's private
            # _username attribute — confirm this stays available.
            login = self.app.client_manager._username
        if parsed_args.verbose:
            cmd += " -v"
        cmd += " %s@%s"
        ip_address = _get_ip_address(server.addresses,
                                     parsed_args.address_type,
                                     ip_address_family)
        self.log.debug("ssh command: %s", (cmd % (login, ip_address)))
        os.system(cmd % (login, ip_address))
class SuspendServer(command.Command):
    """Suspend server"""

    log = logging.getLogger(__name__ + '.SuspendServer')

    def get_parser(self, prog_name):
        parser = super(SuspendServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and suspend it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.suspend()
class UnlockServer(command.Command):
    """Unlock server"""

    log = logging.getLogger(__name__ + '.UnlockServer')

    def get_parser(self, prog_name):
        parser = super(UnlockServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and unlock it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.unlock()
class UnpauseServer(command.Command):
    """Unpause server"""

    log = logging.getLogger(__name__ + '.UnpauseServer')

    def get_parser(self, prog_name):
        parser = super(UnpauseServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and unpause it."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.unpause()
class UnrescueServer(command.Command):
    """Restore server from rescue mode"""

    log = logging.getLogger(__name__ + '.UnrescueServer')

    def get_parser(self, prog_name):
        parser = super(UnrescueServer, self).get_parser(prog_name)
        parser.add_argument(
            'server', metavar='<server>', help=_('Server (name or ID)'))
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Look the server up and take it out of rescue mode."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        server.unrescue()
class UnsetServer(command.Command):
    """Unset server properties"""

    log = logging.getLogger(__name__ + '.UnsetServer')

    def get_parser(self, prog_name):
        parser = super(UnsetServer, self).get_parser(prog_name)
        parser.add_argument(
            'server',
            metavar='<server>',
            help=_('Server (name or ID)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            default=[],
            help=_('Property key to remove from server '
                   '(repeat to unset multiple values)'),
        )
        return parser

    @utils.log_method(log)
    def take_action(self, parsed_args):
        """Remove the listed metadata keys from the server, if any."""
        compute_client = self.app.client_manager.compute
        server = utils.find_resource(
            compute_client.servers,
            parsed_args.server,
        )
        if not parsed_args.property:
            return
        compute_client.servers.delete_meta(
            server,
            parsed_args.property,
        )
| |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from typing import Tuple, Any
import json
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()

''' GLOBAL VARS '''

# Column headers that must keep canonical capitalization (acronyms etc.)
# instead of the default pascalToSpace() transform when rendering tables.
SPECIAL_HEADERS: dict = {
    'id': 'ID',
    'userId': 'User ID',
    'osVersion': 'OS Version',
    'imei': 'IMEI',
    'meid': 'MEID'
}

# Raw-response field names (and their display order) used as table columns
# for each entity type.
HEADERS: dict = {
    'raw_device': ['id', 'userId', 'deviceName', 'operatingSystem', 'osVersion', 'emailAddress',
                   'manufacturer', 'model', 'imei', 'meid'],
}
''' CLIENT '''
class MsGraphClient:
    """Thin wrapper over MicrosoftClient for the Microsoft Graph / Intune
    managed-device endpoints used by this integration."""
    def __init__(self, self_deployed, tenant_id, auth_and_token_url, enc_key, app_name, base_url, use_ssl, proxy,
                 ok_codes):
        # All HTTP and OAuth handling is delegated to MicrosoftClient.
        self.ms_client = MicrosoftClient(self_deployed=self_deployed, tenant_id=tenant_id, auth_id=auth_and_token_url,
                                         enc_key=enc_key, app_name=app_name, base_url=base_url, verify=use_ssl,
                                         proxy=proxy, ok_codes=ok_codes)
    def list_managed_devices(self, limit: int) -> Tuple[list, Any]:
        """Return up to `limit` managed devices and the raw API response."""
        url_suffix: str = '/deviceManagement/managedDevices'
        raw_response = self.ms_client.http_request('GET', url_suffix)
        return raw_response.get('value', [])[:limit], raw_response
    def get_managed_device(self, device_id: str) -> Tuple[Any, str]:
        """Fetch one managed device; echoes device_id back for the caller."""
        url_suffix: str = f'/deviceManagement/managedDevices/{device_id}'
        return self.ms_client.http_request('GET', url_suffix), device_id
    def make_action(self, device_id: str, action: str, body: str = None) -> None:
        """POST a device action (e.g. rebootNow) with an optional JSON body.

        NOTE(review): unlike the GET methods above, this url_suffix has no
        leading '/' — presumably MicrosoftClient joins it the same way
        either way; confirm against its base_url handling.
        """
        url_suffix: str = f'deviceManagement/managedDevices/{device_id}/{action}'
        self.ms_client.http_request('POST', url_suffix, data=body, return_empty_response=True)
    def delete_user_from_shared_apple_device(self, user_principal_name: str, device_id: str, action: str) -> None:
        """Remove a user from a shared Apple device."""
        body: dict = {'userPrincipalName': user_principal_name}
        self.make_action(device_id, action, json.dumps(body))
    def clean_windows_device(self, keep_user_data: bool, device_id: str, action: str) -> None:
        """Clean a Windows device, optionally preserving user data."""
        body: dict = {'keepUserData': keep_user_data}
        self.make_action(device_id, action, json.dumps(body))
    def windows_device_defender_scan(self, quick_scan: bool, device_id: str, action: str) -> None:
        """Trigger a Windows Defender scan (quick or full)."""
        body: dict = {'quickScan': quick_scan}
        self.make_action(device_id, action, json.dumps(body))
    def wipe_device(self, keep_enrollment_data: bool, keep_user_data: bool, mac_os_unlock_code: str,
                    device_id: str, action: str) -> None:
        """Wipe a device; macOS unlock code is only sent when provided."""
        body: dict = {
            'keepEnrollmentData': keep_enrollment_data,
            'keepUserData': keep_user_data
        }
        if mac_os_unlock_code:
            body['macOsUnlockCode'] = mac_os_unlock_code
        self.make_action(device_id, action, json.dumps(body))
    def update_windows_device_account(self, device_account_password: str, password_rotation_enabled: bool,
                                      calendar_sync_enabled: bool, device_account_email: str, exchange_server: str,
                                      session_initiation_protocal_address: str, device_id: str, action: str) -> None:
        """Update the on-device Windows account settings.

        NOTE(review): 'sessionInitiationProtocalAddress' ("Protocal") is
        presumably the spelling the Graph API itself expects — do not
        "fix" it without checking the API reference.
        """
        body: dict = {
            'updateWindowsDeviceAccountActionParameter': {
                '@odata.type': 'microsoft.graph.updateWindowsDeviceAccountActionParameter',
                'deviceAccount': {
                    '@odata.type': 'microsoft.graph.windowsDeviceAccount',
                    'password': device_account_password
                },
                'passwordRotationEnabled': password_rotation_enabled,
                'calendarSyncEnabled': calendar_sync_enabled,
                'deviceAccountEmail': device_account_email,
                'exchangeServer': exchange_server,
                'sessionInitiationProtocalAddress': session_initiation_protocal_address
            }
        }
        self.make_action(device_id, action, json.dumps(body))
''' HELPER FUNCTIONS '''
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
    """
    Tries to parse an integer, and if fails will throw DemistoException with given err_msg
    :param int_to_parse: The argument to be parsed into integer
    :param err_msg: The error message to show in case of failure
    :return: The integer
    """
    try:
        return int(int_to_parse)
    except (TypeError, ValueError):
        raise DemistoException(err_msg)
def parse_device_action_results(raw_device_action_results: list) -> list:
    """
    Parses a list of device action results
    :param raw_device_action_results: The raw list of device action results
    :return: The parsed list of device action results
    """
    parsed_results: list = []
    for raw_result in raw_device_action_results:
        # assign_params drops empty values, so a fully-empty result yields
        # a falsy dict and is skipped entirely.
        entry = assign_params(
            Name=raw_result.get('actionName'),
            State=raw_result.get('actionState'),
            StartDateTime=raw_result.get('startDateTime'),
            LastUpdatedDateTime=raw_result.get('lastUpdatedDateTime'),
        )
        if entry:
            parsed_results.append(entry)
    return parsed_results
def build_device_object(raw_device: dict) -> dict:
    """
    Builds a device context object
    :param raw_device: The raw device object as returned by the Graph API
    :return: The device context object (empty values removed by assign_params)
    """
    # These three sub-objects may be present-but-None in the raw response,
    # so a plain .get(..., default) is not enough — normalize None to
    # empty containers explicitly.
    device_action_results: list = raw_device.get('deviceActionResults', []) if raw_device.get('deviceActionResults') \
        else []
    conf_manager_client_enabled_features: dict = raw_device.get('configurationManagerClientEnabledFeatures', {}) \
        if raw_device.get('configurationManagerClientEnabledFeatures') else {}
    device_health_attestation_state: dict = raw_device.get('deviceHealthAttestationState', {}) \
        if raw_device.get('deviceHealthAttestationState') else {}
    # Map camelCase API field names to the PascalCase context keys.
    return assign_params(**{
        'ID': raw_device.get('id'),
        'UserID': raw_device.get('userId'),
        'Name': raw_device.get('deviceName'),
        'ManagedDeviceOwnerType': raw_device.get('managedDeviceOwnerType'),
        'ActionResults': parse_device_action_results(device_action_results),
        'EnrolledDateTime': raw_device.get('enrolledDateTime'),
        'LastSyncDateTime': raw_device.get('lastSyncDateTime'),
        'OperatingSystem': raw_device.get('operatingSystem'),
        'ComplianceState': raw_device.get('complianceState'),
        'JailBroken': raw_device.get('jailBroken'),
        'ManagementAgent': raw_device.get('managementAgent'),
        'OSVersion': raw_device.get('osVersion'),
        'EASDeviceID': raw_device.get('easDeviceId'),
        'EASActivationDateTime': raw_device.get('easActivationDateTime'),
        'ActivationLockBypassCode': raw_device.get('activationLockBypassCode'),
        'EmailAddress': raw_device.get('emailAddress'),
        'AzureADDeviceID': raw_device.get('azureADDeviceId'),
        'CategoryDisplayName': raw_device.get('deviceCategoryDisplayName'),
        'ExchangeAccessState': raw_device.get('exchangeAccessState'),
        'ExchangeAccessStateReason': raw_device.get('exchangeAccessStateReason'),
        'IsSupervised': raw_device.get('isSupervised'),
        'IsEncrypted': raw_device.get('isEncrypted'),
        'UserPrincipalName': raw_device.get('userPrincipalName'),
        'Model': raw_device.get('model'),
        'Manufacturer': raw_device.get('manufacturer'),
        'IMEI': raw_device.get('imei'),
        'SerialNumber': raw_device.get('serialNumber'),
        'PhoneNumber': raw_device.get('phoneNumber'),
        'AndroidSecurityPatchLevel': raw_device.get('androidSecurityPatchLevel'),
        'ConfigurationManagerClientEnabledFeatures': assign_params(**{
            'Inventory': conf_manager_client_enabled_features.get('inventory'),
            'ModernApps': conf_manager_client_enabled_features.get('modernApps'),
            'ResourceAccess': conf_manager_client_enabled_features.get('resourceAccess'),
            'DeviceConfiguration': conf_manager_client_enabled_features.get('deviceConfiguration'),
            'CompliancePolicy': conf_manager_client_enabled_features.get('compliancePolicy'),
            'WindowsUpdateForBusiness': conf_manager_client_enabled_features.get('windowsUpdatesForBusiness')
        }),
        'WiFiMacAddress': raw_device.get('wiFiMacAddress'),
        'HealthAttestationState': assign_params(**{
            'LastUpdateDateTime': device_health_attestation_state.get('lastUpdateDateTime'),
            'IssuedDateTime': device_health_attestation_state.get('issuedDateTime'),
            'ResetCount': device_health_attestation_state.get('resetCount'),
            'RestartCount': device_health_attestation_state.get('restartCount'),
            'BitLockerStatus': device_health_attestation_state.get('bitLockerStatus'),
            'BootManagerVersion': device_health_attestation_state.get('bootManagerVersion'),
            'SecureBoot': device_health_attestation_state.get('secureBoot'),
            'BootDebugging': device_health_attestation_state.get('bootDebugging'),
            'OperatingSystemKernelDebugging': device_health_attestation_state.get('operatingSystemKernelDebugging'),
            'CodeIntegrity': device_health_attestation_state.get('codeIntegrity'),
            'TestSigning': device_health_attestation_state.get('testSigning'),
            'SafeMode': device_health_attestation_state.get('safeMode'),
            'WindowsPE': device_health_attestation_state.get('windowsPE'),
            'EarlyLaunchAntiMalwareDriverProtection':
                device_health_attestation_state.get('earlyLaunchAntiMalwareDriverProtection'),
            'VirtualSecureMode': device_health_attestation_state.get('virtualSecureMode'),
            'PCRHashAlgorithm': device_health_attestation_state.get('pcrHashAlgorithm'),
            'BootAppSecurityVersion': device_health_attestation_state.get('bootAppSecurityVersion'),
            'BootManagerSecurityVersion': device_health_attestation_state.get('bootManagerSecurityVersion'),
            'TPMVersion': device_health_attestation_state.get('tpmVersion'),
            'PCR0': device_health_attestation_state.get('pcr0'),
            'SecureBootConfigurationPolicyFingerPrint':
                device_health_attestation_state.get('secureBootConfigurationPolicyFingerPrint'),
            'CodeIntegrityPolicy': device_health_attestation_state.get('codeIntegrityPolicy'),
            'BootRevisionListInfo': device_health_attestation_state.get('bootRevisionListInfo'),
            'OperatingSystemRevListInfo': device_health_attestation_state.get('operatingSystemRevListInfo'),
            'HealthStatusMismatchInfo': device_health_attestation_state.get('healthStatusMismatchInfo'),
            'HealthAttestationSupportedStatus': device_health_attestation_state.get('healthAttestationSupportedStatus')
        }),
        'SubscriberCarrier': raw_device.get('subscriberCarrier'),
        'MEID': raw_device.get('meid'),
        'TotalStorageSpaceInBytes': raw_device.get('totalStorageSpaceInBytes'),
        'FreeStorageSpaceInBytes': raw_device.get('freeStorageSpaceInBytes'),
        'ManagedDeviceName': raw_device.get('managedDeviceName'),
        'PartnerReportedThreatState': raw_device.get('partnerReportedThreatState')
    })
''' COMMANDS '''
def list_managed_devices_command(client: MsGraphClient, args: dict) -> None:
    # List managed devices (up to args['limit'], default 10) and push
    # markdown + context to the war room via return_outputs.
    limit: int = try_parse_integer(args.get('limit', 10), err_msg='This value for limit must be an integer.')
    list_raw_devices, raw_response = client.list_managed_devices(limit)
    list_devices: list = [build_device_object(device) for device in list_raw_devices if device]
    entry_context: dict = {'MSGraphDeviceManagement.Device(val.ID === obj.ID)': list_devices}
    human_readable: str = 'No managed devices found.'
    if list_devices:
        name: str = 'List managed devices'
        # A single result gets a device-specific table title.
        if len(list_devices) == 1:
            name = f'Managed device {list_devices[0].get("Name", "")}'
        # The table is rendered from the raw devices; SPECIAL_HEADERS keeps
        # acronym columns (ID, IMEI, ...) correctly capitalized.
        human_readable = tableToMarkdown(name=name, t=list_raw_devices, headers=HEADERS['raw_device'],
                                         headerTransform=lambda h: SPECIAL_HEADERS.get(h, pascalToSpace(h)),
                                         removeNull=True)
    return_outputs(human_readable, entry_context, raw_response)
def get_managed_device_command(client: MsGraphClient, args: dict) -> None:
    """Fetch a single managed device by id and return it to the war room."""
    device_id: str = str(args.get('device_id'))
    raw_response, device_id = client.get_managed_device(device_id)
    device_obj: dict = build_device_object(raw_response)
    entry_context: dict = {'MSGraphDeviceManagement.Device(val.ID === obj.ID)': device_obj}
    if device_obj:
        human_readable = tableToMarkdown(name=f'Managed device {device_obj.get("Name", "")}', t=raw_response,
                                         headers=HEADERS['raw_device'],
                                         headerTransform=lambda h: SPECIAL_HEADERS.get(h, pascalToSpace(h)),
                                         removeNull=True)
    else:
        human_readable = f'Managed device {device_id} not found.'
    return_outputs(human_readable, entry_context, raw_response)
def disable_lost_mode_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'disableLostMode' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'disableLostMode')
    return_outputs('Device disable lost mode action activated successfully.', {}, {})
def locate_device_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'locateDevice' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'locateDevice')
    return_outputs('Locate device action activated successfully.', {}, {})
def sync_device_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'syncDevice' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'syncDevice')
    return_outputs('Sync device action activated successfully.', {}, {})
def device_reboot_now_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'rebootNow' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'rebootNow')
    return_outputs('Device reboot now action activated successfully.', {}, {})
def device_shutdown_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'shutDown' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'shutDown')
    return_outputs('Device shutdown action activated successfully.', {}, {})
def device_bypass_activation_lock_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'bypassActivationLock' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'bypassActivationLock')
    return_outputs('Device bypass activation lock action activated successfully.', {}, {})
def device_retire_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'retire' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'retire')
    return_outputs('Retire device action activated successfully.', {}, {})
def device_reset_passcode_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'resetPasscode' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'resetPasscode')
    return_outputs('Device reset passcode action activated successfully.', {}, {})
def device_remote_lock_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'remoteLock' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'remoteLock')
    return_outputs('Device remote lock action activated successfully.', {}, {})
def device_request_remote_assistance_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'requestRemoteAssistance' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'requestRemoteAssistance')
    return_outputs('Device request remote assistance action activated successfully.', {}, {})
def device_recover_passcode_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'recoverPasscode' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'recoverPasscode')
    return_outputs('Device recover passcode action activated successfully.', {}, {})
def logout_shared_apple_device_active_user_command(client: MsGraphClient, args: dict) -> None:
    """Log out the active user on a shared Apple device.

    Args:
        client: MS Graph client used to issue the action.
        args: Command arguments; expects 'device_id'.
    """
    device_id: str = str(args.get('device_id'))
    client.make_action(device_id, 'logoutSharedAppleDeviceActiveUser')
    # Fixed typo in the user-facing message: "shard" -> "shared".
    return_outputs('Logout shared apple device active user action activated successfully.', {}, {})
def delete_user_from_shared_apple_device_command(client: MsGraphClient, args: dict) -> None:
    """Remove the given user from a shared Apple device."""
    principal: str = str(args.get('user_principal_name'))
    target_device: str = str(args.get('device_id'))
    client.delete_user_from_shared_apple_device(principal, target_device, 'deleteUserFromSharedAppleDevice')
    return_outputs('Delete user from shared apple device action activated successfully.', {}, {})
def windows_device_defender_update_signatures_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'windowsDefenderUpdateSignatures' action on the given device."""
    target_device: str = str(args.get('device_id'))
    client.make_action(target_device, 'windowsDefenderUpdateSignatures')
    return_outputs('Windows device defender update signatures action activated successfully.', {}, {})
def clean_windows_device_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'cleanWindowsDevice' action, optionally keeping user data.

    BUG FIX: demisto passes arguments as strings, so the original
    ``bool(args.get('keep_user_data'))`` was True for the string 'false'.
    Compare against 'true' (case-insensitive) instead.
    """
    keep_user_data: bool = str(args.get('keep_user_data', 'false')).lower() == 'true'
    device_id: str = str(args.get('device_id'))
    client.clean_windows_device(keep_user_data, device_id, 'cleanWindowsDevice')
    return_outputs('Clean windows device action activated successfully.', {}, {})
def windows_device_defender_scan_command(client: MsGraphClient, args: dict) -> None:
    """Trigger a Windows Defender scan (quick or full) on the given device.

    BUG FIX: demisto passes arguments as strings, so the original
    ``bool(args.get('quick_scan'))`` was True for the string 'false'.
    Compare against 'true' (case-insensitive) instead.
    """
    quick_scan: bool = str(args.get('quick_scan', 'false')).lower() == 'true'
    device_id: str = str(args.get('device_id'))
    client.windows_device_defender_scan(quick_scan, device_id, 'windowsDefenderScan')
    return_outputs('Windows device defender scan action activated successfully.', {}, {})
def wipe_device_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'wipe' action on the given device.

    BUG FIX: demisto passes arguments as strings, so the original
    ``bool(...)`` conversions were True for the string 'false'.
    Compare against 'true' (case-insensitive) instead.
    """
    keep_enrollment_data: bool = str(args.get('keep_enrollment_data', 'false')).lower() == 'true'
    keep_user_data: bool = str(args.get('keep_user_data', 'false')).lower() == 'true'
    # NOTE(review): str() turns a missing code into the string 'None' —
    # presumably the API ignores it for non-macOS devices; verify.
    mac_os_unlock_code: str = str(args.get('mac_os_unlock_code'))
    device_id: str = str(args.get('device_id'))
    client.wipe_device(keep_enrollment_data, keep_user_data, mac_os_unlock_code, device_id, 'wipe')
    return_outputs('Wipe device action activated successfully.', {}, {})
def update_windows_device_account_command(client: MsGraphClient, args: dict) -> None:
    """Trigger the 'updateWindowsDeviceAccount' action on the given device.

    BUG FIX: demisto passes arguments as strings, so the original
    ``bool(...)`` conversions were True for the string 'false'.
    Compare against 'true' (case-insensitive) instead.
    """
    device_account_password: str = str(args.get('device_account_password'))
    password_rotation_enabled: bool = str(args.get('password_rotation_enabled', 'false')).lower() == 'true'
    calendar_sync_enabled: bool = str(args.get('calendar_sync_enabled', 'false')).lower() == 'true'
    device_account_email: str = str(args.get('device_account_email'))
    exchange_server: str = str(args.get('exchange_server'))
    session_initiation_protocal_address: str = str(args.get('session_initiation_protocal_address'))
    device_id: str = str(args.get('device_id'))
    client.update_windows_device_account(device_account_password, password_rotation_enabled, calendar_sync_enabled,
                                         device_account_email, exchange_server, session_initiation_protocal_address,
                                         device_id, 'updateWindowsDeviceAccount')
    return_outputs('Update windows device account action activated successfully.', {}, {})
''' MAIN '''
def main():
    """Build the MS Graph client from the integration params and dispatch."""
    args: dict = demisto.args()
    params: dict = demisto.params()
    self_deployed: bool = params.get('self_deployed', False)
    tenant_id: str = params.get('tenant_id', '')
    auth_and_token_url: str = params.get('auth_id', '')
    enc_key: str = params.get('enc_key', '')
    base_url: str = urljoin(params.get('url', ''), '/v1.0')
    app_name: str = 'ms-graph-device-management'
    ok_codes: tuple = (200, 201, 202, 204)
    use_ssl: bool = not params.get('insecure', False)
    proxy: bool = params.get('proxy', False)
    client: MsGraphClient = MsGraphClient(self_deployed, tenant_id, auth_and_token_url, enc_key, app_name, base_url,
                                          use_ssl, proxy, ok_codes)
    # Dispatch table: integration command name -> handler.
    commands = {
        'msgraph-list-managed-devices': list_managed_devices_command,
        'msgraph-get-managed-device-by-id': get_managed_device_command,
        'msgraph-device-disable-lost-mode': disable_lost_mode_command,
        'msgraph-locate-device': locate_device_command,
        'msgraph-sync-device': sync_device_command,
        'msgraph-device-reboot-now': device_reboot_now_command,
        'msgraph-device-shutdown': device_shutdown_command,
        'msgraph-device-bypass-activation-lock': device_bypass_activation_lock_command,
        'msgraph-device-retire': device_retire_command,
        'msgraph-device-reset-passcode': device_reset_passcode_command,
        'msgraph-device-remote-lock': device_remote_lock_command,
        'msgraph-device-request-remote-assistance': device_request_remote_assistance_command,
        'msgraph-device-recover-passcode': device_recover_passcode_command,
        'msgraph-logout-shared-apple-device-active-user': logout_shared_apple_device_active_user_command,
        'msgraph-delete-user-from-shared-apple-device': delete_user_from_shared_apple_device_command,
        'msgraph-windows-device-defender-update-signatures': windows_device_defender_update_signatures_command,
        'msgraph-clean-windows-device': clean_windows_device_command,
        'msgraph-windows-device-defender-scan': windows_device_defender_scan_command,
        'msgraph-wipe-device': wipe_device_command,
        'msgraph-update-windows-device-account': update_windows_device_account_command,
    }
    command: str = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        if command == 'test-module':
            # Connectivity check: just try to obtain a token.
            client.ms_client.get_access_token()
            demisto.results('ok')
        elif command in commands:
            commands[command](client, args)
    # log exceptions
    except Exception as err:
        return_error(str(err))
from MicrosoftApiModule import * # noqa: E402
# XSOAR executes integration code with __name__ == 'builtins'; cover both.
if __name__ in ['__main__', 'builtins']:
    main()
| |
from common_fixtures import * # NOQA
from test_services \
import service_with_healthcheck_enabled
from test_machine \
import action_on_digital_ocean_machine, get_dropletid_for_ha_hosts
# Session-level cache of {hostname: droplet_id} filled by get_host_droplets.
ha_droplets = []

# Skip the host-down suite unless all required environment variables are set.
# FIX: the original passed the last two conditions as extra positional
# arguments to skipif (separated by commas); they belong in one "or" chain.
if_test_host_down = pytest.mark.skipif(
    not os.environ.get('DIGITALOCEAN_KEY') or
    not os.environ.get('TEST_HOST_DOWN') or
    not os.environ.get('HOST_DISCONN_ACTIVE_TIMEOUT') or
    not os.environ.get('HOST_ACTIVE_DISCONN_TIMEOUT'),
    reason='HOST DOWN PARAMETERS not set')

# Timeouts (seconds) for the disconnected->active / active->disconnected
# agent-state transitions.
HOST_DISCONN_ACTIVE_TIMEOUT = os.environ.get('HOST_DISCONN_ACTIVE_TIMEOUT',
                                             900)
HOST_ACTIVE_DISCONN_TIMEOUT = os.environ.get('HOST_ACTIVE_DISCONN_TIMEOUT',
                                             900)
@pytest.fixture(scope='session', autouse=True)
def get_host_droplets(ha_hosts, socat_containers):
    """Cache the droplet-id mapping for the HA hosts once per session."""
    ha_droplets.append(get_dropletid_for_ha_hosts())
@pytest.fixture
def check_host_state_power_on(client):
    """Power hosts that are "disconnected" or "reconnecting" back on.

    Collects the droplet id for every inactive host, powers each droplet on
    through the Digital Ocean API, then waits for every host agent to report
    "active" again.
    """
    print("Power on hosts that are in disconnected or reconnecting state")
    print(ha_droplets)
    inactive_hosts = client.list_host(
        kind='docker', removed_null=True, agentState="disconnected").data
    print("Disconnected hosts:")
    print(inactive_hosts)
    reconn_hosts = client.list_host(
        kind='docker', removed_null=True, agentState="reconnecting").data
    print("Reconnecting hosts:")
    print(reconn_hosts)
    inactive_hosts_dropletids = []
    inactive_hosts_list = []
    # Get droplet Id and hosts from disconnected hosts
    for host in inactive_hosts:
        host_name = host.hostname
        print(host_name)
        droplet_id = ha_droplets[0][host_name]
        inactive_hosts_dropletids.append(droplet_id)
        inactive_hosts_list.append(host)
    # Get droplet Id and hosts from reconnecting hosts
    # and append to the inactive host/droplet lists
    for host in reconn_hosts:
        host_name = host.hostname
        print(host_name)
        droplet_id = ha_droplets[0][host_name]
        inactive_hosts_dropletids.append(droplet_id)
        inactive_hosts_list.append(host)
    print("List of all disconnected/reconnecting hosts")
    print(inactive_hosts_list)
    print(inactive_hosts_dropletids)
    # Power on the droplets
    for dropletid in inactive_hosts_dropletids:
        # BUG FIX: the original printed the stale variable "droplet_id" from
        # the collection loops above (and raised NameError when no host was
        # down); print the current loop variable instead.
        print("Power on droplet " + str(dropletid))
        action_on_digital_ocean_machine(dropletid, "power_on")
    # Wait for the host agent state to become active
    for host in inactive_hosts_list:
        print("In host wait method")
        wait_for_host_agent_state(client, host, "active", 600)
@if_test_host_down
def test_service_with_healthcheck_1_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A healthcheck-enabled service must reconcile after one host goes down."""
    host_down_with_healthcheck_services(client, host_down_count=1)
@if_test_host_down
def test_service_with_healthcheck_2_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A healthcheck-enabled service must reconcile after two hosts go down."""
    host_down_with_healthcheck_services(client, host_down_count=2)
@if_test_host_down
def test_service_with_healthcheck_3_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A healthcheck-enabled service must reconcile after three hosts go down."""
    host_down_with_healthcheck_services(client, host_down_count=3)
@if_test_host_down
def test_service_with_healthcheck_and_retainIp_2_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """Reconciled containers must keep their IPs when retainIp is enabled."""
    host_down_with_healthcheck_services(client, host_down_count=2, retainIp=True)
@if_test_host_down
def test_lbservice_with_healthcheck_1_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """An LB service must keep balancing after one backing host goes down."""
    host_down_with_lb_services(client, "7770", host_down_count=1)
@if_test_host_down
def test_lbservice_with_healthcheck_2_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """An LB service must keep balancing after two backing hosts go down."""
    host_down_with_lb_services(client, "7771", host_down_count=2)
@if_test_host_down
def test_global_lbservice_with_healthcheck_1_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global LB service must recover after one host goes down."""
    host_down_with_lb_services(client, "7772", host_down_count=1, globalf=True)
@if_test_host_down
def test_global_lbservice_with_healthcheck_2_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global LB service must recover after two hosts go down."""
    host_down_with_lb_services(client, "7773", host_down_count=2, globalf=True)
@if_test_host_down
def test_service_with_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A service without healthchecks must recover once its hosts return."""
    host_down_with_services(client, host_down_count=2)
@if_test_host_down
def test_global_service_with_host_down(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global service without healthchecks must recover once its hosts return."""
    host_down_with_services(client, host_down_count=2, globalf=True)
@if_test_host_down
def test_global_service_with_reconnecting_host(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global service must schedule onto a host returning from 'reconnecting'."""
    global_service_with_reconn_disconn_host(client, state="reconnecting")
@if_test_host_down
def test_global_service_with_disconnected_host(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global service must schedule onto a host returning from 'disconnected'."""
    global_service_with_reconn_disconn_host(client, state="disconnected")
def global_service_with_reconn_disconn_host(client, state):
    """Verify a global service reconciles around a powered-off host.

    Powers off one HA host, waits for it to reach the given agent state
    ("reconnecting" or "disconnected"), creates a global service, powers the
    host back on, and asserts the service ends up with exactly one instance
    on the recovered host.
    """
    # Pick one of the host and power down hosts
    host_down = ha_host_list[0]
    host_name = ha_host_list[0].hostname
    print("power down- " + host_name)
    action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
    wait_for_host_agent_state(client, host_down, state,
                              HOST_ACTIVE_DISCONN_TIMEOUT)
    # Create service
    launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
    launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
    service, env = create_env_and_svc(client, launch_config)
    service = service.activate()
    service = client.wait_success(service, 300)
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == get_service_instance_count(client, service)
    # Power on the host
    action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
    wait_for_host_agent_state(client, host_down, "active",
                              HOST_DISCONN_ACTIVE_TIMEOUT)
    service = wait_success(client, service, 300)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == get_service_instance_count(client, service)
    # The recovered host must carry exactly one instance of the global service.
    instance_list = get_containers_on_host_for_service(
        client, host_down, service)
    assert len(instance_list) == 1
@if_test_host_down
def test_global_service_with_inactive_host(
        client, ha_hosts, socat_containers,
        check_host_state_power_on):
    """A global service must reconcile when a host is deactivated/reactivated."""
    # Pick one of the host and deactivate this host
    host_down = ha_host_list[0]
    host_name = ha_host_list[0].hostname
    print("Deactivate " + host_name)
    host_down.deactivate()
    host_down = wait_for_condition(client,
                                   host_down,
                                   lambda x: x.state == "inactive",
                                   lambda x: 'Host state is ' + x.state,
                                   timeout=300)
    # Create service
    launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
    launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
    service, env = create_env_and_svc(client, launch_config)
    service = service.activate()
    service = wait_success(client, service, 300)
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == get_service_instance_count(client, service)
    # Activate the host that is in deactivated state
    print("Activate " + host_name)
    host_down.activate()
    host_down = wait_for_condition(client,
                                   host_down,
                                   lambda x: x.state == "active",
                                   lambda x: 'Host state is ' + x.state,
                                   timeout=300)
    service = wait_success(client, service, 300)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == get_service_instance_count(client, service)
    # The reactivated host must carry exactly one instance of the service.
    instance_list = get_containers_on_host_for_service(
        client, host_down, service)
    assert len(instance_list) == 1
def host_down_with_lb_services(client, lb_port, host_down_count,
                               scale=2, lb_scale=2, globalf=False):
    """Power off hosts backing an LB service and verify reconciliation.

    Creates an LB fronting two healthcheck-enabled services, powers off
    `host_down_count` hosts that carry LB instances, waits for the service to
    reconcile, validates the LB, then powers the hosts back on (and, for
    global services, verifies instances come back on the recovered hosts).
    """
    # Wait for hosts in "reconnecting" state to get to "active" state
    check_hosts_state(client)
    # Create environment with lb_service and 2 healthcheck enabled
    # service targets
    env, lb_service, service1, service2 = \
        env_with_lb_service_with_health_check_enabled_targets(
            client, lb_port, scale, lb_scale, globalf)
    # Pick hosts (and collect instances that will go unhealthy) that need
    # to be powered down
    down_hosts = []
    down_instances = []
    for i in range(0, len(ha_host_list)):
        host = ha_host_list[i]
        instance_list = \
            get_containers_on_host_for_service(client, host, lb_service)
        if len(instance_list) > 0:
            down_instances.extend(instance_list)
            down_hosts.append(host)
        if len(down_hosts) == host_down_count:
            break
    # Power Down hosts where lb service instances are running
    for host in down_hosts:
        host_name = host.hostname
        print("power down- " + host_name)
        print(ha_droplets[0])
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
    print("Waiting for the hosts to go to disconnected state")
    for host in down_hosts:
        wait_for_host_agent_state(client, host, "disconnected",
                                  HOST_ACTIVE_DISCONN_TIMEOUT)
    # Check for service reconcile
    check_for_service_reconcile(
        client, lb_service, down_instances,
        instance_list, globalf)
    validate_lb_service(client, lb_service,
                        lb_port, [service1, service2])
    # Power on hosts that were powered off
    for host in down_hosts:
        host_name = host.hostname
        print("power on- " + host_name)
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
    print("Waiting for the hosts to go to active state")
    for host in down_hosts:
        wait_for_host_agent_state(client, host, "active",
                                  HOST_DISCONN_ACTIVE_TIMEOUT)
    # if service is global, validate that new instances of the service gets
    # created on the host that gets powered on
    if (globalf):
        check_hosts_state(client)
        wait_for_scale_to_adjust(client, service1, timeout=300)
        wait_for_scale_to_adjust(client, service2, timeout=300)
        wait_for_scale_to_adjust(client, lb_service, timeout=300)
        validate_lb_service(client, lb_service,
                            lb_port, [service1, service2])
    delete_all(client, [env])
def host_down_with_healthcheck_services(client, host_down_count,
                                        retainIp=False):
    """Power off hosts backing a healthcheck-enabled service and verify
    that the service reconciles.

    With retainIp=True, also asserts that the recreated containers keep the
    IP address of the containers they replace (but get a new externalId).
    """
    # Wait for hosts in "reconnecting" state to get to "active" state
    check_hosts_state(client)
    # Create service that is healthcheck enabled
    scale = 10
    env, service = service_with_healthcheck_enabled(
        client, scale, retainIp=retainIp)
    # Pick hosts (and collect instances that will go unhealthy) that need
    # to be powered down
    down_hosts = []
    down_instances = []
    for i in range(0, len(ha_host_list)):
        host = ha_host_list[i]
        instance_list = \
            get_containers_on_host_for_service(client, host, service)
        if len(instance_list) > 0:
            down_instances.extend(instance_list)
            down_hosts.append(host)
        if len(down_hosts) == host_down_count:
            break
    # Power Down hosts where service instances are running
    for host in down_hosts:
        host_name = host.hostname
        print("power down- " + host_name)
        print(ha_droplets[0])
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
    print("Waiting for the hosts to go to disconnected state")
    for host in down_hosts:
        wait_for_host_agent_state(client, host, "disconnected",
                                  HOST_ACTIVE_DISCONN_TIMEOUT)
    # Check for service reconcile
    check_for_service_reconcile(
        client, service, down_instances, instance_list)
    # If retainIp is turned on , make sure that ip address assigned to
    # reconciled instances are the same
    if (retainIp):
        for con in down_instances:
            container_name = con.name
            containers = client.list_container(name=container_name,
                                               removed_null=True).data
            assert len(containers) == 1
            container = containers[0]
            assert container.primaryIpAddress == con.primaryIpAddress
            assert container.externalId != con.externalId
    # Power on hosts that were powered off
    # NOTE(review): the environment is deleted before the hosts are powered
    # back on — presumably intentional cleanup ordering; confirm.
    delete_all(client, [env])
    for host in down_hosts:
        host_name = host.hostname
        print("power on- " + host_name)
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
    print("Waiting for the hosts to go to active state")
    for host in down_hosts:
        wait_for_host_agent_state(client, host, "active",
                                  HOST_DISCONN_ACTIVE_TIMEOUT)
def host_down_with_services(client, host_down_count,
                            globalf=False):
    """Power off hosts backing a non-healthchecked service and verify that
    the containers come back to "running" once the hosts return.

    No reconcile happens while the hosts are down, because without
    healthchecks rancher-server keeps reporting the instances as running.
    """
    # Wait for hosts in "reconnecting" state to get to "active" state
    check_hosts_state(client)
    # Create service
    launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
    if globalf:
        launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
        scale = 0
    else:
        scale = 10
    service, env = create_env_and_svc(client, launch_config, scale)
    service = service.activate()
    service = client.wait_success(service, 300)
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == get_service_instance_count(client, service)
    # Pick hosts (and collect instances that will go unhealthy) that need
    # to be powered down
    down_hosts = []
    down_instances = []
    for i in range(0, len(ha_host_list)):
        host = ha_host_list[i]
        instance_list = \
            get_containers_on_host_for_service(client, host, service)
        if len(instance_list) > 0:
            down_instances.extend(instance_list)
            down_hosts.append(host)
        if len(down_hosts) == host_down_count:
            break
    # Power Down hosts where service instances are running
    for host in down_hosts:
        host_name = host.hostname
        print("power down- " + host_name)
        print(ha_droplets[0])
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
    print("Waiting for the hosts to go to disconnected state")
    for host in down_hosts:
        # FIX: the original waited with HOST_DISCONN_ACTIVE_TIMEOUT here;
        # the sibling helpers consistently use HOST_ACTIVE_DISCONN_TIMEOUT
        # for the active -> disconnected transition.
        wait_for_host_agent_state(client, host, "disconnected",
                                  HOST_ACTIVE_DISCONN_TIMEOUT)
    # There will be no service reconcile since the instances will continue
    # to be "running" state in rancher-server
    for con in down_instances:
        assert con.state == "running"
    service = client.reload(service)
    assert service.state == "active"
    # Power on hosts that were powered off .
    # "stopped" state of the containers on the host will get synced and
    # service reconcile will trigger containers to be started.
    for host in down_hosts:
        host_name = host.hostname
        print("power on- " + host_name)
        action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
    print("Waiting for the hosts to go to active state")
    for host in down_hosts:
        wait_for_host_agent_state(client, host, "active",
                                  HOST_DISCONN_ACTIVE_TIMEOUT)
    wait_for_condition(
        client, service,
        lambda x: x.state == 'active',
        lambda x: 'State is: ' + x.state)
    for con in down_instances:
        assert con.state == "running"
    delete_all(client, [env])
def get_containers_on_host_for_service(client, host, service):
    """Return the running instances of `service` scheduled on `host`."""
    hosts = client.list_host(
        kind='docker', removed_null=True, state='active', uuid=host.uuid,
        include="instances").data
    assert len(hosts) == 1
    matching = []
    for instance in hosts[0].instances:
        containers = client.list_container(
            state='running', uuid=instance.uuid, include="services").data
        assert len(containers) <= 1
        if not containers:
            continue
        container = containers[0]
        # Only count real service containers (createIndex set) that belong
        # to the service under test.
        if container.createIndex is not None \
                and container.services[0].id == service.id:
            matching.append(instance)
    return matching
def check_for_service_reconcile(client, service, unhealthy_con_list,
                                instance_list, globalf=False):
    """Verify the service reconciled after its hosts went down.

    Waits for each container in unhealthy_con_list to be removed/purged and
    the service scale to adjust, asserts all remaining containers are
    healthy, and asserts the previously-healthy containers from
    instance_list were left untouched.
    """
    # Validating the transient "unhealthy" healthState is deliberately
    # skipped: it is hard to catch reliably.
    for con in unhealthy_con_list:
        wait_for_condition(
            client, con,
            lambda x: x.state in ('removed', 'purged'),
            # FIX: the timeout message reported x.healthState although the
            # condition being waited on checks x.state.
            lambda x: 'State is: ' + x.state, timeout=120)
        wait_for_scale_to_adjust(client, service, timeout=300)
        con = client.reload(con)
        assert con.state in ('removed', 'purged')
    # Validate all instances in the service are healthy
    container_list = get_service_container_list(client, service)
    if globalf is False:
        assert len(container_list) == service.scale
    for con in container_list:
        wait_for_condition(
            client, con,
            lambda x: x.healthState == 'healthy',
            lambda x: 'State is: ' + x.healthState, timeout=120)
    # Validate all existing healthy instances in the service were not deleted
    # and recreated.
    # FIX: the original removed items from instance_list while iterating over
    # it, which skips elements; build the surviving list instead.
    unhealthy_names = {con.name for con in unhealthy_con_list}
    surviving = [con for con in instance_list
                 if con.name not in unhealthy_names]
    for healthy_con in surviving:
        healthy_con = client.reload(healthy_con)
        assert healthy_con.state == "running"
        assert healthy_con.healthState == "healthy"
    service = client.reload(service)
    assert service.state == "active"
    assert service.healthState == "healthy"
def check_hosts_state(client, timeout=300):
    """Block until no host reports the "disconnected" agent state.

    Raises Exception on timeout. After the last host leaves "disconnected",
    sleeps 30s to let freshly-active hosts settle before tests continue.
    """
    print("Check if host state is active")
    deadline = time.time() + timeout
    while True:
        time.sleep(.5)
        disconnected = client.list_host(
            kind='docker', removed_null=True, agentState="disconnected").data
        if len(disconnected) == 0:
            break
        if time.time() > deadline:
            raise Exception(
                'Timed out waiting for all hosts to be active in the setup')
    # Give some time for hosts that just got to "Active" state to settle down
    time.sleep(30)
    print("Host is Active")
def env_with_lb_service_with_health_check_enabled_targets(client,
                                                          lb_port,
                                                          scale=2, lb_scale=2,
                                                          globalf=False):
    """Create a stack with two healthcheck-enabled services behind an LB.

    Returns (env, lb_service, service1, service2) with all services active,
    all target containers healthy, and the LB validated against both targets.
    With globalf=True the services are scheduled globally (scale is ignored).
    """
    # Create Environment with 2 health check enabled service and 1 LB service
    health_check = {"name": "check1", "responseTimeout": 2000,
                    "interval": 2000, "healthyThreshold": 2,
                    "unhealthyThreshold": 3,
                    "requestLine": "GET /name.html HTTP/1.0",
                    "port": 80}
    launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID,
                     "healthCheck": health_check
                     }
    lb_launch_config = {"ports": [lb_port],
                        "imageUuid": get_haproxy_image()}
    if (globalf):
        launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
        lb_launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
        # Global services are scheduled one-per-host; explicit scale must
        # not be sent.
        scale = None
        lb_scale = None
    service1, env = create_env_and_svc(
        client, launch_config, scale)
    service1 = activate_svc(client, service1)
    container_list = get_service_container_list(client, service1)
    assert len(container_list) == get_service_instance_count(client, service1)
    for con in container_list:
        wait_for_condition(
            client, con,
            lambda x: x.healthState == 'healthy',
            lambda x: 'State is: ' + x.healthState)
    service2 = create_svc(client, env, launch_config, scale)
    service2 = activate_svc(client, service2)
    container_list = get_service_container_list(client, service2)
    assert len(container_list) == get_service_instance_count(client, service2)
    for con in container_list:
        wait_for_condition(
            client, con,
            lambda x: x.healthState == 'healthy',
            lambda x: 'State is: ' + x.healthState)
    port_rule1 = {"serviceId": service1.id,
                  "sourcePort": lb_port,
                  "targetPort": "80",
                  "protocol": "http"
                  }
    port_rule2 = {"serviceId": service2.id,
                  "sourcePort": lb_port,
                  "targetPort": "80",
                  "protocol": "http"
                  }
    lb_Config = {"portRules": [port_rule1, port_rule2]}
    lb_service = client.create_loadBalancerService(
        name="lb-1",
        stackId=env.id,
        launchConfig=lb_launch_config,
        scale=lb_scale,
        lbConfig=lb_Config)
    lb_service = client.wait_success(lb_service)
    assert lb_service.state == "inactive"
    lb_service = activate_svc(client, lb_service)
    service_link1 = {"serviceId": service1.id}
    service_link2 = {"serviceId": service2.id}
    lb_service.setservicelinks(
        serviceLinks=[service_link1, service_link2])
    validate_lb_service(client, lb_service,
                        lb_port, [service1, service2])
    return env, lb_service, service1, service2
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import logging
import sys
import unittest.mock
from collections import namedtuple
from datetime import date, datetime, timedelta
from subprocess import CalledProcessError
from typing import List
import funcsigs
import pytest
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun, TaskInstance as TI
from airflow.models.baseoperator import BaseOperator
from airflow.models.taskinstance import clear_task_instances, set_current_context
from airflow.models.xcom_arg import XComArg
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import (
BranchPythonOperator,
PythonOperator,
PythonVirtualenvOperator,
ShortCircuitOperator,
get_current_context,
task as task_decorator,
)
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.task_group import TaskGroup
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
# Fixed anchor dates so DAG runs in these tests are deterministic.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
END_DATE = timezone.datetime(2016, 1, 2)
INTERVAL = timedelta(hours=12)
# Frozen "current time" for tests that need a stable now().
FROZEN_NOW = timezone.datetime(2016, 1, 2, 12, 1, 1)

# Environment variables Airflow exports into the task execution context.
TI_CONTEXT_ENV_VARS = [
    'AIRFLOW_CTX_DAG_ID',
    'AIRFLOW_CTX_TASK_ID',
    'AIRFLOW_CTX_EXECUTION_DATE',
    'AIRFLOW_CTX_DAG_RUN_ID',
]
class Call:
    """Plain record of one invocation: its positional and keyword arguments."""

    def __init__(self, *args, **kwargs):
        # Keep the call signature verbatim for later comparison.
        self.args = args
        self.kwargs = kwargs
def build_recording_function(calls_collection):
    """Return a callable that appends a Call record for every invocation.

    A Mock instance cannot be used as a PythonOperator callable because it
    fails with "TypeError: Object of type Mock is not JSON serializable",
    so tests record invocations as plain Call objects and assert on those
    (replacing Mock.assert_called_with).
    """
    def _record(*args, **kwargs):
        calls_collection.append(Call(*args, **kwargs))
    return _record
class TestPythonBase(unittest.TestCase):
    """Base test class for TestPythonOperator and TestPythonSensor classes."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Start from a clean slate: no leftover dag runs or task instances.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def setUp(self):
        super().setUp()
        self.dag = DAG('test_dag', default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE})
        self.addCleanup(self.dag.clear)
        self.clear_run()
        self.addCleanup(self.clear_run)

    def tearDown(self):
        super().tearDown()
        # Remove whatever the test created in the metadata DB.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def clear_run(self):
        """Reset the flag flipped by the recording callables."""
        self.run = False

    def _assert_calls_equal(self, first, second):
        """Compare two Call records on args and the test-relevant kwargs."""
        self.assertIsInstance(first, Call)
        self.assertIsInstance(second, Call)
        self.assertTupleEqual(first.args, second.args)
        # Drop the Airflow context entries (conf, dag_run, task_instance,
        # etc.) — only the test-provided kwargs matter.
        relevant = ("an_int", "a_date", "a_templated_string")
        first.kwargs = {k: v for k, v in first.kwargs.items() if k in relevant}
        second.kwargs = {k: v for k, v in second.kwargs.items() if k in relevant}
        self.assertDictEqual(first.kwargs, second.kwargs)
class TestPythonOperator(TestPythonBase):
    def do_run(self):
        """Task callable: flip the flag checked by is_run()."""
        self.run = True
def is_run(self):
return self.run
def test_python_operator_run(self):
"""Tests that the python callable is invoked on task run."""
task = PythonOperator(python_callable=self.do_run, task_id='python_operator', dag=self.dag)
self.assertFalse(self.is_run())
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertTrue(self.is_run())
def test_python_operator_python_callable_is_callable(self):
"""Tests that PythonOperator will only instantiate if
the python_callable argument is callable."""
not_callable = {}
with self.assertRaises(AirflowException):
PythonOperator(python_callable=not_callable, task_id='python_operator', dag=self.dag)
not_callable = None
with self.assertRaises(AirflowException):
PythonOperator(python_callable=not_callable, task_id='python_operator', dag=self.dag)
def test_python_callable_arguments_are_templatized(self):
"""Test PythonOperator op_args are templatized"""
recorded_calls = []
# Create a named tuple and ensure it is still preserved
# after the rendering is done
Named = namedtuple('Named', ['var1', 'var2'])
named_tuple = Named('{{ ds }}', 'unchanged')
task = PythonOperator(
task_id='python_operator',
# a Mock instance cannot be used as a callable function or test fails with a
# TypeError: Object of type Mock is not JSON serializable
python_callable=build_recording_function(recorded_calls),
op_args=[4, date(2019, 1, 1), "dag {{dag.dag_id}} ran on {{ds}}.", named_tuple],
dag=self.dag,
)
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ds_templated = DEFAULT_DATE.date().isoformat()
self.assertEqual(1, len(recorded_calls))
self._assert_calls_equal(
recorded_calls[0],
Call(
4,
date(2019, 1, 1),
f"dag {self.dag.dag_id} ran on {ds_templated}.",
Named(ds_templated, 'unchanged'),
),
)
def test_python_callable_keyword_arguments_are_templatized(self):
"""Test PythonOperator op_kwargs are templatized"""
recorded_calls = []
task = PythonOperator(
task_id='python_operator',
# a Mock instance cannot be used as a callable function or test fails with a
# TypeError: Object of type Mock is not JSON serializable
python_callable=build_recording_function(recorded_calls),
op_kwargs={
'an_int': 4,
'a_date': date(2019, 1, 1),
'a_templated_string': "dag {{dag.dag_id}} ran on {{ds}}.",
},
dag=self.dag,
)
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertEqual(1, len(recorded_calls))
self._assert_calls_equal(
recorded_calls[0],
Call(
an_int=4,
a_date=date(2019, 1, 1),
a_templated_string="dag {} ran on {}.".format(
self.dag.dag_id, DEFAULT_DATE.date().isoformat()
),
),
)
def test_python_operator_shallow_copy_attr(self):
not_callable = lambda x: x
original_task = PythonOperator(
python_callable=not_callable,
task_id='python_operator',
op_kwargs={'certain_attrs': ''},
dag=self.dag,
)
new_task = copy.deepcopy(original_task)
# shallow copy op_kwargs
self.assertEqual(
id(original_task.op_kwargs['certain_attrs']), id(new_task.op_kwargs['certain_attrs'])
)
# shallow copy python_callable
self.assertEqual(id(original_task.python_callable), id(new_task.python_callable))
def test_conflicting_kwargs(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
# dag is not allowed since it is a reserved keyword
def func(dag):
# An ValueError should be triggered since we're using dag as a
# reserved keyword
raise RuntimeError(f"Should not be triggered, dag: {dag}")
python_operator = PythonOperator(
task_id='python_operator', op_args=[1], python_callable=func, dag=self.dag
)
with self.assertRaises(ValueError) as context:
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertTrue('dag' in context.exception, "'dag' not found in the exception")
def test_provide_context_does_not_fail(self):
"""
ensures that provide_context doesn't break dags in 2.0
"""
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
def func(custom, dag):
self.assertEqual(1, custom, "custom should be 1")
self.assertIsNotNone(dag, "dag should be set")
python_operator = PythonOperator(
task_id='python_operator',
op_kwargs={'custom': 1},
python_callable=func,
provide_context=True,
dag=self.dag,
)
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_context_with_conflicting_op_args(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
def func(custom, dag):
self.assertEqual(1, custom, "custom should be 1")
self.assertIsNotNone(dag, "dag should be set")
python_operator = PythonOperator(
task_id='python_operator', op_kwargs={'custom': 1}, python_callable=func, dag=self.dag
)
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_context_with_kwargs(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
def func(**context):
# check if context is being set
self.assertGreater(len(context), 0, "Context has not been injected")
python_operator = PythonOperator(
task_id='python_operator', op_kwargs={'custom': 1}, python_callable=func, dag=self.dag
)
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
class TestAirflowTaskDecorator(TestPythonBase):
    """Tests for the ``@task`` decorator (``task_decorator``) built on PythonOperator."""
    def test_python_operator_python_callable_is_callable(self):
        """Tests that @task will only instantiate if
        the python_callable argument is callable."""
        not_callable = {}
        with pytest.raises(AirflowException):
            task_decorator(not_callable, dag=self.dag)
    def test_fails_bad_signature(self):
        """Tests that @task will fail if signature is not binding."""
        @task_decorator
        def add_number(num: int) -> int:
            return num + 2
        with pytest.raises(TypeError):
            add_number(2, 3)  # pylint: disable=too-many-function-args
        with pytest.raises(TypeError):
            add_number()  # pylint: disable=no-value-for-parameter
        # Type hints are not enforced at call time, so a str still binds.
        add_number('test')  # pylint: disable=no-value-for-parameter
    def test_fail_method(self):
        """Tests that @task fails when applied to an instance method."""
        with pytest.raises(AirflowException):
            class Test:
                num = 2
                @task_decorator
                def add_number(self, num: int) -> int:
                    return self.num + num
            Test().add_number(2)
    def test_fail_multiple_outputs_key_type(self):
        """A non-str key in a multiple_outputs dict must fail the task at run time."""
        @task_decorator(multiple_outputs=True)
        def add_number(num: int):
            return {2: num}
        with self.dag:
            ret = add_number(2)
        # NOTE(review): run_id= receives a DagRunType member here while other tests in
        # this file pass run_type=; presumably DagRunType is str-valued so this works,
        # but confirm it is intended rather than a typo.
        self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        with pytest.raises(AirflowException):
            # pylint: disable=maybe-no-member
            ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
    def test_fail_multiple_outputs_no_dict(self):
        """A non-dict return with multiple_outputs=True must fail the task at run time."""
        @task_decorator(multiple_outputs=True)
        def add_number(num: int):
            return num
        with self.dag:
            ret = add_number(2)
        self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        with pytest.raises(AirflowException):
            # pylint: disable=maybe-no-member
            ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
    def test_python_callable_arguments_are_templatized(self):
        """Test @task op_args are templatized"""
        recorded_calls = []
        # Create a named tuple and ensure it is still preserved
        # after the rendering is done
        Named = namedtuple('Named', ['var1', 'var2'])
        named_tuple = Named('{{ ds }}', 'unchanged')
        task = task_decorator(
            # a Mock instance cannot be used as a callable function or test fails with a
            # TypeError: Object of type Mock is not JSON serializable
            build_recording_function(recorded_calls),
            dag=self.dag,
        )
        ret = task(4, date(2019, 1, 1), "dag {{dag.dag_id}} ran on {{ds}}.", named_tuple)
        self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)  # pylint: disable=maybe-no-member
        ds_templated = DEFAULT_DATE.date().isoformat()
        assert len(recorded_calls) == 1
        self._assert_calls_equal(
            recorded_calls[0],
            Call(
                4,
                date(2019, 1, 1),
                f"dag {self.dag.dag_id} ran on {ds_templated}.",
                Named(ds_templated, 'unchanged'),
            ),
        )
    def test_python_callable_keyword_arguments_are_templatized(self):
        """Test @task op_kwargs are templatized"""
        recorded_calls = []
        task = task_decorator(
            # a Mock instance cannot be used as a callable function or test fails with a
            # TypeError: Object of type Mock is not JSON serializable
            build_recording_function(recorded_calls),
            dag=self.dag,
        )
        ret = task(an_int=4, a_date=date(2019, 1, 1), a_templated_string="dag {{dag.dag_id}} ran on {{ds}}.")
        self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)  # pylint: disable=maybe-no-member
        assert len(recorded_calls) == 1
        self._assert_calls_equal(
            recorded_calls[0],
            Call(
                an_int=4,
                a_date=date(2019, 1, 1),
                a_templated_string="dag {} ran on {}.".format(
                    self.dag.dag_id, DEFAULT_DATE.date().isoformat()
                ),
            ),
        )
    def test_manual_task_id(self):
        """Test manually seting task_id"""
        @task_decorator(task_id='some_name')
        def do_run():
            return 4
        with self.dag:
            do_run()
            assert ['some_name'] == self.dag.task_ids
    def test_multiple_calls(self):
        """Test calling task multiple times in a DAG"""
        @task_decorator
        def do_run():
            return 4
        with self.dag:
            do_run()
            assert ['do_run'] == self.dag.task_ids
            # Repeated calls get auto-suffixed task ids.
            do_run_1 = do_run()
            do_run_2 = do_run()
            assert ['do_run', 'do_run__1', 'do_run__2'] == self.dag.task_ids
            assert do_run_1.operator.task_id == 'do_run__1'  # pylint: disable=maybe-no-member
            assert do_run_2.operator.task_id == 'do_run__2'  # pylint: disable=maybe-no-member
    def test_multiple_calls_in_task_group(self):
        """Test calling task multiple times in a TaskGroup"""
        @task_decorator
        def do_run():
            return 4
        group_id = "KnightsOfNii"
        with self.dag:
            with TaskGroup(group_id=group_id):
                do_run()
                # Task ids inside a group are prefixed with the group id.
                assert [f"{group_id}.do_run"] == self.dag.task_ids
                do_run()
                assert [f"{group_id}.do_run", f"{group_id}.do_run__1"] == self.dag.task_ids
        assert len(self.dag.task_ids) == 2
    def test_call_20(self):
        """Test calling decorated function 21 times in a DAG"""
        @task_decorator
        def __do_run():
            return 4
        with self.dag:
            __do_run()
            for _ in range(20):
                __do_run()
        assert self.dag.task_ids[-1] == '__do_run__20'
    def test_multiple_outputs(self):
        """Tests pushing multiple outputs as a dictionary"""
        @task_decorator(multiple_outputs=True)
        def return_dict(number: int):
            return {'number': number + 1, '43': 43}
        test_number = 10
        with self.dag:
            ret = return_dict(test_number)
        dr = self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)  # pylint: disable=maybe-no-member
        ti = dr.get_task_instances()[0]
        # Each dict entry is pushed as its own XCom, plus the whole dict as the default key.
        assert ti.xcom_pull(key='number') == test_number + 1
        assert ti.xcom_pull(key='43') == 43
        assert ti.xcom_pull() == {'number': test_number + 1, '43': 43}
    def test_default_args(self):
        """Test that default_args are captured when calling the function correctly"""
        @task_decorator
        def do_run():
            return 4
        with self.dag:
            ret = do_run()
        assert ret.operator.owner == 'airflow'  # pylint: disable=maybe-no-member
    def test_xcom_arg(self):
        """Tests that returned key in XComArg is returned correctly"""
        @task_decorator
        def add_2(number: int):
            return number + 2
        @task_decorator
        def add_num(number: int, num2: int = 2):
            return number + num2
        test_number = 10
        with self.dag:
            bigger_number = add_2(test_number)
            ret = add_num(bigger_number, XComArg(bigger_number.operator))  # pylint: disable=maybe-no-member
        dr = self.dag.create_dagrun(
            run_id=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        bigger_number.operator.run(  # pylint: disable=maybe-no-member
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE
        )
        ret.operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)  # pylint: disable=maybe-no-member
        ti_add_num = [ti for ti in dr.get_task_instances() if ti.task_id == 'add_num'][0]
        assert ti_add_num.xcom_pull(key=ret.key) == (test_number + 2) * 2  # pylint: disable=maybe-no-member
    def test_dag_task(self):
        """Tests dag.task property to generate task"""
        @self.dag.task
        def add_2(number: int):
            return number + 2
        test_number = 10
        res = add_2(test_number)
        add_2(res)
        assert 'add_2' in self.dag.task_ids
    def test_dag_task_multiple_outputs(self):
        """Tests dag.task property to generate task with multiple outputs"""
        @self.dag.task(multiple_outputs=True)
        def add_2(number: int):
            return {'1': number + 2, '2': 42}
        test_number = 10
        add_2(test_number)
        add_2(test_number)
        assert 'add_2' in self.dag.task_ids
    def test_airflow_task(self):
        """Tests airflow.task decorator to generate task"""
        from airflow.decorators import task
        @task
        def add_2(number: int):
            return number + 2
        test_number = 10
        with self.dag:
            add_2(test_number)
        assert 'add_2' in self.dag.task_ids
    def test_task_documentation(self):
        """Tests that task_decorator loads doc_md from function doc"""
        @task_decorator
        def add_2(number: int):
            """
            Adds 2 to number.
            """
            return number + 2
        test_number = 10
        with self.dag:
            ret = add_2(test_number)
        assert ret.operator.doc_md.strip(), "Adds 2 to number."  # pylint: disable=maybe-no-member
class TestBranchOperator(unittest.TestCase):
    """Tests for BranchPythonOperator: branch selection, skipping, and XCom behavior."""
    @classmethod
    def setUpClass(cls):
        # Remove DagRun/TaskInstance rows left over by other tests.
        super().setUpClass()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def setUp(self):
        # Two fixed downstream branches; branch_3 is created on demand per test.
        self.dag = DAG(
            'branch_operator_test',
            default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
        self.branch_2 = DummyOperator(task_id='branch_2', dag=self.dag)
        self.branch_3 = None
    def tearDown(self):
        super().tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def test_without_dag_run(self):
        """This checks the defensive against non existent tasks in a dag run"""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_1'
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(TI.dag_id == self.dag.dag_id, TI.execution_date == DEFAULT_DATE)
            for ti in tis:
                if ti.task_id == 'make_choice':
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == 'branch_1':
                    # should exist with state None
                    self.assertEqual(ti.state, State.NONE)
                elif ti.task_id == 'branch_2':
                    self.assertEqual(ti.state, State.SKIPPED)
                else:
                    raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_branch_list_without_dag_run(self):
        """This checks if the BranchPythonOperator supports branching off to a list of tasks."""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: ['branch_1', 'branch_2']
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag)
        self.branch_3.set_upstream(branch_op)
        self.dag.clear()
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(TI.dag_id == self.dag.dag_id, TI.execution_date == DEFAULT_DATE)
            # Both listed branches stay runnable (None); the unlisted one is skipped.
            expected = {
                "make_choice": State.SUCCESS,
                "branch_1": State.NONE,
                "branch_2": State.NONE,
                "branch_3": State.SKIPPED,
            }
            for ti in tis:
                if ti.task_id in expected:
                    self.assertEqual(ti.state, expected[ti.task_id])
                else:
                    raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_with_dag_run(self):
        """Chosen branch stays runnable and the other is skipped when a dag run exists."""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_1'
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_with_skip_in_branch_downstream_dependencies(self):
        """A task reachable from the chosen branch is not skipped even if also a direct child."""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_1'
        )
        branch_op >> self.branch_1 >> self.branch_2
        branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_with_skip_in_branch_downstream_dependencies2(self):
        """When branch_2 is chosen, branch_1 is skipped but branch_2 stays runnable."""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_2'
        )
        branch_op >> self.branch_1 >> self.branch_2
        branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.SKIPPED)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_xcom_push(self):
        """The branch decision is pushed to XCom by the branch operator."""
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_1'
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.xcom_pull(task_ids='make_choice'), 'branch_1')
    def test_clear_skipped_downstream_task(self):
        """
        After a downstream task is skipped by BranchPythonOperator, clearing the skipped task
        should not cause it to be executed.
        """
        branch_op = BranchPythonOperator(
            task_id='make_choice', dag=self.dag, python_callable=lambda: 'branch_1'
        )
        branches = [self.branch_1, self.branch_2]
        branch_op >> branches
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        for task in branches:
            task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
        children_tis = [ti for ti in tis if ti.task_id in branch_op.get_direct_relative_ids()]
        # Clear the children tasks.
        with create_session() as session:
            clear_task_instances(children_tis, session=session, dag=self.dag)
        # Run the cleared tasks again.
        for task in branches:
            task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        # Check if the states are correct after children tasks are cleared.
        for ti in dr.get_task_instances():
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
class TestShortCircuitOperator(unittest.TestCase):
    """Tests for ShortCircuitOperator: downstream skipping when the callable is falsy."""
    @classmethod
    def setUpClass(cls):
        # Remove DagRun/TaskInstance rows left over by other tests.
        super().setUpClass()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def tearDown(self):
        super().tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def test_without_dag_run(self):
        """This checks the defensive against non existent tasks in a dag run"""
        value = False
        dag = DAG(
            'shortcircuit_operator_test_without_dag_run',
            default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        # The callable closes over `value` so the same operator can be re-run
        # with a different outcome below.
        short_op = ShortCircuitOperator(task_id='make_choice', dag=dag, python_callable=lambda: value)
        branch_1 = DummyOperator(task_id='branch_1', dag=dag)
        branch_1.set_upstream(short_op)
        branch_2 = DummyOperator(task_id='branch_2', dag=dag)
        branch_2.set_upstream(branch_1)
        upstream = DummyOperator(task_id='upstream', dag=dag)
        upstream.set_downstream(short_op)
        dag.clear()
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date == DEFAULT_DATE)
            for ti in tis:
                if ti.task_id == 'make_choice':
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == 'upstream':
                    # should not exist
                    raise ValueError(f'Invalid task id {ti.task_id} found!')
                elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                    self.assertEqual(ti.state, State.SKIPPED)
                else:
                    raise ValueError(f'Invalid task id {ti.task_id} found!')
        value = True
        dag.clear()
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        # NOTE(review): `tis` is the query built under the session context above; iterating
        # it here re-executes it after that context exited — confirm the session semantics
        # of create_session() make this intentional rather than accidental.
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'upstream':
                # should not exist
                raise ValueError(f'Invalid task id {ti.task_id} found!')
            elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_with_dag_run(self):
        """Falsy callable skips downstream tasks; truthy leaves them runnable."""
        value = False
        dag = DAG(
            'shortcircuit_operator_test_with_dag_run',
            default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        short_op = ShortCircuitOperator(task_id='make_choice', dag=dag, python_callable=lambda: value)
        branch_1 = DummyOperator(task_id='branch_1', dag=dag)
        branch_1.set_upstream(short_op)
        branch_2 = DummyOperator(task_id='branch_2', dag=dag)
        branch_2.set_upstream(branch_1)
        upstream = DummyOperator(task_id='upstream', dag=dag)
        upstream.set_downstream(short_op)
        dag.clear()
        logging.error("Tasks %s", dag.tasks)
        dr = dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 4)
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'upstream':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
        value = True
        dag.clear()
        dr.verify_integrity()
        upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 4)
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'upstream':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
    def test_clear_skipped_downstream_task(self):
        """
        After a downstream task is skipped by ShortCircuitOperator, clearing the skipped task
        should not cause it to be executed.
        """
        dag = DAG(
            'shortcircuit_clear_skipped_downstream_task',
            default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        short_op = ShortCircuitOperator(task_id='make_choice', dag=dag, python_callable=lambda: False)
        downstream = DummyOperator(task_id='downstream', dag=dag)
        short_op >> downstream
        dag.clear()
        dr = dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        downstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'downstream':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
        # Clear downstream
        with create_session() as session:
            clear_task_instances([t for t in tis if t.task_id == "downstream"], session=session, dag=dag)
        # Run downstream again
        downstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        # Check if the states are correct.
        for ti in dr.get_task_instances():
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'downstream':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
# Module-level global read by callables run through PythonVirtualenvOperator's
# string_args mechanism (see TestPythonVirtualenvOperator.test_string_args).
virtualenv_string_args: List[str] = []
class TestPythonVirtualenvOperator(unittest.TestCase):
    """Tests for PythonVirtualenvOperator: venv creation, requirements, serialization, context."""

    def setUp(self):
        super().setUp()
        self.dag = DAG(
            'test_dag',
            default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        self.addCleanup(self.dag.clear)

    def _run_as_operator(self, fn, python_version=sys.version_info[0], **kwargs):
        """Wrap *fn* in a PythonVirtualenvOperator, run it once, and return the task."""
        task = PythonVirtualenvOperator(
            python_callable=fn, python_version=python_version, task_id='task', dag=self.dag, **kwargs
        )
        task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        return task

    def test_add_dill(self):
        """use_dill=True must add dill to the venv requirements automatically."""
        def f():
            pass

        task = self._run_as_operator(f, use_dill=True, system_site_packages=False)
        assert 'dill' in task.requirements

    def test_no_requirements(self):
        """Tests that the python callable is invoked on task run."""
        def f():
            pass

        self._run_as_operator(f)

    def test_no_system_site_packages(self):
        """Without system site packages, host-only modules must be unimportable in the venv."""
        def f():
            try:
                import funcsigs  # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import
            except ImportError:
                return True
            raise Exception

        self._run_as_operator(f, system_site_packages=False, requirements=['dill'])

    def test_system_site_packages(self):
        def f():
            import funcsigs  # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import

        self._run_as_operator(f, requirements=['funcsigs'], system_site_packages=True)

    def test_with_requirements_pinned(self):
        """A pinned requirement version must be the one installed in the venv."""
        self.assertNotEqual('0.4', funcsigs.__version__, 'Please update this string if this fails')

        def f():
            import funcsigs  # noqa: F401 # pylint: disable=redefined-outer-name,reimported
            if funcsigs.__version__ != '0.4':
                raise Exception

        self._run_as_operator(f, requirements=['funcsigs==0.4'])

    def test_unpinned_requirements(self):
        def f():
            import funcsigs  # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import

        self._run_as_operator(f, requirements=['funcsigs', 'dill'], system_site_packages=False)

    def test_range_requirements(self):
        def f():
            import funcsigs  # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import

        self._run_as_operator(f, requirements=['funcsigs>1.0', 'dill'], system_site_packages=False)

    def test_fail(self):
        """An exception in the callable surfaces as a CalledProcessError from the subprocess."""
        def f():
            raise Exception

        with self.assertRaises(CalledProcessError):
            self._run_as_operator(f)

    def test_python_2(self):
        # dict.iteritems() only exists on Python 2, so this validates the venv interpreter.
        def f():
            {}.iteritems()  # pylint: disable=no-member

        self._run_as_operator(f, python_version=2, requirements=['dill'])

    def test_python_2_7(self):
        def f():
            {}.iteritems()  # pylint: disable=no-member
            return True

        self._run_as_operator(f, python_version='2.7', requirements=['dill'])

    def test_python_3(self):
        def f():
            import sys  # pylint: disable=reimported,unused-import,redefined-outer-name
            print(sys.version)
            try:
                {}.iteritems()  # pylint: disable=no-member
            except AttributeError:
                return
            raise Exception

        self._run_as_operator(f, python_version=3, use_dill=False, requirements=['dill'])

    @staticmethod
    def _invert_python_major_version():
        """Return the major Python version opposite to the one running the tests."""
        if sys.version_info[0] == 2:
            return 3
        else:
            return 2

    def test_wrong_python_op_args(self):
        """op_args pickled by one major version cannot be consumed by the other."""
        # Reuse the helper instead of duplicating the version-inversion logic
        # (test_string_args already uses it).
        version = self._invert_python_major_version()

        def f():
            pass

        with self.assertRaises(AirflowException):
            self._run_as_operator(f, python_version=version, op_args=[1])

    def test_without_dill(self):
        def f(a):
            return a

        self._run_as_operator(f, system_site_packages=False, use_dill=False, op_args=[4])

    def test_string_args(self):
        """string_args are delivered via the module-level virtualenv_string_args global."""
        def f():
            global virtualenv_string_args  # pylint: disable=global-statement
            print(virtualenv_string_args)
            if virtualenv_string_args[0] != virtualenv_string_args[2]:
                raise Exception

        self._run_as_operator(f, python_version=self._invert_python_major_version(), string_args=[1, 2, 1])

    def test_with_args(self):
        def f(a, b, c=False, d=False):
            if a == 0 and b == 1 and c and not d:
                return True
            else:
                raise Exception

        self._run_as_operator(f, op_args=[0, 1], op_kwargs={'c': True})

    def test_return_none(self):
        def f():
            return None

        self._run_as_operator(f)

    def test_lambda(self):
        # Lambdas cannot be serialized for the venv subprocess, so construction must fail.
        with self.assertRaises(AirflowException):
            PythonVirtualenvOperator(python_callable=lambda x: 4, task_id='task', dag=self.dag)

    def test_nonimported_as_arg(self):
        def f(_):
            return None

        self._run_as_operator(f, op_args=[datetime.utcnow()])

    def test_context(self):
        def f(templates_dict):
            return templates_dict['ds']

        self._run_as_operator(f, templates_dict={'ds': '{{ ds }}'})

    def test_airflow_context(self):
        """Full Airflow context (incl. airflow-specific objects) requires dill + site packages."""
        def f(
            # basic
            ds_nodash,
            inlets,
            next_ds,
            next_ds_nodash,
            outlets,
            params,
            prev_ds,
            prev_ds_nodash,
            run_id,
            task_instance_key_str,
            test_mode,
            tomorrow_ds,
            tomorrow_ds_nodash,
            ts,
            ts_nodash,
            ts_nodash_with_tz,
            yesterday_ds,
            yesterday_ds_nodash,
            # pendulum-specific
            execution_date,
            next_execution_date,
            prev_execution_date,
            prev_execution_date_success,
            prev_start_date_success,
            # airflow-specific
            macros,
            conf,
            dag,
            dag_run,
            task,
            # other
            **context,
        ):  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
            pass

        self._run_as_operator(f, use_dill=True, system_site_packages=True, requirements=None)

    def test_pendulum_context(self):
        """Pendulum context values work without system site packages if pendulum is installed."""
        def f(
            # basic
            ds_nodash,
            inlets,
            next_ds,
            next_ds_nodash,
            outlets,
            params,
            prev_ds,
            prev_ds_nodash,
            run_id,
            task_instance_key_str,
            test_mode,
            tomorrow_ds,
            tomorrow_ds_nodash,
            ts,
            ts_nodash,
            ts_nodash_with_tz,
            yesterday_ds,
            yesterday_ds_nodash,
            # pendulum-specific
            execution_date,
            next_execution_date,
            prev_execution_date,
            prev_execution_date_success,
            prev_start_date_success,
            # other
            **context,
        ):  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
            pass

        self._run_as_operator(
            f, use_dill=True, system_site_packages=False, requirements=['pendulum', 'lazy_object_proxy']
        )

    def test_base_context(self):
        """String-only context values need no extra requirements in the venv."""
        def f(
            # basic
            ds_nodash,
            inlets,
            next_ds,
            next_ds_nodash,
            outlets,
            params,
            prev_ds,
            prev_ds_nodash,
            run_id,
            task_instance_key_str,
            test_mode,
            tomorrow_ds,
            tomorrow_ds_nodash,
            ts,
            ts_nodash,
            ts_nodash_with_tz,
            yesterday_ds,
            yesterday_ds_nodash,
            # other
            **context,
        ):  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
            pass

        self._run_as_operator(f, use_dill=True, system_site_packages=False, requirements=None)
# Default operator arguments for the current-context tests below.
DEFAULT_ARGS = {
    "owner": "test",
    "depends_on_past": True,
    "start_date": days_ago(1),
    "end_date": datetime.today(),
    "schedule_interval": "@once",
    "retries": 1,
    "retry_delay": timedelta(minutes=1),
}
class TestCurrentContext:
    """Unit tests for the set_current_context/get_current_context stack behavior."""

    def test_current_context_no_context_raise(self):
        """Outside any context manager, get_current_context must raise."""
        with pytest.raises(AirflowException):
            get_current_context()

    def test_current_context_roundtrip(self):
        """The context set via set_current_context is the one returned inside the block."""
        ctx = {"Hello": "World"}
        with set_current_context(ctx):
            assert get_current_context() == ctx

    def test_context_removed_after_exit(self):
        """Leaving the with-block must pop the context again."""
        ctx = {"Hello": "World"}
        with set_current_context(ctx):
            pass
        with pytest.raises(
            AirflowException,
        ):
            get_current_context()

    def test_nested_context(self):
        """
        Nested execution context should be supported in case the user uses multiple context managers.
        Each time the execute method of an operator is called, we set a new 'current' context.
        This test verifies that no matter how many contexts are entered - order is preserved
        """
        depth = 15
        entered = []
        # Enter `depth` nested contexts (equivalent to that many nested with-statements).
        for idx in range(depth):
            manager = set_current_context({"ContextId": idx})
            manager.__enter__()  # pylint: disable=E1101
            entered.append(manager)
        # Unwind in reverse: the stack is LIFO, so the innermost context is current.
        for idx in reversed(range(depth)):
            assert get_current_context()["ContextId"] == idx
            entered[idx].__exit__(None, None, None)
class MyContextAssertOperator(BaseOperator):
    """Operator whose execute() checks that the context argument it receives
    matches what get_current_context() reports."""

    def execute(self, context):
        # The injected context and the thread-local current context must agree.
        assert context == get_current_context()
def get_all_the_context(**context):
    """Old-style ``**context`` callable: the kwargs Airflow injects must equal
    the context reported by get_current_context()."""
    assert get_current_context() == context
@pytest.fixture()
def clear_db():
    """Wipe DagRun state before and after each test that uses this fixture."""
    clear_db_runs()
    yield
    clear_db_runs()
@pytest.mark.usefixtures("clear_db")
class TestCurrentContextRuntime:
    """End-to-end checks that get_current_context() works inside real task runs."""

    def test_context_in_task(self):
        """An operator subclass sees identical context via argument and accessor."""
        with DAG(dag_id="assert_context_dag", default_args=DEFAULT_ARGS):
            op = MyContextAssertOperator(task_id="assert_context")
            op.run(ignore_first_depends_on_past=True, ignore_ti_state=True)

    def test_get_context_in_old_style_context_task(self):
        """A PythonOperator callable taking **context also sees the current context."""
        with DAG(dag_id="edge_case_context_dag", default_args=DEFAULT_ARGS):
            op = PythonOperator(python_callable=get_all_the_context, task_id="get_all_the_context")
            op.run(ignore_first_depends_on_past=True, ignore_ti_state=True)
@pytest.mark.parametrize(
    "choice,expected_states",
    [
        ("task1", [State.SUCCESS, State.SUCCESS, State.SUCCESS]),
        ("join", [State.SUCCESS, State.SKIPPED, State.SUCCESS]),
    ],
)
def test_empty_branch(choice, expected_states):
    """
    Tests that BranchPythonOperator handles empty branches properly.
    """
    # Diamond DAG: branch -> {task1, join}, task1 -> join.  When the branch
    # chooses "join" directly, task1 must end up SKIPPED, not failed.
    with DAG(
        'test_empty_branch',
        start_date=DEFAULT_DATE,
    ) as dag:
        branch = BranchPythonOperator(task_id='branch', python_callable=lambda: choice)
        task1 = DummyOperator(task_id='task1')
        join = DummyOperator(task_id='join', trigger_rule="none_failed_or_skipped")
        branch >> [task1, join]
        task1 >> join
    dag.clear(start_date=DEFAULT_DATE)
    task_ids = ["branch", "task1", "join"]
    tis = {}
    for task_id in task_ids:
        task_instance = TI(dag.get_task(task_id), execution_date=DEFAULT_DATE)
        tis[task_id] = task_instance
        # Run each task instance in declaration order; skipped tasks still "run".
        task_instance.run()
    def get_state(ti):
        # Re-read from the DB so we observe the state that was persisted.
        ti.refresh_from_db()
        return ti.state
    assert [get_state(tis[task_id]) for task_id in task_ids] == expected_states
| |
from copy import deepcopy
import datetime
import time
import stat
import os
import mimetypes
import email
import hashlib
from abc import ABCMeta, abstractmethod
from pymongo.cursor import Cursor
from tornado.web import Application, StaticFileHandler, HTTPError, URLSpec, RequestHandler
from tornado.template import BaseLoader, Template
from engine.utils.db import DataProvider, DEFAULT_HOST, DEFAULT_PORT
from engine.utils.dictutils import JSON, encode_data, decode_data, get_content_type, MSGPACK
from engine.utils.mathutils import random_id
from engine.utils.pathutils import norm_path, file_path
class CherryURLSpec(URLSpec):
    """URLSpec variant that prepends an anchored prefix to the pattern and
    guarantees every spec has a name.

    Instances compare equal by name — either to another spec or to a plain
    string — which makes them easy to look up in handler lists.
    """

    def __init__(self, pattern, handler_class, kwargs=None, name=None, prefix=''):
        # Anchor the prefix so tornado matches from the start of the path.
        if not prefix.startswith('^'):
            prefix = '^{}'.format(prefix)
        pattern = '{}{}'.format(prefix, pattern)
        # Auto-generate a unique name when none is given, so the spec stays
        # addressable (see __eq__).
        name = name or 'ch-{}'.format(random_id(8))
        super(CherryURLSpec, self).__init__(pattern, handler_class, kwargs, name)

    def __eq__(self, other):
        # Allow comparison against a bare name string as well as another spec.
        if isinstance(other, str):
            return self.name == other
        else:
            return self.name == other.name

    def __hash__(self):
        # BUG FIX: defining __eq__ sets __hash__ to None (instances become
        # unhashable in Python 3) unless __hash__ is restored explicitly.
        # Hash by name to stay consistent with __eq__.
        return hash(self.name)
class CherryTemplateLoader(BaseLoader):
    """Template loader that resolves template names against a list of
    directories on disk (first existing match wins)."""

    def __init__(self, path, **kwargs):
        super(CherryTemplateLoader, self).__init__(**kwargs)
        # Normalize every search directory once, up front.
        self.path = [norm_path(directory) for directory in path]

    def resolve_path(self, name, parent_path=None):
        # Return the first location among self.path that contains `name`.
        return file_path(name, self.path)

    def _create_template(self, name):
        with open(name, 'rb') as template_file:
            return Template(template_file.read(), name=name, loader=self)
class CrossDomainHandler(RequestHandler):
    """Serves a permissive Flash cross-domain policy (crossdomain.xml)."""

    # The policy document is constant, so build it once at class level.
    POLICY = (
        '<?xml version="1.0"?>\n'
        '<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">\n'
        '<cross-domain-policy>'
        '<allow-access-from domain="*" secure="false"/>'
        '</cross-domain-policy>')

    def get(self, *args, **kwargs):
        self.set_header('Content-type', 'text/x-cross-domain-policy')
        self.write(self.POLICY)
class CherryRequestHandler(RequestHandler):
    """This handler uses CherryTemplateLoader, so you can load templates from list of locations.
    Just provide iterable with path in handler's template_path named argument.
    """

    def initialize(self, templates_path=(), **kwargs):
        # Per-handler template directories; the application-wide template_path
        # (if configured) is searched first.
        templates_path = list(templates_path)
        try:
            templates_path.insert(0, self.application.settings['template_path'])
        except KeyError:
            pass
        self.templates_path = list(map(norm_path, templates_path))

    def get_template_path(self):
        # Tornado caches one loader per "template path" string; return a
        # synthetic key unique to this combination of directories so handlers
        # with different search paths do not share a loader.
        return 'ch-template-{}'.format(':'.join(self.templates_path))

    def create_template_loader(self, template_path):
        # Mirrors RequestHandler.create_template_loader, but builds a
        # CherryTemplateLoader over all configured directories.
        settings = self.application.settings
        if "template_loader" in settings:
            return settings["template_loader"]
        kwargs = {}
        if "autoescape" in settings:
            # autoescape=None means "no escaping", so we have to be sure
            # to only pass this kwarg if the user asked for it.
            kwargs["autoescape"] = settings["autoescape"]
        return CherryTemplateLoader(self.templates_path, **kwargs)
class CherryStaticHandler(StaticFileHandler):
    """This slightly modified static file handler can host files from multiple locations
    """

    def initialize(self, path=(), default_filename=None):
        # Accept a single directory as a plain string for convenience.
        if isinstance(path, str):
            path = path,
        self.path = list(map(norm_path, path))

    # TODO: split this copypasted from tornado spaghetti to several methods to simplify subclassing, until they fix it.
    def get(self, path, include_body=True):
        """Serve *path* from the first search directory that contains it.

        Emits Last-Modified / Cache-Control / Expires / Etag headers like
        tornado's StaticFileHandler, honours If-Modified-Since with a 304,
        and raises HTTPError(404) when the file is in none of self.path.
        """
        try:
            path = file_path(path, self.path)
        except OSError:
            # Not found in any configured directory.
            raise HTTPError(404)
        stat_result = os.stat(path)
        modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
        self.set_header("Last-Modified", modified)
        mime_type, encoding = mimetypes.guess_type(path)
        if mime_type:
            self.set_header("Content-Type", mime_type)
        cache_time = self.get_cache_time(path, modified, mime_type)
        if cache_time > 0:
            self.set_header("Expires", datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))
        else:
            self.set_header("Cache-Control", "public")
        self.set_extra_headers(path)
        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = self.request.headers.get("If-Modified-Since")
        if ims_value is not None:
            date_tuple = email.utils.parsedate(ims_value)
            # BUG FIX: parsedate() returns None for a malformed header, which
            # previously crashed in time.mktime(); ignore unparseable dates.
            if date_tuple is not None:
                if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
                if if_since >= modified:
                    self.set_status(304)
                    return
        with open(path, "rb") as f:
            data = f.read()
            hasher = hashlib.sha1()
            hasher.update(data)
            self.set_header("Etag", '"%s"' % hasher.hexdigest())
            if include_body:
                self.write(data)
            else:
                assert self.request.method == "HEAD"
                self.set_header("Content-Length", len(data))
class DataHandler(RequestHandler):
    """Request handler that (de)serializes request and response bodies using a
    pluggable wire format (msgpack by default)."""

    # Default wire format; can be overridden per subclass or via initialize().
    data_format = MSGPACK

    def initialize(self, data_format=None, **kwargs):
        if data_format is not None:
            self.data_format = data_format

    def encode_data(self, data):
        # Serialize *data* into the configured wire format.
        return encode_data(data, self.data_format)

    def decode_data(self, data):
        # Deserialize raw request bytes from the configured wire format.
        return decode_data(data.decode('utf-8'), self.data_format)

    # Cache of the decoded request body; None means "not decoded yet".
    _request_data = None

    def get_request_data(self):
        """Decode and cache the request body; an undecodable body yields {}."""
        if self._request_data is None:
            try:
                self._request_data = self.decode_data(self.request.body)
            except (TypeError, ValueError):
                self._request_data = {}
        return self._request_data

    # Sentinel mirroring tornado's own missing-default marker.
    _ARG_DEFAULT = []

    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
        # Prefer arguments carried in the decoded body; fall back to
        # query-string / form arguments via tornado.
        try:
            return self.get_request_data()[name]
        except KeyError:
            return super(DataHandler, self).get_argument(name, default=default, strip=strip)

    def respond(self, data=None):
        """Write *data* encoded in the configured format with a matching Content-Type."""
        self.set_header('Content-Type', get_content_type(self.data_format))
        self.write(self.encode_data(data))
class JSONPHandler(DataHandler):
    """DataHandler that wraps its JSON payload in a JSONP callback invocation."""

    data_format = JSON
    jsonp_template = '{callback}({data});'
    # Name of the request argument carrying the client-side callback name.
    jsonp_callback_argument = 'jsonp_callback'

    def respond(self, data=None):
        """Write *data* as ``callback(json);`` with a text/javascript content type."""
        callback = self.get_argument(self.jsonp_callback_argument)
        # BUG FIX: the template uses *named* replacement fields, so keyword
        # arguments are required; the positional .format(callback, data)
        # raised KeyError('callback') at runtime.
        payload = self.jsonp_template.format(callback=callback, data=self.encode_data(data))
        self.set_header('Content-Type', 'text/javascript')
        self.write(payload)
# Some abstract handlers here
class AbstractCollectionHandler(DataHandler, metaclass=ABCMeta):
    """Abstract class for handlers which supposed to provide access to collections, stored in the DB or memory.
    """

    @abstractmethod
    def query_documents(self, **kwargs):
        """Implement this method with functionality to query documents from database.
        Also query can be populated with some default parameters.

        :param kwargs:
        :return: iterable with documents in collection, matched corresponding query.
        """

    def get_documents(self, **kwargs):
        # Thin hook over query_documents; subclasses may add caching etc.
        return self.query_documents(**kwargs)

    @abstractmethod
    def get_ids(self):
        """Implement this method to get all document's ids in collection.
        """

    @abstractmethod
    def get_document(self, _id):
        """Implement this method to get document with provided _id.

        :param _id: Document's id.
        :return: Matched document from collection.
        :raises KeyError: If document with provided id not found in the collection.
        """

    def prepare(self):
        """Disable client and proxy caching for every collection response."""
        self.set_header('Expires', '0')
        # BUG FIX: HTTP-dates use the abbreviated month *name*, so the format
        # directive must be %b, not %m — %m produced an invalid numeric-month
        # date such as "Mon, 01 02 2021 10:00:00 GMT".
        self.set_header('Last-Modified', datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S') + ' GMT')
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate')
        self.set_header('Cache-Control', 'pre-check=0, post-check=0, max-age=0')
        self.set_header('Pragma', 'no-cache')
class CollectionHandler(AbstractCollectionHandler):
    """Mongo collection handler. Provide access to collections, stored in mongodb.
    """

    data_format = JSON

    def initialize(self, db=None, collection=None, host=DEFAULT_HOST, port=DEFAULT_PORT,
                   use_cache=False, data_format=JSON):
        # db/collection may come either from the URL spec kwargs or from
        # class attributes set by a subclass; one of the two is required.
        if db is None:
            try:
                db = self.db
            except AttributeError:
                raise AttributeError('Either set db name in handler arguments or set it as an attribute')
        else:
            self.db = db
        if collection is None:
            try:
                collection = getattr(self, 'collection')
            except AttributeError:
                raise AttributeError('Either set collection name in handler arguments or set it as an attribute')
        else:
            self.collection = collection
        self.data_provider = DataProvider(db, collection, host=host, port=port)
        self.data_format = data_format

    def query_documents(self, **kwargs):
        # Delegate straight to pymongo's find() via the DataProvider.
        return self.data_provider.find(**kwargs)

    def get_ids(self):
        return list(self.data_provider.keys())

    def get_document(self, _id):
        return self.data_provider.get(_id)
class CollectionDumper(CollectionHandler):
    """Read-only endpoint that dumps documents (or just their ids) from the
    underlying mongo collection."""

    def respond(self, data=None):
        # Cursors are lazy and not serializable; materialize them first.
        if isinstance(data, Cursor):
            data = list(data)
        super(CollectionDumper, self).respond(data)

    def get(self, *args, **kwargs):
        requested_ids = self.get_arguments('ids')
        if requested_ids:
            # Explicit id list requested: return only the matching documents.
            self.respond(self.get_documents(spec={'_id': {'$in': requested_ids}}, **kwargs))
        elif self.get_argument('keys', False):
            # Only the document ids were requested.
            self.respond(self.get_ids())
        else:
            # Default: dump the entire collection.
            self.respond(self.get_documents())
class CollectionCRUD(CollectionDumper):
    """Adds create/update/delete HTTP verbs on top of CollectionDumper."""

    def generate_document_id(self, document):
        # Assign a random _id only when the client did not supply one.
        document.setdefault('_id', random_id())
        return document

    def save_document(self, document):
        self.data_provider.save(document)

    def put_document(self, document_id, document):
        # Partial ($set) update; upsert creates the document when absent.
        self.data_provider.update(document_id, {'$set': document}, upsert=True)

    def delete_document(self, document_id):
        self.data_provider.remove(document_id)

    def post(self, *args, **kwargs):
        """Create a new document from the request body and echo it back."""
        document = decode_data(self.request.body, self.data_format)
        document = self.generate_document_id(document)
        self.save_document(document)
        self.respond(document)

    def put(self, *args, **kwargs):
        """Update the document addressed by the body's _id or the URL kwarg.

        NOTE(review): kwargs['id'] is evaluated eagerly even when the body
        carries an _id, so routes without an `id` capture group raise
        KeyError here — confirm that is intended.
        """
        document = decode_data(self.request.body, self.data_format)
        document_id = document.pop('_id', kwargs['id'])
        self.put_document(document_id, document)
        self.respond(document)

    def delete(self, *args, **kwargs):
        """Delete the document addressed by the URL's `id` capture group."""
        document_id = kwargs['id']
        self.delete_document(document_id)
_DEFAULT_HOST = '.*$'


def add_handler(application, spec, host=_DEFAULT_HOST):
    """Add handler to the tornado application, after it's initialized, i.e. on the fly.

    It's definitely a hack, but whatever...

    :param application: Tornado application where handler should be registered.
    :type application: Application
    :param spec: Specification for handler.
    :type spec: URLSpec or tuple or list or dict
    :param host: Host pattern the spec should be registered under.
    :return: URLSpec for registered handler.
    :raises AttributeError: If a tuple/list spec has the wrong arity.
    :raises TypeError: If spec is not a tuple, list, dict or URLSpec.
    """
    # Normalize any supported spec shape into a CherryURLSpec.
    if isinstance(spec, (tuple, list)):
        spec_len = len(spec)  # renamed from `l` (ambiguous single-letter name)
        if 2 <= spec_len <= 4:
            spec = CherryURLSpec(*spec)
        else:
            raise AttributeError('Invalid spec')
    elif isinstance(spec, dict):
        # Copy so the caller's dict is not mutated by the constructor.
        spec = CherryURLSpec(**deepcopy(spec))
    elif not isinstance(spec, URLSpec):
        raise TypeError('Invalid spec: {}'.format(spec))
    app_handlers = application.handlers
    # If the host is already registered, just append the spec to its list.
    for current_host, handlers in reversed(app_handlers):
        if current_host == host:
            handlers.append(spec)
            return spec
    # Host is not registered in the application yet; prepare what we are about to add.
    adding_handlers = [host, [spec]]
    # The default host must stay last, so a new default host — or the very
    # first host of an empty application — goes to the end of the list.
    # (Guarding `not app_handlers` also fixes an IndexError on empty lists.)
    if host == _DEFAULT_HOST or not app_handlers:
        app_handlers.append(adding_handlers)
        return spec
    # Any other new host is added at the end, but before an existing default
    # host, if there is one.
    last_host, _last_handlers = app_handlers[-1]
    if last_host == _DEFAULT_HOST:
        app_handlers.insert(-1, adding_handlers)
    else:
        app_handlers.append(adding_handlers)
    return spec
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base import serialize
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class RoleTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the Chat v2 Roles REST resource.

    Each *_request test mocks a 500 so the request is captured (and the client
    raises), then asserts the exact HTTP request the library produced; each
    *_response test replays a canned payload and checks it deserializes.
    """

    def test_fetch_request(self):
        """fetch() issues a GET against the Roles instance URL."""
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        """A 200 role payload deserializes into a non-None instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "channel user",
                "type": "channel",
                "permissions": [
                    "sendMessage",
                    "leaveChannel",
                    "editOwnMessage",
                    "deleteOwnMessage"
                ],
                "date_created": "2016-03-03T19:47:15Z",
                "date_updated": "2016-03-03T19:47:15Z",
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.assertIsNotNone(actual)

    def test_delete_request(self):
        """delete() issues a DELETE against the Roles instance URL."""
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.holodeck.assert_has_request(Request(
            'delete',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_delete_response(self):
        """A 204 (empty body) delete response maps to a truthy result."""
        self.holodeck.mock(Response(
            204,
            None,
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.assertTrue(actual)

    def test_create_request(self):
        """create() POSTs the serialized form values to the Roles list URL."""
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .roles.create(friendly_name="friendly_name", type="channel", permission=['permission'])

        values = {
            'FriendlyName': "friendly_name",
            'Type': "channel",
            'Permission': serialize.map(['permission'], lambda e: e),
        }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Roles',
            data=values,
        ))

    def test_create_response(self):
        """A 201 created-role payload deserializes into a non-None instance."""
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "channel user",
                "type": "channel",
                "permissions": [
                    "sendMessage",
                    "leaveChannel",
                    "editOwnMessage",
                    "deleteOwnMessage"
                ],
                "date_created": "2016-03-03T19:47:15Z",
                "date_updated": "2016-03-03T19:47:15Z",
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles.create(friendly_name="friendly_name", type="channel", permission=['permission'])

        self.assertIsNotNone(actual)

    def test_list_request(self):
        """list() issues a GET against the Roles list URL."""
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .roles.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Roles',
        ))

    def test_read_full_response(self):
        """A one-item paged response deserializes into a non-None list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "roles"
                },
                "roles": [
                    {
                        "sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "friendly_name": "channel user",
                        "type": "channel",
                        "permissions": [
                            "sendMessage",
                            "leaveChannel",
                            "editOwnMessage",
                            "deleteOwnMessage"
                        ],
                        "date_created": "2016-03-03T19:47:15Z",
                        "date_updated": "2016-03-03T19:47:15Z",
                        "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ]
            }
            '''
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles.list()

        self.assertIsNotNone(actual)

    def test_read_empty_response(self):
        """An empty paged response also deserializes into a non-None list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "roles"
                },
                "roles": []
            }
            '''
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles.list()

        self.assertIsNotNone(actual)

    def test_update_request(self):
        """update() POSTs only the changed fields to the instance URL."""
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(permission=['permission'])

        values = {'Permission': serialize.map(['permission'], lambda e: e), }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Roles/RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
            data=values,
        ))

    def test_update_response(self):
        """A 200 updated-role payload deserializes into a non-None instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "channel user",
                "type": "channel",
                "permissions": [
                    "sendMessage",
                    "leaveChannel",
                    "editOwnMessage",
                    "deleteOwnMessage"
                ],
                "date_created": "2016-03-03T19:47:15Z",
                "date_updated": "2016-03-03T19:47:15Z",
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles/RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.chat.v2.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .roles(sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(permission=['permission'])

        self.assertIsNotNone(actual)
| |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey import rbac
from security_monkey.views import AuthenticatedService
from security_monkey.views import ITEM_FIELDS
from security_monkey.views import AUDIT_FIELDS
from security_monkey.views import ITEM_LINK_FIELDS
from security_monkey.datastore import ItemAudit
from security_monkey.datastore import Item
from security_monkey.datastore import Account
from security_monkey.datastore import AccountType
from security_monkey.datastore import Technology
from security_monkey.datastore import ItemRevision
from security_monkey.datastore import AuditorSettings
from security_monkey import AWS_DEFAULT_REGION
from flask_restful import marshal
from sqlalchemy import or_
class ItemAuditList(AuthenticatedService):
    """REST endpoint listing open (unfixed) audit issues with rich filtering."""

    decorators = [
        rbac.allow(["View"], ["GET"])
    ]

    def get(self):
        """
        .. http:get:: /api/1/issues

            Get a list of Audit Issues matching the given criteria

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/issues HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    items: [
                        {
                            account: "example_account",
                            account_type: "AWS",
                            justification: null,
                            name: "example_name",
                            technology: "s3",
                            issue: "Example Issue",
                            region: AWS_DEFAULT_REGION,
                            score: 10,
                            notes: "Example Notes",
                            item_id: 11,
                            justified: false,
                            justified_date: null,
                            id: 595
                        }
                    ],
                    total: 1,
                    page: 1,
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication failure. Please login.
        """
        # Pagination plus a set of optional comma-separated filter arguments.
        self.reqparse.add_argument('count', type=int, default=30, location='args')
        self.reqparse.add_argument('page', type=int, default=1, location='args')
        self.reqparse.add_argument('regions', type=str, default=None, location='args')
        self.reqparse.add_argument('accounts', type=str, default=None, location='args')
        self.reqparse.add_argument('accounttypes', type=str, default=None, location='args')
        self.reqparse.add_argument('technologies', type=str, default=None, location='args')
        self.reqparse.add_argument('names', type=str, default=None, location='args')
        self.reqparse.add_argument('arns', type=str, default=None, location='args')
        self.reqparse.add_argument('active', type=str, default=None, location='args')
        self.reqparse.add_argument('searchconfig', type=str, default=None, location='args')
        self.reqparse.add_argument('enabledonly', type=bool, default=None, location='args')
        self.reqparse.add_argument('justified', type=str, default=None, location='args')
        self.reqparse.add_argument('summary', type=str, default=None, location='args')
        args = self.reqparse.parse_args()

        page = args.pop('page', None)
        count = args.pop('count', None)
        # Drop all filters the client left empty so the `in args` checks
        # below only see filters that were actually supplied.
        for k, v in list(args.items()):
            if not v:
                del args[k]

        # Only open (unfixed) issues are listed; each filter below narrows
        # the query further and joins additional tables only when needed.
        query = ItemAudit.query.join("item")
        query = query.filter(ItemAudit.fixed == False)
        if 'regions' in args:
            regions = args['regions'].split(',')
            query = query.filter(Item.region.in_(regions))
        if 'accounts' in args:
            accounts = args['accounts'].split(',')
            query = query.join((Account, Account.id == Item.account_id))
            query = query.filter(Account.name.in_(accounts))
        if 'accounttypes' in args:
            accounttypes = args['accounttypes'].split(',')
            query = query.join((Account, Account.id == Item.account_id))
            query = query.join((AccountType, AccountType.id == Account.account_type_id))
            query = query.filter(AccountType.name.in_(accounttypes))
        if 'technologies' in args:
            technologies = args['technologies'].split(',')
            query = query.join((Technology, Technology.id == Item.tech_id))
            query = query.filter(Technology.name.in_(technologies))
        if 'names' in args:
            names = args['names'].split(',')
            query = query.filter(Item.name.in_(names))
        if 'arns' in args:
            arns = args['arns'].split(',')
            query = query.filter(Item.arn.in_(arns))
        if 'active' in args:
            active = args['active'].lower() == "true"
            query = query.join((ItemRevision, Item.latest_revision_id == ItemRevision.id))
            query = query.filter(ItemRevision.active == active)
        if 'searchconfig' in args:
            # Free-text search across issue, notes, justification and name;
            # terms are OR-ed together.
            search = args['searchconfig'].split(',')
            conditions = []
            for searchterm in search:
                conditions.append(ItemAudit.issue.ilike('%{}%'.format(searchterm)))
                conditions.append(ItemAudit.notes.ilike('%{}%'.format(searchterm)))
                conditions.append(ItemAudit.justification.ilike('%{}%'.format(searchterm)))
                conditions.append(Item.name.ilike('%{}%'.format(searchterm)))
            query = query.filter(or_(*conditions))
        if 'enabledonly' in args:
            query = query.join((AuditorSettings, AuditorSettings.id == ItemAudit.auditor_setting_id))
            query = query.filter(AuditorSettings.disabled == False)
        if 'justified' in args:
            justified = args['justified'].lower() == "true"
            query = query.filter(ItemAudit.justified == justified)
        if 'summary' in args:
            # Summary wants to order by oldest issues
            # TODO: Add date_created column to ItemAudit, and have summary order by date_created
            # Order by justified_date until date_created exists
            query = query.order_by(ItemAudit.justified_date.asc())
        else:
            query = query.order_by(ItemAudit.justified, ItemAudit.score.desc())

        issues = query.paginate(page, count)

        marshaled_dict = {
            'page': issues.page,
            'total': issues.total,
            'auth': self.auth_dict
        }

        items_marshaled = []
        for issue in issues.items:
            # TODO: This MUST be modified when switching to new issue logic in future:
            # Currently there should be exactly 1 item in the list of sub_items:
            item_marshaled = marshal(issue.item.__dict__, ITEM_FIELDS)
            issue_marshaled = marshal(issue.__dict__, AUDIT_FIELDS)
            account_marshaled = {'account': issue.item.account.name}
            accounttype_marshaled = {'account_type': issue.item.account.account_type.name}
            technology_marshaled = {'technology': issue.item.technology.name}
            links = []
            for link in issue.sub_items:
                item_link_marshaled = marshal(link.__dict__, ITEM_LINK_FIELDS)
                links.append(item_link_marshaled)
            issue_marshaled['item_links'] = links
            if issue.justified:
                if issue.user is not None:
                    # Attach the justifying user's email when available.
                    issue_marshaled = dict(
                        list(issue_marshaled.items()) +
                        list({'justified_user': issue.user.email}.items()))
            # Merge all partial dicts into one flat record per issue.
            merged_marshaled = dict(
                list(item_marshaled.items()) +
                list(issue_marshaled.items()) +
                list(account_marshaled.items()) +
                list(accounttype_marshaled.items()) +
                list(technology_marshaled.items()))
            items_marshaled.append(merged_marshaled)

        marshaled_dict['items'] = items_marshaled
        marshaled_dict['count'] = len(items_marshaled)
        return marshaled_dict, 200
class ItemAuditGet(AuthenticatedService):
    """REST endpoint returning one audit issue by its id."""

    decorators = [
        rbac.allow(["View"], ["GET"])
    ]

    def get(self, audit_id):
        """
        .. http:get:: /api/1/issue/1234

            Get a specific issue

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/issue/1234 HTTP/1.1
                Host: example.com
                Accept: application/json

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    justification: null,
                    name: "example_name",
                    issue: "Example Audit Issue",
                    notes: "Example Notes on Audit Issue",
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    },
                    score: 0,
                    item_id: 704,
                    region: AWS_DEFAULT_REGION,
                    justified: false,
                    justified_date: null,
                    id: 704
                }

            :statuscode 200: no error
            :statuscode 401: Authentication Error. Please login.
        """
        query = ItemAudit.query.join("item").filter(ItemAudit.id == audit_id)
        # NOTE(review): first() returns None for an unknown audit_id, which
        # makes `result.item` below raise — confirm whether a 404 is wanted.
        result = query.first()

        issue_marshaled = marshal(result, AUDIT_FIELDS)
        item_marshaled = marshal(result.item, ITEM_FIELDS)
        # Flatten issue + item + auth info into a single response dict.
        issue_marshaled = dict(
            list(issue_marshaled.items()) +
            list(item_marshaled.items()) +
            list({'auth': self.auth_dict}.items())
        )
        return issue_marshaled, 200
| |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Rangeslider(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.rangeslider"
_valid_props = {
"autorange",
"bgcolor",
"bordercolor",
"borderwidth",
"range",
"thickness",
"visible",
"yaxis",
}
    # autorange
    # ---------
    @property
    def autorange(self):
        """
        Determines whether or not the range slider range is computed in
        relation to the input data. If `range` is provided, then
        `autorange` is set to False.

        The 'autorange' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        # Item access delegates to the base class property machinery.
        return self["autorange"]

    @autorange.setter
    def autorange(self, val):
        # Item assignment routes through the base class, which validates val.
        self["autorange"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the range slider.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the range slider.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the border width of the range slider.
The 'borderwidth' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# range
# -----
@property
def range(self):
"""
Sets the range of the range slider. If not set, defaults to the
full xaxis range. If the axis `type` is "log", then you must
take the log of your desired range. If the axis `type` is
"date", it should be date strings, like date data, though Date
objects and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is assigned a
serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# thickness
# ---------
@property
def thickness(self):
"""
The height of the range slider as a fraction of the total plot
area height.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not the range slider will be visible. If
visible, perpendicular axes will be set to `fixedrange`
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
The 'yaxis' property is an instance of YAxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.xaxis.rangeslider.YAxis`
- A dict of string/value properties that will be passed
to the YAxis constructor
Supported dict properties:
range
Sets the range of this axis for the
rangeslider.
rangemode
Determines whether or not the range of this
axis in the rangeslider use the same value than
in the main plot when zooming in/out. If
"auto", the autorange will be used. If "fixed",
the `range` is used. If "match", the current
range of the corresponding y-axis on the main
subplot is used.
Returns
-------
plotly.graph_objs.layout.xaxis.rangeslider.YAxis
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: the text below is a runtime return value (not a docstring),
        # so its exact content and layout must be preserved.
        return """\
        autorange
            Determines whether or not the range slider range is
            computed in relation to the input data. If `range` is
            provided, then `autorange` is set to False.
        bgcolor
            Sets the background color of the range slider.
        bordercolor
            Sets the border color of the range slider.
        borderwidth
            Sets the border width of the range slider.
        range
            Sets the range of the range slider. If not set,
            defaults to the full xaxis range. If the axis `type` is
            "log", then you must take the log of your desired
            range. If the axis `type` is "date", it should be date
            strings, like date data, though Date objects and unix
            milliseconds will be accepted and converted to strings.
            If the axis `type` is "category", it should be numbers,
            using the scale where each category is assigned a
            serial number from zero in the order it appears.
        thickness
            The height of the range slider as a fraction of the
            total plot area height.
        visible
            Determines whether or not the range slider will be
            visible. If visible, perpendicular axes will be set to
            `fixedrange`
        yaxis
            :class:`plotly.graph_objects.layout.xaxis.rangeslider.Y
            Axis` instance or dict with compatible properties
        """
def __init__(
self,
arg=None,
autorange=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
range=None,
thickness=None,
visible=None,
yaxis=None,
**kwargs
):
"""
Construct a new Rangeslider object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.Rangeslider`
autorange
Determines whether or not the range slider range is
computed in relation to the input data. If `range` is
provided, then `autorange` is set to False.
bgcolor
Sets the background color of the range slider.
bordercolor
Sets the border color of the range slider.
borderwidth
Sets the border width of the range slider.
range
Sets the range of the range slider. If not set,
defaults to the full xaxis range. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
thickness
The height of the range slider as a fraction of the
total plot area height.
visible
Determines whether or not the range slider will be
visible. If visible, perpendicular axes will be set to
`fixedrange`
yaxis
:class:`plotly.graph_objects.layout.xaxis.rangeslider.Y
Axis` instance or dict with compatible properties
Returns
-------
Rangeslider
"""
super(Rangeslider, self).__init__("rangeslider")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.xaxis.Rangeslider
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Rangeslider`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autorange", None)
_v = autorange if autorange is not None else _v
if _v is not None:
self["autorange"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("range", None)
_v = range if range is not None else _v
if _v is not None:
self["range"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from .classifier import normaliselabels
import multiprocessing
# Public names of this module; everything else is an implementation detail.
__all__ = [
    'gridminimise',
    'gridsearch',
    ]
def _allassignments(options):
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# from http://docs.python.org/library/itertools.html#itertools.product
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from itertools import repeat, izip
for ks,vs in izip(repeat(options.keys()), product(*options.values())):
yield zip(ks,vs)
def _set_options(learner, options):
for k,v in options:
learner.set_option(k,v)
class Grid1(multiprocessing.Process):
    '''Worker process used by ``gridminimise``.

    Pulls ``(option-index, fold-index)`` jobs from ``inq``, evaluates the
    learner under that option assignment on that cross-validation fold, and
    pushes ``(option-index, error)`` results onto ``outq``.
    '''

    def __init__(self, learner, features, labels, measure, train_kwargs, options, folds, inq, outq):
        self.learner = learner
        self.features = features
        self.labels = labels
        self.measure = measure
        self.train_kwargs = train_kwargs
        self.options = options
        self.folds = folds
        self.inq = inq          # job queue: (option index, fold index)
        self.outq = outq        # result queue: (option index, error)
        super(Grid1, self).__init__()

    def execute_one(self, index, fold):
        '''Train with option assignment ``index`` on fold ``fold``.

        Returns the measure (error) evaluated on the test part of the fold.
        '''
        _set_options(self.learner, self.options[index])
        train, test = self.folds[fold]
        model = self.learner.train(self.features[train], self.labels[train], normalisedlabels=True, **self.train_kwargs)
        preds = [model.apply(f) for f in self.features[test]]
        error = self.measure(self.labels[test], preds)
        return error

    def run(self):
        try:
            while True:
                index, fold = self.inq.get()
                # A ('shutdown', ...) job tells this worker to drain and exit.
                if index == 'shutdown':
                    self.outq.close()
                    self.outq.join_thread()
                    return
                error = self.execute_one(index, fold)
                self.outq.put((index, error))
        # FIX: was `except Exception, e` -- a SyntaxError on Python 3;
        # the `as` form is valid on Python 2.6+ and Python 3.
        except Exception as e:
            import traceback
            errstr = r'''\
Error in milk.gridminimise internal
Exception was: %s
Original Traceback:
%s
(Since this was run on a different process, this is not a real stack trace).
''' % (e, traceback.format_exc())
            # The parent loop recognises the 'error' marker and re-raises.
            self.outq.put(('error', errstr))
def gridminimise(learner, features, labels, params, measure=None, nfolds=10, return_value=False, train_kwargs=None, nprocs=None):
    '''
    best = gridminimise(learner, features, labels, params, measure={0/1 loss}, nfolds=10, return_value=False, nprocs=None)
    best, value = gridminimise(learner, features, labels, params, measure={0/1 loss}, nfolds=10, return_value=True, nprocs=None)

    Grid search for the settings of parameters that maximises a given measure

    This function is equivalent to searching the grid, but does not actually
    search the whole grid.

    Parameters
    ----------
    learner : a classifier object
    features : sequence of features
    labels : sequence of labels
    params : dictionary of sequences
        keys are the options to change,
        values are sequences of corresponding elements to try
    measure : function, optional
        a function that takes labels and outputs and returns the loss.
        Default: 0/1 loss. This must be an *additive* function.
    nfolds : integer, optional
        nr of folds to run, default: 10
    return_value : boolean, optional
        Whether to return the error value as well. Default False
    train_kwargs : dict, optional
        Options that are passed to the train() method of the classifier, using
        the ``train(features, labels, **train_kwargs)`` syntax. Defaults to {}.
    nprocs : integer, optional
        Number of processors to use. By default, uses the
        ``milk.utils.parallel`` framework to check the number of
        processors.

    Returns
    -------
    best : a sequence of assignments
    value : float
        Only returned if ``return_value`` is true
    '''
    # The algorithm is as follows:
    #
    # for all assignments: error = 0, next_iteration = 0
    #
    # at each iteration:
    #    look for assignment with smallest error
    #    if that is done: return it
    #    else: perform one more iteration
    #
    # When the function returns, that assignment has the lowest error of all
    # assignments and all the iterations are done. Therefore, other assignments
    # could only be worse even if we never computed the whole error!
    from ..measures.nfoldcrossvalidation import foldgenerator
    from ..utils import parallel
    if measure is None:
        from ..measures.measures import zero_one_loss
        measure = zero_one_loss
    if train_kwargs is None:
        train_kwargs = {}
    try:
        features = np.asanyarray(features)
    # FIX: was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit; only conversion failures should trigger the fallback.
    except Exception:
        # Ragged / heterogeneous inputs: fall back to an object array.
        features = np.array(features, dtype=object)
    labels,_ = normaliselabels(labels)
    options = list(_allassignments(params))
    # Per-assignment bookkeeping: how many folds have been evaluated and the
    # accumulated (additive) error so far.
    iteration = np.zeros(len(options), int)
    error = np.zeros(len(options), float)
    folds = [(Tr.copy(), Te.copy()) for Tr,Te in foldgenerator(labels, nfolds)]
    # foldgenerator might actually decide on a smaller number of folds,
    # depending on the distribution of class sizes:
    nfolds = len(folds)
    assert nfolds
    if nprocs is None:
        nprocs = len(options)
    else:
        nprocs = min(nprocs, len(options))
    assert nprocs > 0, 'milk.supervised.gridminimise: nprocs <= 0!!'
    nprocs = parallel.get_procs(nprocs, use_current=True)
    executing = set()
    workers = []
    if nprocs > 1:
        # Parallel path: one worker process per granted processor.
        inqueue = multiprocessing.Queue()
        outqueue = multiprocessing.Queue()
        # FIX: `xrange` is Python 2 only; `range` behaves identically here.
        for i in range(nprocs):
            inqueue.put((i, 0))
            executing.add(i)
            w = Grid1(learner, features, labels, measure, train_kwargs, options, folds, inqueue, outqueue)
            w.start()
            workers.append(w)
        getnext = outqueue.get
        def queuejob(index, fold):
            inqueue.put((index, fold))
    else:
        # Serial path: emulate the queue protocol in-process.
        worker = Grid1(learner, features, labels, measure, train_kwargs, options, folds, None, None)
        queue = []
        def queuejob(index, fold):
            queue.append((index, fold))
        def getnext():
            index, fold = queue.pop()
            return index, worker.execute_one(index, fold)
        queuejob(0, 0)
        executing.add(0)
    try:
        while True:
            p, err = getnext()
            if p == 'error':
                # A worker died; `err` carries its formatted traceback.
                raise RuntimeError(err)
            executing.remove(p)
            iteration[p] += 1
            error[p] += err
            # If a current-minimum assignment has finished all folds, no other
            # assignment can beat it (errors only grow), so we are done.
            for best in np.where(error == error.min())[0]:
                if iteration[best] == nfolds:
                    if return_value:
                        return options[best], error[best]
                    return options[best]
            # Otherwise schedule one more fold for the most promising
            # not-currently-executing assignment.
            # (renamed from `next`, which shadowed the builtin)
            for candidate in error.argsort():
                if iteration[candidate] < nfolds and candidate not in executing:
                    executing.add(candidate)
                    queuejob(candidate, iteration[candidate])
                    break
    finally:
        assert np.max(iteration) <= nfolds
        if workers:
            for w in workers:
                inqueue.put(('shutdown', None))
            inqueue.close()
            inqueue.join_thread()
            for w in workers:
                w.join()
        # Release what get_procs() reserved (done unconditionally to mirror
        # the unconditional get_procs() call above).
        parallel.release_procs(nprocs, count_current=True)
class gridsearch(object):
    '''
    G = gridsearch(base, measure=accuracy, nfolds=10, params={ param1 : [...], param2 : [...]}, annotate=False)

    Perform a grid search for the best parameter values.

    When G.train() is called, then for each combination of p1 in param1, p2 in
    param2, ... it performs (effectively)::

        base.param1 = p1
        base.param2 = p2
        ...
        value[p1, p2,...] = measure(nfoldcrossvalidation(..., learner=base))

    it then picks the highest set of parameters and re-learns a model on the
    whole data.

    Parameters
    -----------
    base : classifier to use
    measure : function, optional
        a function that takes labels and outputs and returns the loss.
        Default: 0/1 loss. This must be an *additive* function.
    nfolds : integer, optional
        Nr of folds
    params : dictionary
    annotate : boolean
        Whether to annotate the returned model with ``arguments`` and ``value``
        fields with the result of cross-validation. Defaults to False.

    All of the above can be *passed as parameters to the constructor or set as
    attributes*.

    See Also
    --------
    gridminimise : function
        Implements the basic functionality behind this object
    '''
    # FIX: the default for `params` was a mutable `{}` (shared across
    # instances); it is now created per-instance.
    def __init__(self, base, measure=None, nfolds=10, params=None, annotate=False):
        self.params = {} if params is None else params
        self.base = base
        # FIX: this used to read `self.nfolds = 10`, silently ignoring the
        # `nfolds` argument.
        self.nfolds = nfolds
        self.measure = measure
        self.annotate = annotate

    def is_multi_class(self):
        # Delegate to the wrapped classifier.
        return self.base.is_multi_class()

    def train(self, features, labels, normalisedlabels=False, **kwargs):
        '''Grid-search for the best parameters, then retrain on all data.'''
        best, value = gridminimise(self.base, features, labels, self.params, self.measure, self.nfolds, return_value=True, train_kwargs=kwargs)
        _set_options(self.base, best)
        model = self.base.train(features, labels, normalisedlabels=normalisedlabels, **kwargs)
        if self.annotate:
            model.arguments = best
            model.value = value
        return model
| |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import copy
import datetime
import json
import traceback
from typing import TYPE_CHECKING
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QTextCharFormat, QBrush, QFont
from PyQt5.QtWidgets import (QDialog, QLabel, QPushButton, QHBoxLayout, QVBoxLayout,
QTextEdit, QFrame)
import qrcode
from qrcode import exceptions
from electrum.bitcoin import base_encode
from electrum.i18n import _
from electrum.plugin import run_hook
from electrum import simple_config
from electrum.util import bfh
from electrum.transaction import SerializationError, Transaction
from electrum.logging import get_logger
from .util import (MessageBoxMixin, read_QIcon, Buttons, CopyButton,
MONOSPACE_FONT, ColorScheme, ButtonsLineEdit)
if TYPE_CHECKING:
from .main_window import ElectrumWindow
# Tooltip texts for the Save button in its two states.
SAVE_BUTTON_ENABLED_TOOLTIP = _("Save transaction offline")
SAVE_BUTTON_DISABLED_TOOLTIP = _("Please sign this transaction in order to save it")

_logger = get_logger(__name__)

# Module-level keep-alive list for open transaction dialogs.
dialogs = []  # Otherwise python randomly garbage collects the dialogs...
def show_transaction(tx, parent, desc=None, prompt_if_unsaved=False):
    """Create a TxDialog for ``tx`` and show it (keeping a global reference).

    If the transaction cannot be deserialized, a critical message box is
    shown instead and no dialog is created.
    """
    try:
        dialog = TxDialog(tx, parent, desc, prompt_if_unsaved)
    except SerializationError as e:
        _logger.exception('unable to deserialize the transaction')
        parent.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
        return
    dialogs.append(dialog)
    dialog.show()
class TxDialog(QDialog, MessageBoxMixin):
    """Top-level window showing one transaction: id, stats, inputs/outputs,
    plus sign / broadcast / save / export / copy / QR actions."""

    def __init__(self, tx, parent, desc, prompt_if_unsaved):
        '''Transactions in the wallet will show their description.
        Pass desc to give a description for txs not yet in the wallet.
        '''
        # We want to be a top-level window
        QDialog.__init__(self, parent=None)
        # Take a copy; it might get updated in the main window by
        # e.g. the FX plugin. If this happens during or after a long
        # sign operation the signatures are lost.
        self.tx = tx = copy.deepcopy(tx)  # type: Transaction
        try:
            self.tx.deserialize()
        except BaseException as e:
            raise SerializationError(e)
        self.main_window = parent  # type: ElectrumWindow
        self.wallet = parent.wallet
        self.prompt_if_unsaved = prompt_if_unsaved
        self.saved = False
        self.desc = desc
        # if the wallet can populate the inputs with more info, do it now.
        # as a result, e.g. we might learn an imported address tx is segwit,
        # in which case it's ok to display txid
        tx.add_inputs_info(self.wallet)
        self.setMinimumWidth(950)
        self.setWindowTitle(_("Transaction"))

        vbox = QVBoxLayout()
        self.setLayout(vbox)

        # Transaction id line with a "show as QR" side button.
        vbox.addWidget(QLabel(_("Transaction ID:")))
        self.tx_hash_e = ButtonsLineEdit()
        qr_show = lambda: parent.show_qrcode(str(self.tx_hash_e.text()), 'Transaction ID', parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.tx_hash_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.tx_hash_e.setReadOnly(True)
        vbox.addWidget(self.tx_hash_e)

        self.add_tx_stats(vbox)

        vbox.addSpacing(10)
        self.add_io(vbox)

        self.sign_button = b = QPushButton(_("Sign"))
        b.clicked.connect(self.sign)

        self.broadcast_button = b = QPushButton(_("Broadcast"))
        b.clicked.connect(self.do_broadcast)

        # Save is only available once the tx is fully signed.
        self.save_button = b = QPushButton(_("Save"))
        save_button_disabled = not tx.is_complete()
        b.setDisabled(save_button_disabled)
        if save_button_disabled:
            b.setToolTip(SAVE_BUTTON_DISABLED_TOOLTIP)
        else:
            b.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
        b.clicked.connect(self.save)

        self.export_button = b = QPushButton(_("Export"))
        b.clicked.connect(self.export)

        self.cancel_button = b = QPushButton(_("Close"))
        b.clicked.connect(self.close)
        b.setDefault(True)

        self.qr_button = b = QPushButton()
        b.setIcon(read_QIcon(qr_icon))
        b.clicked.connect(self.show_qr)

        self.copy_button = CopyButton(lambda: str(self.tx), parent.app)

        # Action buttons
        self.buttons = [self.sign_button, self.broadcast_button, self.cancel_button]
        # Transaction sharing buttons
        self.sharing_buttons = [self.copy_button, self.qr_button, self.export_button, self.save_button]

        # Let plugins add/replace widgets before laying out the button row.
        run_hook('transaction_dialog', self)

        hbox = QHBoxLayout()
        hbox.addLayout(Buttons(*self.sharing_buttons))
        hbox.addStretch(1)
        hbox.addLayout(Buttons(*self.buttons))
        vbox.addLayout(hbox)

        self.update()

    def do_broadcast(self):
        """Broadcast self.tx via the main window, then mark it as saved."""
        self.main_window.push_top_level_window(self)
        try:
            self.main_window.broadcast_transaction(self.tx, self.desc)
        finally:
            self.main_window.pop_top_level_window(self)
        self.saved = True
        self.update()

    def closeEvent(self, event):
        """Qt close hook: optionally confirm discarding an unsaved tx and
        drop this dialog from the module-level keep-alive list."""
        if (self.prompt_if_unsaved and not self.saved
                and not self.question(_('This transaction is not saved. Close anyway?'), title=_("Warning"))):
            event.ignore()
        else:
            event.accept()
            try:
                dialogs.remove(self)
            except ValueError:
                pass  # was not in list already

    def reject(self):
        # Override escape-key to close normally (and invoke closeEvent)
        self.close()

    def show_qr(self):
        """Show the raw transaction as a base43-encoded QR code."""
        text = bfh(str(self.tx))
        text = base_encode(text, base=43)
        try:
            self.main_window.show_qrcode(text, 'Transaction', parent=self)
        except qrcode.exceptions.DataOverflowError:
            self.show_error(_('Failed to display QR code.') + '\n' +
                            _('Transaction is too large in size.'))
        except Exception as e:
            self.show_error(_('Failed to display QR code.') + '\n' + repr(e))

    def sign(self):
        """Ask the main window to sign self.tx; re-enable saving once the
        transaction is complete."""
        def sign_done(success):
            # note: with segwit we could save partially signed tx, because they have a txid
            if self.tx.is_complete():
                self.prompt_if_unsaved = True
                self.saved = False
                self.save_button.setDisabled(False)
                self.save_button.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
            self.update()
            self.main_window.pop_top_level_window(self)
        self.sign_button.setDisabled(True)
        self.main_window.push_top_level_window(self)
        self.main_window.sign_tx(self.tx, sign_done)

    def save(self):
        """Persist the transaction into the wallet via the main window."""
        self.main_window.push_top_level_window(self)
        if self.main_window.save_transaction_into_wallet(self.tx):
            self.save_button.setDisabled(True)
            self.saved = True
        self.main_window.pop_top_level_window(self)

    def export(self):
        """Write the transaction (as JSON) to a user-chosen .txn file."""
        name = 'signed_%s.txn' % (self.tx.txid()[0:8]) if self.tx.is_complete() else 'unsigned.txn'
        fileName = self.main_window.getSaveFileName(_("Select where to save your signed transaction"), name, "*.txn")
        if fileName:
            with open(fileName, "w+") as f:
                f.write(json.dumps(self.tx.as_dict(), indent=4) + '\n')
            self.show_message(_("Transaction exported successfully"))
            self.saved = True

    def update(self):
        """Refresh every label/button from the current state of self.tx."""
        desc = self.desc
        base_unit = self.main_window.base_unit()
        format_amount = self.main_window.format_amount
        tx_details = self.wallet.get_tx_info(self.tx)
        tx_mined_status = tx_details.tx_mined_status
        exp_n = tx_details.mempool_depth_bytes
        amount, fee = tx_details.amount, tx_details.fee
        size = self.tx.estimated_size()
        self.broadcast_button.setEnabled(tx_details.can_broadcast)
        can_sign = not self.tx.is_complete() and \
            (self.wallet.can_sign(self.tx) or bool(self.main_window.tx_external_keypairs))
        self.sign_button.setEnabled(can_sign)
        self.tx_hash_e.setText(tx_details.txid or _('Unknown'))
        if desc is None:
            self.tx_desc.hide()
        else:
            self.tx_desc.setText(_("Description") + ': ' + desc)
            self.tx_desc.show()
        self.status_label.setText(_('Status:') + ' ' + tx_details.status)
        if tx_mined_status.timestamp:
            time_str = datetime.datetime.fromtimestamp(tx_mined_status.timestamp).isoformat(' ')[:-3]
            self.date_label.setText(_("Date: {}").format(time_str))
            self.date_label.show()
        elif exp_n:
            # Unconfirmed: show how deep in the mempool the tx sits.
            text = '%.2f MB'%(exp_n/1000000)
            self.date_label.setText(_('Position in mempool: {} from tip').format(text))
            self.date_label.show()
        else:
            self.date_label.hide()
        self.locktime_label.setText(f"LockTime: {self.tx.locktime}")
        self.rbf_label.setText(f"RBF: {not self.tx.is_final()}")
        if tx_mined_status.header_hash:
            self.block_hash_label.setText(_("Included in block: {}")
                                          .format(tx_mined_status.header_hash))
            self.block_height_label.setText(_("At block height: {}")
                                            .format(tx_mined_status.height))
        else:
            self.block_hash_label.hide()
            self.block_height_label.hide()
        if amount is None:
            amount_str = _("Transaction unrelated to your wallet")
        elif amount > 0:
            amount_str = _("Amount received:") + ' %s'% format_amount(amount) + ' ' + base_unit
        else:
            amount_str = _("Amount sent:") + ' %s'% format_amount(-amount) + ' ' + base_unit
        size_str = _("Size:") + ' %d bytes'% size
        fee_str = _("Fee") + ': %s' % (format_amount(fee) + ' ' + base_unit if fee is not None else _('unknown'))
        if fee is not None:
            fee_rate = fee/size*1000
            fee_str += ' ( %s ) ' % self.main_window.format_fee_rate(fee_rate)
            feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
            if fee_rate > feerate_warning:
                fee_str += ' - ' + _('Warning') + ': ' + _("high fee") + '!'
        self.amount_label.setText(amount_str)
        self.fee_label.setText(fee_str)
        self.size_label.setText(size_str)
        run_hook('transaction_dialog_update', self)

    def add_io(self, vbox):
        """Append the colour-coded inputs and outputs panes to ``vbox``."""
        vbox.addWidget(QLabel(_("Inputs") + ' (%d)'%len(self.tx.inputs())))
        # Character formats: plain (external), green (receive address),
        # yellow (change address), blue (2FA billing address).
        ext = QTextCharFormat()
        rec = QTextCharFormat()
        rec.setBackground(QBrush(ColorScheme.GREEN.as_color(background=True)))
        rec.setToolTip(_("Wallet receive address"))
        chg = QTextCharFormat()
        chg.setBackground(QBrush(ColorScheme.YELLOW.as_color(background=True)))
        chg.setToolTip(_("Wallet change address"))
        twofactor = QTextCharFormat()
        twofactor.setBackground(QBrush(ColorScheme.BLUE.as_color(background=True)))
        twofactor.setToolTip(_("TrustedCoin (2FA) fee for the next batch of transactions"))

        def text_format(addr):
            # Pick the character format matching the address ownership.
            if self.wallet.is_mine(addr):
                return chg if self.wallet.is_change(addr) else rec
            elif self.wallet.is_billing_address(addr):
                return twofactor
            return ext

        def format_amount(amt):
            return self.main_window.format_amount(amt, whitespaces=True)

        i_text = QTextEditWithDefaultSize()
        i_text.setFont(QFont(MONOSPACE_FONT))
        i_text.setReadOnly(True)
        cursor = i_text.textCursor()
        for x in self.tx.inputs():
            if x['type'] == 'coinbase':
                cursor.insertText('coinbase')
            else:
                prevout_hash = x.get('prevout_hash')
                prevout_n = x.get('prevout_n')
                cursor.insertText(prevout_hash + ":%-4d " % prevout_n, ext)
                addr = self.wallet.get_txin_address(x)
                if addr is None:
                    addr = ''
                cursor.insertText(addr, text_format(addr))
                if x.get('value'):
                    cursor.insertText(format_amount(x['value']), ext)
            cursor.insertBlock()
        vbox.addWidget(i_text)

        vbox.addWidget(QLabel(_("Outputs") + ' (%d)'%len(self.tx.outputs())))
        o_text = QTextEditWithDefaultSize()
        o_text.setFont(QFont(MONOSPACE_FONT))
        o_text.setReadOnly(True)
        cursor = o_text.textCursor()
        for o in self.tx.get_outputs_for_UI():
            addr, v = o.address, o.value
            cursor.insertText(addr, text_format(addr))
            if v is not None:
                cursor.insertText('\t', ext)
                cursor.insertText(format_amount(v), ext)
            cursor.insertBlock()
        vbox.addWidget(o_text)

    def add_tx_stats(self, vbox):
        """Append the two-column stats area (status/amount/fee vs size/rbf/
        locktime/block info) to ``vbox``."""
        hbox_stats = QHBoxLayout()

        # left column
        vbox_left = QVBoxLayout()
        self.tx_desc = TxDetailLabel(word_wrap=True)
        vbox_left.addWidget(self.tx_desc)
        self.status_label = TxDetailLabel()
        vbox_left.addWidget(self.status_label)
        self.date_label = TxDetailLabel()
        vbox_left.addWidget(self.date_label)
        self.amount_label = TxDetailLabel()
        vbox_left.addWidget(self.amount_label)
        self.fee_label = TxDetailLabel()
        vbox_left.addWidget(self.fee_label)
        vbox_left.addStretch(1)
        hbox_stats.addLayout(vbox_left, 50)

        # vertical line separator
        line_separator = QFrame()
        line_separator.setFrameShape(QFrame.VLine)
        line_separator.setFrameShadow(QFrame.Sunken)
        line_separator.setLineWidth(1)
        hbox_stats.addWidget(line_separator)

        # right column
        vbox_right = QVBoxLayout()
        self.size_label = TxDetailLabel()
        vbox_right.addWidget(self.size_label)
        self.rbf_label = TxDetailLabel()
        vbox_right.addWidget(self.rbf_label)
        self.locktime_label = TxDetailLabel()
        vbox_right.addWidget(self.locktime_label)
        self.block_hash_label = TxDetailLabel(word_wrap=True)
        vbox_right.addWidget(self.block_hash_label)
        self.block_height_label = TxDetailLabel()
        vbox_right.addWidget(self.block_height_label)
        vbox_right.addStretch(1)
        hbox_stats.addLayout(vbox_right, 50)

        vbox.addLayout(hbox_stats)
class QTextEditWithDefaultSize(QTextEdit):
    """Text edit whose preferred size keeps the pane compact (100 px tall)."""

    def sizeHint(self):
        # Width 0 lets the layout decide; the height keeps the pane short.
        return QSize(0, 100)
class TxDetailLabel(QLabel):
    """Label whose text is mouse-selectable, with optional word wrapping."""

    def __init__(self, *, word_wrap=None):
        super().__init__()
        # Let users select/copy values (txid, block hash, ...) with the mouse.
        self.setTextInteractionFlags(Qt.TextSelectableByMouse)
        if word_wrap is not None:
            self.setWordWrap(word_wrap)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import itertools
import os
import tempfile
import time
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
# Deprecation metadata consumed by the @deprecated_arg_values decorator on
# BaseEstimator.predict(): date after which as_iterable=False is deprecated,
# and the message shown to callers still passing the old default.
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
    'The default behavior of predict() is changing. The default value for\n'
    'as_iterable will change to True, and then the flag will be removed\n'
    'altogether. The behavior of this flag is described below.')
class ModeKeys(object):
  """Standard names for model modes.

  The following standard keys are defined:

  * `TRAIN`: training mode.
  * `EVAL`: evaluation mode.
  * `INFER`: inference mode.
  """

  # These string values are passed as the `mode` argument to model_fns
  # (see Estimator._call_model_fn); model_fns compare against them directly.
  TRAIN = 'train'
  EVAL = 'eval'
  INFER = 'infer'
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
df = data_feeder.setup_train_data_feeder(x, y, n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
return input_fn, feed_fn
def infer_real_valued_columns_from_input_fn(input_fn):
  """Creates `FeatureColumn` objects for inputs defined by `input_fn`.

  This interprets all inputs as dense, fixed-length float values. This creates
  a local graph in which it calls `input_fn` to build the tensors, then discards
  it.

  Args:
    input_fn: Input function returning a tuple of:
        features - Dictionary of string feature name to `Tensor` or `Tensor`.
        target - `Tensor` of target objects.

  Returns:
    List of `FeatureColumn` objects.
  """
  # Build the features in a throwaway graph so inference leaves the caller's
  # default graph untouched; only the inferred column metadata escapes.
  with ops.Graph().as_default():
    features, _ = input_fn()
    return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
  """Creates `FeatureColumn` objects for inputs defined by input `x`.

  This interprets all inputs as dense, fixed-length float values.

  Args:
    x: Real-valued matrix of shape [n_samples, n_features...]. Can be
       iterator that returns arrays of features.

  Returns:
    List of `FeatureColumn` objects.
  """
  # Wrap the raw array in an input_fn (no shuffling needed — we only inspect
  # shapes/dtypes), then delegate to the input_fn-based inference.
  input_fn, _ = _get_input_fn(
      x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
  return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = ['Variable', 'AutoReloadVariable',
'MutableHashTable', 'MutableHashTableOfTensors']
if config.job_name:
worker_device = '/job:%s/task:%d' % (config.job_name, config.task)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=False, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, targets, predictions):
  """Add metrics to run on features, targets, and predictions dicts or tensors.

  `metrics` contains a specification for how to run metrics. It is a dict
  mapping friendly names to either `MetricSpec` objects, or directly to a metric
  function (assuming that predictions and targets are single tensors), or to
  a `(pred_name, metric)` tuples, which passes `predictions[pred_name]` and
  targets to `metric` (assuming targets is a single tensor).

  Users are encouraged to use `MetricSpec` objects, which are more flexible and
  cleaner. They also lead to clearer errors.

  Args:
    metrics: A dict mapping names to metrics specification, for example
      `MetricSpec` objects.
    features: A dict of tensors returned from an input_fn as features/inputs.
    targets: A single tensor or a dict of tensors returned from an input_fn as
      labels.
    predictions: A single tensor or a dict of tensors output from a model as
      predictions.

  Returns:
    A dict mapping the friendly name given in `metrics` to the result of
    calling the given metric function.

  Raises:
    ValueError: If metrics specifications do not work with the type of
      features/targets/predictions provided. Mostly, a dict is given but no
      pred_name specified.
  """
  metrics = metrics or {}

  # A single-entry targets dict is treated as the bare target tensor.
  if isinstance(targets, dict) and len(targets) == 1:
    targets = list(targets.values())[0]

  result = {}
  for name, metric in six.iteritems(metrics):
    if isinstance(metric, metric_spec.MetricSpec):
      # Preferred path: MetricSpec knows how to wire itself up.
      result[name] = metric.create_metric_ops(features, targets, predictions)
      continue

    # TODO(b/31229024): Remove the rest of this loop
    logging.warning('Please specify metrics using MetricSpec. Using bare '
                    'functions or (key, fn) tuples is deprecated and support '
                    'for it will be removed on Oct 1, 2016.')

    if isinstance(name, tuple):
      # Multi-head metrics: name is (friendly_name, prediction_key).
      if not isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide (name, prediction), '
            'but predictions are not dict. '
            'Metrics: %s, Predictions: %s.' % (metrics, predictions))
      prediction = predictions[name[1]]
      if isinstance(targets, dict) and name[1] in targets:
        # Targets dict contains a matching key: pair it with the prediction.
        result[name[0]] = metric(prediction, targets[name[1]])
      else:
        # Otherwise pass the (single) targets tensor to the metric.
        result[name[0]] = metric(prediction, targets)
    else:
      # Single-head metrics: predictions must be a single tensor.
      if isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide only name, no prediction, '
            'but predictions are dict. '
            'Metrics: %s, Targets: %s.' % (metrics, targets))
      result[name] = metric(predictions, targets)
  return result
class BaseEstimator(
    sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
  """Abstract BaseEstimator class to train and evaluate TensorFlow models.

  Concrete implementation of this class should provide the following functions:

    * _get_train_ops
    * _get_eval_ops
    * _get_predict_ops

  `Estimator` implemented below is a good example of how to use this class.
  """
  __metaclass__ = abc.ABCMeta

  # Note that for Google users, this is overriden with
  # learn_runner.EstimatorConfig.
  # TODO(wicke): Remove this once launcher takes over config functionality
  _Config = run_config.RunConfig  # pylint: disable=invalid-name

  def __init__(self, model_dir=None, config=None):
    """Initializes a BaseEstimator instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: A RunConfig instance.
    """
    # Model directory. Falls back to a fresh temp dir so a bare
    # BaseEstimator() is still usable, at the cost of non-persistent state.
    self._model_dir = model_dir
    if self._model_dir is None:
      self._model_dir = tempfile.mkdtemp()
      logging.warning('Using temporary folder as model directory: %s',
                      self._model_dir)

    # Create a run configuration.
    if config is None:
      self._config = BaseEstimator._Config()
      logging.warning('Using default config.')
    else:
      self._config = config
    logging.info('Using config: %s', str(vars(self._config)))

    # Set device function depending if there are replicas or not.
    self._device_fn = _get_replica_device_setter(self._config)

    # Features and targets TensorSignature objects. Captured on the first
    # fit()/evaluate() call and used to validate later inputs.
    # TODO(wicke): Rename these to something more descriptive
    self._features_info = None
    self._targets_info = None

    self._graph = None

  @property
  def config(self):
    # TODO(wicke): make RunConfig immutable, and then return it without a copy.
    return copy.deepcopy(self._config)

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Trainable`.

    Raises:
      ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
      ValueError: If both `steps` and `max_steps` are not `None`.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError('Can not provide both steps and max_steps.')

    # Training shuffles and repeats indefinitely (epochs=None); the step
    # counters bound the actual amount of work.
    input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None,
                                      batch_size=batch_size, shuffle=True,
                                      epochs=None)
    loss = self._train_model(input_fn=input_fn,
                             feed_fn=feed_fn,
                             steps=steps,
                             monitors=monitors,
                             max_steps=max_steps)
    logging.info('Loss for final step: %s.', loss)
    return self

  def partial_fit(
      self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
      monitors=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.

    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of targets. The training target values
        (class labels in classification, real numbers in regression). If set,
        `input_fn` must be `None`.
      input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
        `None`.
      steps: Number of steps for which to train model. If `None`, train forever.
      batch_size: minibatch size to use on the input, defaults to first
        dimension of `x`. Must be `None` if `input_fn` is provided.
      monitors: List of `BaseMonitor` subclass instances. Used for callbacks
        inside the training loop.

    Returns:
      `self`, for chaining.

    Raises:
      ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
        provided.
    """
    logging.warning('The current implementation of partial_fit is not optimized'
                    'for use in a loop. Consider using fit() instead.')
    # Simply delegates to fit() with a default of a single step.
    return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                    batch_size=batch_size, monitors=monitors)

  def evaluate(
      self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
      steps=None, metrics=None, name=None):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Evaluable`.

    Raises:
      ValueError: If at least one of `x` or `y` is provided, and at least one of
        `input_fn` or `feed_fn` is provided.
        Or if `metrics` is not `None` or `dict`.
    """
    # Evaluation reads the data exactly once, in order (no shuffling).
    input_fn, feed_fn = _get_input_fn(x, y, input_fn=input_fn,
                                      feed_fn=feed_fn, batch_size=batch_size,
                                      shuffle=False, epochs=1)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._evaluate_model(input_fn=input_fn,
                                                     feed_fn=feed_fn,
                                                     steps=steps,
                                                     metrics=metrics,
                                                     name=name)
    # eval_results is None when the legacy execution_mode config skips eval.
    if eval_results is not None:
      eval_results.update({'global_step': global_step})
    return eval_results

  @deprecated_arg_values(
      AS_ITERABLE_DATE, AS_ITERABLE_INSTRUCTIONS, as_iterable=False)
  def predict(
      self, x=None, input_fn=None, batch_size=None, outputs=None,
      as_iterable=False):
    """Returns predictions for given features.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
      batch_size: Override default batch size. If set, 'input_fn' must be
        'None'.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      A numpy array of predicted classes or regression values if the
      constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
      of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
      predictions if as_iterable is True.

    Raises:
      ValueError: If x and input_fn are both provided or both `None`.
    """
    input_fn, feed_fn = _get_input_fn(
        x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size,
        shuffle=False, epochs=1)
    return self._infer_model(
        input_fn=input_fn, feed_fn=feed_fn, outputs=outputs,
        as_iterable=as_iterable)

  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    return load_variable(self.model_dir, name)

  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return [name for name, _ in list_variables(self.model_dir)]

  @property
  def model_dir(self):
    return self._model_dir

  @deprecated_arg_values(
      '2016-09-23',
      'The signature of the input_fn accepted by export is changing to be '
      'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
      'input_fn and input_feature_key will become required args, '
      'and use_deprecated_input_fn will default to False and be removed '
      'altogether.',
      use_deprecated_input_fn=True,
      input_fn=None,
      input_feature_key=None)
  def export(self,
             export_dir,
             input_fn=export._default_input_fn,  # pylint: disable=protected-access
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             prediction_key=None,
             default_batch_size=1,
             exports_to_keep=None):
    """Exports inference graph into given dir.

    Args:
      export_dir: A string containing a directory to write the exported graph
        and checkpoints.
      input_fn: If `use_deprecated_input_fn` is true, then a function that given
        `Tensor` of `Example` strings, parses it into features that are then
        passed to the model. Otherwise, a function that takes no argument and
        returns a tuple of (features, targets), where features is a dict of
        string key to `Tensor` and targets is a `Tensor` that's currently not
        used (and so can be `None`).
      input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
        input.
      use_deprecated_input_fn: Determines the signature format of `input_fn`.
      signature_fn: Function that returns a default signature and a named
        signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
        for features and `Tensor` or `dict` of `Tensor`s for predictions.
      prediction_key: The key for a tensor in the `predictions` dict (output
        from the `model_fn`) to use as the `predictions` input to the
        `signature_fn`. Optional. If `None`, predictions will pass to
        `signature_fn` without filtering.
      default_batch_size: Default batch size of the `Example` placeholder.
      exports_to_keep: Number of exports to keep.

    Returns:
      The string path to the exported directory. NB: this functionality was
      added ca. 2016/09/25; clients that depend on the return value may need
      to handle the case where this function returns None because subclasses
      are not returning a value.
    """
    # pylint: disable=protected-access
    return export._export_estimator(
        estimator=self,
        export_dir=export_dir,
        signature_fn=signature_fn,
        prediction_key=prediction_key,
        input_fn=input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
    # pylint: enable=protected-access

  # NOTE(review): decorated as abstractproperty although it is used as a
  # method throughout this file — presumably historical; confirm before
  # changing to abstractmethod.
  @abc.abstractproperty
  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Expected to be overriden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    pass

  @abc.abstractproperty
  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      predictions: `Tensor` or `dict` of `Tensor` objects.
    """
    pass

  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overriden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which targets with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      metrics: `dict` of `Tensor` objects.
    """
    raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')

  @deprecated(
      '2016-09-23',
      'The signature of the input_fn accepted by export is changing to be '
      'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
      'which makes this function useless. This will be removed after the '
      'deprecation date.')
  def _get_feature_ops_from_example(self, examples_batch):
    """Returns feature parser for given example batch using features info.

    This function requires `fit()` has been called.

    Args:
      examples_batch: batch of tf.Example

    Returns:
      features: `Tensor` or `dict` of `Tensor` objects.

    Raises:
      ValueError: If `_features_info` attribute is not available (usually
        because `fit()` has not been called).
    """
    if self._features_info is None:
      raise ValueError('Features information missing, was fit() ever called?')
    return tensor_signature.create_example_parser_from_signatures(
        self._features_info, examples_batch)

  def _check_inputs(self, features, targets):
    """Validates features/targets against signatures captured on first use.

    On the first call the signatures are recorded; on later calls the inputs
    must be compatible with the recorded signatures or a ValueError is raised.
    """
    if self._features_info is not None:
      logging.warning('Given features: %s, required signatures: %s.',
                      str(features), str(self._features_info))
      if not tensor_signature.tensors_compatible(features, self._features_info):
        raise ValueError('Features are incompatible with given information. '
                         'Given features: %s, required signatures: %s.' %
                         (str(features), str(self._features_info)))
    else:
      self._features_info = tensor_signature.create_signatures(features)
      logging.info('Setting feature info to %s', str(self._features_info))
    if targets is not None:
      if self._targets_info is not None:
        logging.warning('Given targets: %s, required signatures: %s.',
                        str(targets), str(self._targets_info))
        if not tensor_signature.tensors_compatible(targets, self._targets_info):
          raise ValueError('Targets are incompatible with given information. '
                           'Given targets: %s, required signatures: %s.' %
                           (str(targets), str(self._targets_info)))
      else:
        self._targets_info = tensor_signature.create_signatures(targets)
        logging.info('Setting targets info to %s', str(self._targets_info))

  def _train_model(self,
                   input_fn,
                   steps,
                   feed_fn=None,
                   init_op=None,
                   init_feed_fn=None,
                   init_fn=None,
                   device_fn=None,
                   monitors=None,
                   log_every_steps=100,
                   fail_on_nan_loss=True,
                   max_steps=None):
    """Builds the training graph from `input_fn` and runs the training loop.

    Returns whatever `graph_actions._monitored_train` returns (the final
    loss), or `None` when the legacy `execution_mode` config skips training.
    """
    # TODO(wicke): Remove this once Model and associated code are gone.
    if hasattr(self._config, 'execution_mode'):
      if self._config.execution_mode not in ('all', 'train'):
        return

      # Stagger startup of worker sessions based on task id.
      sleep_secs = min(
          self._config.training_worker_max_startup_secs,
          self._config.task *
          self._config.training_worker_session_startup_stagger_secs)
      if sleep_secs:
        logging.info('Waiting %d secs before starting task %d.', sleep_secs,
                     self._config.task)
        time.sleep(sleep_secs)

    # Device allocation: explicit override wins over the replica device setter
    # computed in __init__.
    device_fn = device_fn or self._device_fn

    self._graph = ops.Graph()
    with self._graph.as_default() as g, g.device(device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      train_op, loss_op = self._get_train_ops(features, targets)

      # Add default monitors.
      if monitors is None:
        monitors = []

      # Split monitors into modern SessionRunHooks and legacy monitors; the
      # latter are adapted into a single hook below.
      hooks = [m for m in monitors
               if isinstance(m, session_run_hook.SessionRunHook)]

      deprecated_monitors = [
          m for m in monitors
          if not isinstance(m, session_run_hook.SessionRunHook)
      ]

      supervisor_is_chief = self._config.is_chief
      if not supervisor_is_chief:
        # Prune list of monitor to the ones runnable on all workers.
        deprecated_monitors = [m for m in deprecated_monitors
                               if m.run_on_all_workers]

      # Setup monitors.
      for monitor in deprecated_monitors:
        monitor.set_estimator(self)

      if deprecated_monitors:
        hooks.append(monitor_lib.RunHookAdapterForMonitors(deprecated_monitors))

      return graph_actions._monitored_train(  # pylint: disable=protected-access
          graph=g,
          output_dir=self._model_dir,
          train_op=train_op,
          loss_op=loss_op,
          global_step_tensor=global_step,
          init_op=init_op,
          init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
          init_fn=init_fn,
          log_every_steps=log_every_steps,
          supervisor_is_chief=supervisor_is_chief,
          supervisor_master=self._config.master,
          supervisor_save_model_secs=self._config.save_checkpoints_secs,
          supervisor_save_summaries_steps=self._config.save_summary_steps,
          keep_checkpoint_max=self._config.keep_checkpoint_max,
          feed_fn=feed_fn,
          steps=steps,
          fail_on_nan_loss=fail_on_nan_loss,
          hooks=hooks,
          max_steps=max_steps)

  def _extract_metric_update_ops(self, eval_dict):
    """Separate update operations from metric value operations."""
    update_ops = []
    value_ops = {}
    for name, metric_ops in eval_dict.items():
      if isinstance(metric_ops, (list, tuple)):
        if len(metric_ops) == 2:
          # Streaming-metric convention: (value_op, update_op).
          value_ops[name] = metric_ops[0]
          update_ops.append(metric_ops[1])
        else:
          logging.warning(
              'Ignoring metric {}. It returned a list|tuple with len {}, '
              'expected 2'.format(name, len(metric_ops)))
          value_ops[name] = metric_ops
      else:
        # Bare tensor: value only, no update op.
        value_ops[name] = metric_ops

    if update_ops:
      update_ops = control_flow_ops.group(*update_ops)
    else:
      update_ops = None

    return update_ops, value_ops

  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name=''):
    """Builds the eval graph and evaluates against the latest checkpoint.

    Returns:
      An `(eval_results, current_global_step)` pair, or `(None, None)` when
      the legacy `execution_mode` config skips evaluation.
    """
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None

    # Check that model has been trained.
    checkpoint_path = self._model_dir
    latest_path = saver.latest_checkpoint(checkpoint_path)
    if not latest_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % checkpoint_path)
    # Setup output directory: a named evaluation gets its own subdirectory so
    # its summaries don't mix with the default eval run's.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets, metrics)
      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      eval_results, current_global_step = graph_actions.evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          update_op=update_op,
          global_step_tensor=global_step,
          supervisor_master=self._config.evaluation_master,
          feed_fn=feed_fn,
          max_steps=steps)

      return eval_results, current_global_step

  def _get_features_from_input_fn(self, input_fn):
    """Calls `input_fn`, returning only the features part of its result."""
    result = input_fn()
    # input_fn may return (features, targets) or just features.
    if isinstance(result, (list, tuple)):
      return result[0]
    return result

  def _infer_model(
      self, input_fn, feed_fn=None, outputs=None, as_iterable=False):
    """Builds the inference graph and runs prediction.

    Dispatches to the iterable or single-shot implementation depending on
    `as_iterable`.
    """
    # Check that model has been trained.
    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % self._model_dir)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      contrib_framework.create_global_step(g)
      features = self._get_features_from_input_fn(input_fn)
      predictions = self._get_predict_ops(features)
      # If predictions is single output - wrap it into dict, and remember to
      # return not a dict.
      return_dict = isinstance(predictions, dict)
      if not return_dict:
        predictions = {'predictions': predictions}

      # Filter what to run predictions on, if outputs provided.
      if outputs:
        existing_keys = predictions.keys()
        predictions = {
            key: value for key, value in predictions.items() if key in outputs
        }
        if not predictions:
          raise ValueError('Expected to run at least one output from %s, '
                           'provided %s.' % (existing_keys, outputs))

      if as_iterable:
        return self._infer_model_as_iterable(
            checkpoint_path, predictions, feed_fn, return_dict)
      else:
        return self._infer_model_single(
            checkpoint_path, predictions, feed_fn, return_dict)

  def _infer_model_single(
      self, checkpoint_path, predictions, feed_fn, return_dict):
    """Runs prediction once, concatenating all fed batches into arrays."""
    if feed_fn is None:
      preds = graph_actions.infer(checkpoint_path, predictions)
    else:
      def _feed_fn():
        # Repeatedly call feed_fn; run_feeds stops when it raises
        # StopIteration or the inputs are exhausted.
        while True:
          yield feed_fn()

      outputs = graph_actions.run_feeds(
          output_dict=predictions,
          feed_dicts=_feed_fn(),
          restore_checkpoint_path=checkpoint_path)
      # Stitch per-batch outputs back together along the batch axis.
      preds = {
          key: np.concatenate([output[key] for output in outputs], axis=0)
          for key in predictions}

    return preds if return_dict else preds['predictions']

  def _infer_model_as_iterable(
      self, checkpoint_path, predictions, feed_fn, return_dict):
    """Yields one prediction per input example until inputs are exhausted."""
    if feed_fn is None:
      feed_dicts = itertools.repeat(None)
    else:
      def _feed_fn():
        while True:
          yield feed_fn()
      feed_dicts = _feed_fn()

    try:
      for output_batch in graph_actions.run_feeds_iter(
          output_dict=predictions,
          feed_dicts=feed_dicts,
          restore_checkpoint_path=checkpoint_path):
        # Unpack batches into individual predictions
        if return_dict:
          batch_length = list(output_batch.values())[0].shape[0]
          for i in range(batch_length):
            yield {key: value[i] for key, value in output_batch.items()}
        else:
          for pred in output_batch['predictions']:
            yield pred
    except errors.OutOfRangeError:
      # We fall out of the above loop naturally if feed_fn raises StopIteration,
      # or we catch an OutOfRangeError if we've reached the end of inputs.
      logging.info('Reached end of inputs for predict_iter.')
def _identity_feature_engineering_fn(features, targets):
  """Default feature_engineering_fn: passes features and targets through."""
  return features, targets
class Estimator(BaseEstimator):
  """Estimator class is the basic TensorFlow model trainer/evaluator.
  """

  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               feature_engineering_fn=None):
    """Constructs an Estimator instance.

    Args:
      model_fn: Model function, takes features and targets tensors or dicts of
                tensors and returns predictions and loss tensors.
                Supports next three signatures for the function:

                  * `(features, targets) -> (predictions, loss, train_op)`
                  * `(features, targets, mode) -> (predictions, loss, train_op)`
                  * `(features, targets, mode, params) -> (predictions, loss, train_op)`

                Where

                  * `features` are single `Tensor` or `dict` of `Tensor`s
                       (depending on data passed to `fit`),
                  * `targets` are `Tensor` or `dict` of `Tensor`s (for multi-head
                       models). If mode is `ModeKeys.INFER`, `targets=None` will be
                       passed. If the `model_fn`'s signature does not accept
                       `mode`, the `model_fn` must still be able to handle
                       `targets=None`.
                  * `mode` represents if this training, evaluation or
                       prediction. See `ModeKeys`.
                  * `params` is a `dict` of hyperparameters. Will receive what
                       is passed to Estimator in `params` parameter. This allows
                       to configure Estimators from hyper parameter tunning.

      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
              Keys are names of parameters, values are basic python types.
      feature_engineering_fn: Feature engineering function. Takes features and
                              targets which are the output of `input_fn` and
                              returns features and targets which will be fed
                              into `model_fn`. Please check `model_fn` for
                              a definition of features and targets.

    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
    """
    super(Estimator, self).__init__(model_dir=model_dir, config=config)
    if model_fn is not None:
      # Check number of arguments of the given function matches requirements.
      model_fn_args = _get_arguments(model_fn)
      if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
                         'arguments, but not None params (%s) are passed.' %
                         (model_fn, params))
      if params is None and 'params' in model_fn_args:
        logging.warning('Estimator\'s model_fn (%s) has includes params '
                        'argument, but params are not passed to Estimator.',
                        model_fn)
    self._model_fn = model_fn
    self.params = params
    self._feature_engineering_fn = (
        feature_engineering_fn or _identity_feature_engineering_fn)

  def _call_model_fn(self, features, targets, mode):
    """Calls model function with support of 2, 3 or 4 arguments."""
    # Let the (optional) feature engineering function transform the inputs
    # before they reach the model_fn.
    features, targets = self._feature_engineering_fn(features, targets)
    model_fn_args = _get_arguments(self._model_fn)
    # Dispatch on the model_fn's accepted signature, introspected by name.
    if 'mode' in model_fn_args:
      if 'params' in model_fn_args:
        return self._model_fn(features, targets, mode=mode, params=self.params)
      else:
        return self._model_fn(features, targets, mode=mode)
    return self._model_fn(features, targets)

  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Expected to be overriden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    _, loss, train_op = self._call_model_fn(features, targets, ModeKeys.TRAIN)
    return train_op, loss

  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overriden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which targets with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      metrics: `dict` of `Tensor` objects.

    Raises:
      ValueError: if `metrics` don't match `targets`.
    """
    predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
    # The streaming-mean loss is always reported alongside user metrics.
    result = {'loss': metrics_lib.streaming_mean(loss)}
    result.update(_make_metrics_ops(metrics, features, targets, predictions))
    return result

  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops.

    Expected to be overriden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      predictions: `Tensor` or `dict` of `Tensor` objects.
    """
    # No real targets exist at inference time; placeholders satisfy model_fns
    # that dereference targets, matching the signatures captured during fit().
    targets = tensor_signature.create_placeholders_from_signatures(
        self._targets_info)
    predictions, _, _ = self._call_model_fn(features, targets, ModeKeys.INFER)
    return predictions
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class policyhttpcallout(base_resource) :
	""" Configuration for HTTP callout resource. """
	def __init__(self) :
		# Writable configuration attributes; each is exposed through the
		# matching property below.
		self._name = ""
		self._ipaddress = ""
		self._port = 0
		self._vserver = ""
		self._returntype = ""
		self._httpmethod = ""
		self._hostexpr = ""
		self._urlstemexpr = ""
		self._headers = []
		self._parameters = []
		self._bodyexpr = ""
		self._fullreqexpr = ""
		self._scheme = ""
		self._resultexpr = ""
		self._cacheforsecs = 0
		self._comment = ""
		# Read-only statistics reported by the appliance (getter-only
		# properties, no setters).
		self._hits = 0
		self._undefhits = 0
		self._svrstate = ""
		self._undefreason = ""
		self._recursivecallout = 0
		# Reserved slot used by count()/count_filtered() responses.
		self.___count = 0
	@property
	def name(self) :
		"""Name for the HTTP callout. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as a default syntax expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, stringmap, or HTTP callout.<br/>Minimum length = 1.
		"""
		try :
			return self._name
		except Exception as e:
			raise e
	@name.setter
	def name(self, name) :
		"""Name for the HTTP callout. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as a default syntax expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, stringmap, or HTTP callout.<br/>Minimum length = 1
		"""
		try :
			self._name = name
		except Exception as e:
			raise e
	@property
	def ipaddress(self) :
		"""IP Address of the server (callout agent) to which the callout is sent. Can be an IPv4 or IPv6 address.
		Mutually exclusive with the Virtual Server parameter. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.
		"""
		try :
			return self._ipaddress
		except Exception as e:
			raise e
	@ipaddress.setter
	def ipaddress(self, ipaddress) :
		"""IP Address of the server (callout agent) to which the callout is sent. Can be an IPv4 or IPv6 address.
		Mutually exclusive with the Virtual Server parameter. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.
		"""
		try :
			self._ipaddress = ipaddress
		except Exception as e:
			raise e
	@property
	def port(self) :
		"""Server port to which the HTTP callout agent is mapped. Mutually exclusive with the Virtual Server parameter. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.<br/>Minimum length = 1.
		"""
		try :
			return self._port
		except Exception as e:
			raise e
	@port.setter
	def port(self, port) :
		"""Server port to which the HTTP callout agent is mapped. Mutually exclusive with the Virtual Server parameter. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.<br/>Minimum length = 1
		"""
		try :
			self._port = port
		except Exception as e:
			raise e
	@property
	def vserver(self) :
		"""Name of the load balancing, content switching, or cache redirection virtual server (the callout agent) to which the HTTP callout is sent. The service type of the virtual server must be HTTP. Mutually exclusive with the IP address and port parameters. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.<br/>Minimum length = 1.
		"""
		try :
			return self._vserver
		except Exception as e:
			raise e
	@vserver.setter
	def vserver(self, vserver) :
		"""Name of the load balancing, content switching, or cache redirection virtual server (the callout agent) to which the HTTP callout is sent. The service type of the virtual server must be HTTP. Mutually exclusive with the IP address and port parameters. Therefore, you cannot set the <IP Address, Port> and the Virtual Server in the same HTTP callout.<br/>Minimum length = 1
		"""
		try :
			self._vserver = vserver
		except Exception as e:
			raise e
	@property
	def returntype(self) :
		"""Type of data that the target callout agent returns in response to the callout.
		Available settings function as follows:
		* TEXT - Treat the returned value as a text string.
		* NUM - Treat the returned value as a number.
		* BOOL - Treat the returned value as a Boolean value.
		Note: You cannot change the return type after it is set.<br/>Possible values = BOOL, NUM, TEXT.
		"""
		try :
			return self._returntype
		except Exception as e:
			raise e
	@returntype.setter
	def returntype(self, returntype) :
		"""Type of data that the target callout agent returns in response to the callout.
		Available settings function as follows:
		* TEXT - Treat the returned value as a text string.
		* NUM - Treat the returned value as a number.
		* BOOL - Treat the returned value as a Boolean value.
		Note: You cannot change the return type after it is set.<br/>Possible values = BOOL, NUM, TEXT
		"""
		try :
			self._returntype = returntype
		except Exception as e:
			raise e
	@property
	def httpmethod(self) :
		"""Method used in the HTTP request that this callout sends. Mutually exclusive with the full HTTP request expression.<br/>Possible values = GET, POST.
		"""
		try :
			return self._httpmethod
		except Exception as e:
			raise e
	@httpmethod.setter
	def httpmethod(self, httpmethod) :
		"""Method used in the HTTP request that this callout sends. Mutually exclusive with the full HTTP request expression.<br/>Possible values = GET, POST
		"""
		try :
			self._httpmethod = httpmethod
		except Exception as e:
			raise e
	@property
	def hostexpr(self) :
		"""Default Syntax string expression to configure the Host header. Can contain a literal value (for example, 10.101.10.11) or a derived value (for example, http.req.header("Host")). The literal value can be an IP address or a fully qualified domain name. Mutually exclusive with the full HTTP request expression.<br/>Minimum length = 1.
		"""
		try :
			return self._hostexpr
		except Exception as e:
			raise e
	@hostexpr.setter
	def hostexpr(self, hostexpr) :
		"""Default Syntax string expression to configure the Host header. Can contain a literal value (for example, 10.101.10.11) or a derived value (for example, http.req.header("Host")). The literal value can be an IP address or a fully qualified domain name. Mutually exclusive with the full HTTP request expression.<br/>Minimum length = 1
		"""
		try :
			self._hostexpr = hostexpr
		except Exception as e:
			raise e
	@property
	def urlstemexpr(self) :
		"""Default Syntax string expression for generating the URL stem. Can contain a literal string (for example, "/mysite/index.html") or an expression that derives the value (for example, http.req.url). Mutually exclusive with the full HTTP request expression.<br/>Minimum length = 1.
		"""
		try :
			return self._urlstemexpr
		except Exception as e:
			raise e
	@urlstemexpr.setter
	def urlstemexpr(self, urlstemexpr) :
		"""Default Syntax string expression for generating the URL stem. Can contain a literal string (for example, "/mysite/index.html") or an expression that derives the value (for example, http.req.url). Mutually exclusive with the full HTTP request expression.<br/>Minimum length = 1
		"""
		try :
			self._urlstemexpr = urlstemexpr
		except Exception as e:
			raise e
	@property
	def headers(self) :
		"""One or more headers to insert into the HTTP request. Each header is specified as "name(expr)", where expr is a default syntax expression that is evaluated at runtime to provide the value for the named header. You can configure a maximum of eight headers for an HTTP callout. Mutually exclusive with the full HTTP request expression.
		"""
		try :
			return self._headers
		except Exception as e:
			raise e
	@headers.setter
	def headers(self, headers) :
		"""One or more headers to insert into the HTTP request. Each header is specified as "name(expr)", where expr is a default syntax expression that is evaluated at runtime to provide the value for the named header. You can configure a maximum of eight headers for an HTTP callout. Mutually exclusive with the full HTTP request expression.
		"""
		try :
			self._headers = headers
		except Exception as e:
			raise e
	@property
	def parameters(self) :
		"""One or more query parameters to insert into the HTTP request URL (for a GET request) or into the request body (for a POST request). Each parameter is specified as "name(expr)", where expr is an default syntax expression that is evaluated at run time to provide the value for the named parameter (name=value). The parameter values are URL encoded. Mutually exclusive with the full HTTP request expression.
		"""
		try :
			return self._parameters
		except Exception as e:
			raise e
	@parameters.setter
	def parameters(self, parameters) :
		"""One or more query parameters to insert into the HTTP request URL (for a GET request) or into the request body (for a POST request). Each parameter is specified as "name(expr)", where expr is an default syntax expression that is evaluated at run time to provide the value for the named parameter (name=value). The parameter values are URL encoded. Mutually exclusive with the full HTTP request expression.
		"""
		try :
			self._parameters = parameters
		except Exception as e:
			raise e
	@property
	def bodyexpr(self) :
		"""An advanced string expression for generating the body of the request. The expression can contain a literal string or an expression that derives the value (for example, client.ip.src). Mutually exclusive with -fullReqExpr.<br/>Minimum length = 1.
		"""
		try :
			return self._bodyexpr
		except Exception as e:
			raise e
	@bodyexpr.setter
	def bodyexpr(self, bodyexpr) :
		"""An advanced string expression for generating the body of the request. The expression can contain a literal string or an expression that derives the value (for example, client.ip.src). Mutually exclusive with -fullReqExpr.<br/>Minimum length = 1
		"""
		try :
			self._bodyexpr = bodyexpr
		except Exception as e:
			raise e
	@property
	def fullreqexpr(self) :
		"""Exact HTTP request, in the form of a default syntax expression, which the NetScaler appliance sends to the callout agent. If you set this parameter, you must not include HTTP method, host expression, URL stem expression, headers, or parameters.
		The request expression is constrained by the feature for which the callout is used. For example, an HTTP.RES expression cannot be used in a request-time policy bank or in a TCP content switching policy bank.
		The NetScaler appliance does not check the validity of this request. You must manually validate the request.<br/>Minimum length = 1.
		"""
		try :
			return self._fullreqexpr
		except Exception as e:
			raise e
	@fullreqexpr.setter
	def fullreqexpr(self, fullreqexpr) :
		"""Exact HTTP request, in the form of a default syntax expression, which the NetScaler appliance sends to the callout agent. If you set this parameter, you must not include HTTP method, host expression, URL stem expression, headers, or parameters.
		The request expression is constrained by the feature for which the callout is used. For example, an HTTP.RES expression cannot be used in a request-time policy bank or in a TCP content switching policy bank.
		The NetScaler appliance does not check the validity of this request. You must manually validate the request.<br/>Minimum length = 1
		"""
		try :
			self._fullreqexpr = fullreqexpr
		except Exception as e:
			raise e
	@property
	def scheme(self) :
		"""Type of scheme for the callout server.<br/>Possible values = http, https.
		"""
		try :
			return self._scheme
		except Exception as e:
			raise e
	@scheme.setter
	def scheme(self, scheme) :
		"""Type of scheme for the callout server.<br/>Possible values = http, https
		"""
		try :
			self._scheme = scheme
		except Exception as e:
			raise e
	@property
	def resultexpr(self) :
		"""Expression that extracts the callout results from the response sent by the HTTP callout agent. Must be a response based expression, that is, it must begin with HTTP.RES. The operations in this expression must match the return type. For example, if you configure a return type of TEXT, the result expression must be a text based expression. If the return type is NUM, the result expression (resultExpr) must return a numeric value, as in the following example: http.res.body(10000).length.<br/>Minimum length = 1.
		"""
		try :
			return self._resultexpr
		except Exception as e:
			raise e
	@resultexpr.setter
	def resultexpr(self, resultexpr) :
		"""Expression that extracts the callout results from the response sent by the HTTP callout agent. Must be a response based expression, that is, it must begin with HTTP.RES. The operations in this expression must match the return type. For example, if you configure a return type of TEXT, the result expression must be a text based expression. If the return type is NUM, the result expression (resultExpr) must return a numeric value, as in the following example: http.res.body(10000).length.<br/>Minimum length = 1
		"""
		try :
			self._resultexpr = resultexpr
		except Exception as e:
			raise e
	@property
	def cacheforsecs(self) :
		"""Duration, in seconds, for which the callout response is cached. The cached responses are stored in an integrated caching content group named "calloutContentGroup". If no duration is configured, the callout responses will not be cached unless normal caching configuration is used to cache them. This parameter takes precedence over any normal caching configuration that would otherwise apply to these responses.
		Note that the calloutContentGroup definition may not be modified or removed nor may it be used with other cache policies.<br/>Minimum length = 1<br/>Maximum length = 31536000.
		"""
		try :
			return self._cacheforsecs
		except Exception as e:
			raise e
	@cacheforsecs.setter
	def cacheforsecs(self, cacheforsecs) :
		"""Duration, in seconds, for which the callout response is cached. The cached responses are stored in an integrated caching content group named "calloutContentGroup". If no duration is configured, the callout responses will not be cached unless normal caching configuration is used to cache them. This parameter takes precedence over any normal caching configuration that would otherwise apply to these responses.
		Note that the calloutContentGroup definition may not be modified or removed nor may it be used with other cache policies.<br/>Minimum length = 1<br/>Maximum length = 31536000
		"""
		try :
			self._cacheforsecs = cacheforsecs
		except Exception as e:
			raise e
	@property
	def comment(self) :
		"""Any comments to preserve information about this HTTP callout.
		"""
		try :
			return self._comment
		except Exception as e:
			raise e
	@comment.setter
	def comment(self, comment) :
		"""Any comments to preserve information about this HTTP callout.
		"""
		try :
			self._comment = comment
		except Exception as e:
			raise e
	@property
	def hits(self) :
		"""Total hits.
		"""
		try :
			return self._hits
		except Exception as e:
			raise e
	@property
	def undefhits(self) :
		"""Total undefs.
		"""
		try :
			return self._undefhits
		except Exception as e:
			raise e
	@property
	def svrstate(self) :
		"""The state of the service.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
		"""
		try :
			return self._svrstate
		except Exception as e:
			raise e
	@property
	def undefreason(self) :
		"""Reason for last undef.<br/>Possible values = Failed to add service, Vserver not found, Not a HTTP or SSL vserver, Generated callout request is invalid, Content-Length header not found in callout request, Not enough space to put Content-Length value, Config incomplete, Server is DOWN, Creating callout connection failed, No memory to generate callout request packets, No memory to create callout task, No memory to create callout async, Callout request expression undef, No callout response expression, Skipped callout response eval, Callout response pixl init undef, Callout response expression undef.
		"""
		try :
			return self._undefreason
		except Exception as e:
			raise e
	@property
	def recursivecallout(self) :
		"""Number of recursive callouts.
		"""
		try :
			return self._recursivecallout
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(policyhttpcallout_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session token is no longer valid;
				# drop the cached session before deciding whether to raise.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# Only severity "ERROR" is fatal; warnings fall through.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.policyhttpcallout
		except Exception as e :
			raise e
	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		try :
			# 'name' is the sole identifier for this resource type.
			if (self.name) :
				return str(self.name)
			return None
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		""" Use this API to add policyhttpcallout.
		"""
		try :
			# Accepts either a single resource object or a list of them;
			# lists are sent as one bulk request.
			if type(resource) is not list :
				addresource = policyhttpcallout()
				addresource.name = resource.name
				addresource.ipaddress = resource.ipaddress
				addresource.port = resource.port
				addresource.vserver = resource.vserver
				addresource.returntype = resource.returntype
				addresource.httpmethod = resource.httpmethod
				addresource.hostexpr = resource.hostexpr
				addresource.urlstemexpr = resource.urlstemexpr
				addresource.headers = resource.headers
				addresource.parameters = resource.parameters
				addresource.bodyexpr = resource.bodyexpr
				addresource.fullreqexpr = resource.fullreqexpr
				addresource.scheme = resource.scheme
				addresource.resultexpr = resource.resultexpr
				addresource.cacheforsecs = resource.cacheforsecs
				addresource.comment = resource.comment
				return addresource.add_resource(client)
			else :
				if (resource and len(resource) > 0) :
					addresources = [ policyhttpcallout() for _ in range(len(resource))]
					for i in range(len(resource)) :
						addresources[i].name = resource[i].name
						addresources[i].ipaddress = resource[i].ipaddress
						addresources[i].port = resource[i].port
						addresources[i].vserver = resource[i].vserver
						addresources[i].returntype = resource[i].returntype
						addresources[i].httpmethod = resource[i].httpmethod
						addresources[i].hostexpr = resource[i].hostexpr
						addresources[i].urlstemexpr = resource[i].urlstemexpr
						addresources[i].headers = resource[i].headers
						addresources[i].parameters = resource[i].parameters
						addresources[i].bodyexpr = resource[i].bodyexpr
						addresources[i].fullreqexpr = resource[i].fullreqexpr
						addresources[i].scheme = resource[i].scheme
						addresources[i].resultexpr = resource[i].resultexpr
						addresources[i].cacheforsecs = resource[i].cacheforsecs
						addresources[i].comment = resource[i].comment
				result = cls.add_bulk_request(client, addresources)
			return result
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		""" Use this API to delete policyhttpcallout.
		"""
		try :
			# 'resource' may be a name string, a resource object, or a list
			# of either; list forms are sent as one bulk request.
			if type(resource) is not list :
				deleteresource = policyhttpcallout()
				if type(resource) != type(deleteresource):
					deleteresource.name = resource
				else :
					deleteresource.name = resource.name
				return deleteresource.delete_resource(client)
			else :
				if type(resource[0]) != cls :
					# List of plain name strings.
					if (resource and len(resource) > 0) :
						deleteresources = [ policyhttpcallout() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i]
				else :
					# List of resource objects.
					if (resource and len(resource) > 0) :
						deleteresources = [ policyhttpcallout() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i].name
				result = cls.delete_bulk_request(client, deleteresources)
			return result
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		""" Use this API to update policyhttpcallout.
		"""
		try :
			# Accepts either a single resource object or a list of them;
			# lists are sent as one bulk request.
			if type(resource) is not list :
				updateresource = policyhttpcallout()
				updateresource.name = resource.name
				updateresource.ipaddress = resource.ipaddress
				updateresource.port = resource.port
				updateresource.vserver = resource.vserver
				updateresource.returntype = resource.returntype
				updateresource.httpmethod = resource.httpmethod
				updateresource.hostexpr = resource.hostexpr
				updateresource.urlstemexpr = resource.urlstemexpr
				updateresource.headers = resource.headers
				updateresource.parameters = resource.parameters
				updateresource.bodyexpr = resource.bodyexpr
				updateresource.fullreqexpr = resource.fullreqexpr
				updateresource.scheme = resource.scheme
				updateresource.resultexpr = resource.resultexpr
				updateresource.cacheforsecs = resource.cacheforsecs
				updateresource.comment = resource.comment
				return updateresource.update_resource(client)
			else :
				if (resource and len(resource) > 0) :
					updateresources = [ policyhttpcallout() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].ipaddress = resource[i].ipaddress
						updateresources[i].port = resource[i].port
						updateresources[i].vserver = resource[i].vserver
						updateresources[i].returntype = resource[i].returntype
						updateresources[i].httpmethod = resource[i].httpmethod
						updateresources[i].hostexpr = resource[i].hostexpr
						updateresources[i].urlstemexpr = resource[i].urlstemexpr
						updateresources[i].headers = resource[i].headers
						updateresources[i].parameters = resource[i].parameters
						updateresources[i].bodyexpr = resource[i].bodyexpr
						updateresources[i].fullreqexpr = resource[i].fullreqexpr
						updateresources[i].scheme = resource[i].scheme
						updateresources[i].resultexpr = resource[i].resultexpr
						updateresources[i].cacheforsecs = resource[i].cacheforsecs
						updateresources[i].comment = resource[i].comment
				result = cls.update_bulk_request(client, updateresources)
			return result
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of policyhttpcallout resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			# 'resource' may be a name string, a resource object, or a list
			# of either; list forms are sent as one bulk request.
			if type(resource) is not list :
				unsetresource = policyhttpcallout()
				if type(resource) != type(unsetresource):
					unsetresource.name = resource
				else :
					unsetresource.name = resource.name
				return unsetresource.unset_resource(client, args)
			else :
				if type(resource[0]) != cls :
					# List of plain name strings.
					if (resource and len(resource) > 0) :
						unsetresources = [ policyhttpcallout() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i]
				else :
					# List of resource objects.
					if (resource and len(resource) > 0) :
						unsetresources = [ policyhttpcallout() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i].name
				result = cls.unset_bulk_request(client, unsetresources, args)
			return result
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the policyhttpcallout resources that are configured on netscaler.
		"""
		try :
			if not name :
				# No name: fetch every configured callout.
				obj = policyhttpcallout()
				response = obj.get_resources(client, option_)
			else :
				# NOTE(review): if 'name' is already a policyhttpcallout
				# instance, no branch assigns 'response' before the return
				# below — callers are expected to pass a name or list of
				# names only.
				if type(name) != cls :
					if type(name) is not list :
						obj = policyhttpcallout()
						obj.name = name
						response = obj.get_resource(client, option_)
					else :
						# One GET per name in the list.
						if name and len(name) > 0 :
							response = [policyhttpcallout() for _ in range(len(name))]
							obj = [policyhttpcallout() for _ in range(len(name))]
							for i in range(len(name)) :
								obj[i] = policyhttpcallout()
								obj[i].name = name[i]
								response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e
	@classmethod
	def get_filtered(cls, client, filter_) :
		""" Use this API to fetch filtered set of policyhttpcallout resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = policyhttpcallout()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e
	@classmethod
	def count(cls, client) :
		""" Use this API to count the policyhttpcallout resources configured on NetScaler.
		"""
		try :
			obj = policyhttpcallout()
			option_ = options()
			option_.count = True
			response = obj.get_resources(client, option_)
			# Count queries return a single record whose reserved '___count'
			# field carries the total.
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e
	@classmethod
	def count_filtered(cls, client, filter_) :
		""" Use this API to count filtered the set of policyhttpcallout resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = policyhttpcallout()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			# Count queries return a single record whose reserved '___count'
			# field carries the total.
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e
	# Enumeration constants mirroring the possible values documented on the
	# corresponding properties above.
	class Svrstate:
		UP = "UP"
		DOWN = "DOWN"
		UNKNOWN = "UNKNOWN"
		BUSY = "BUSY"
		OUT_OF_SERVICE = "OUT OF SERVICE"
		GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
		DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
		NS_EMPTY_STR = "NS_EMPTY_STR"
		Unknown = "Unknown"
		DISABLED = "DISABLED"
	class Scheme:
		http = "http"
		https = "https"
	class Httpmethod:
		GET = "GET"
		POST = "POST"
	class Returntype:
		BOOL = "BOOL"
		NUM = "NUM"
		TEXT = "TEXT"
	class Undefreason:
		Failed_to_add_service = "Failed to add service"
		Vserver_not_found = "Vserver not found"
		Not_a_HTTP_or_SSL_vserver = "Not a HTTP or SSL vserver"
		Generated_callout_request_is_invalid = "Generated callout request is invalid"
		Content_Length_header_not_found_in_callout_request = "Content-Length header not found in callout request"
		Not_enough_space_to_put_Content_Length_value = "Not enough space to put Content-Length value"
		Config_incomplete = "Config incomplete"
		Server_is_DOWN = "Server is DOWN"
		Creating_callout_connection_failed = "Creating callout connection failed"
		No_memory_to_generate_callout_request_packets = "No memory to generate callout request packets"
		No_memory_to_create_callout_task = "No memory to create callout task"
		No_memory_to_create_callout_async = "No memory to create callout async"
		Callout_request_expression_undef = "Callout request expression undef"
		No_callout_response_expression = "No callout response expression"
		Skipped_callout_response_eval = "Skipped callout response eval"
		Callout_response_pixl_init_undef = "Callout response pixl init undef"
		Callout_response_expression_undef = "Callout response expression undef"
class policyhttpcallout_response(base_response) :
	""" Nitro response envelope for policyhttpcallout requests.

	Carries the standard Nitro status fields plus the list of
	policyhttpcallout records deserialized from the response body.
	"""
	def __init__(self, length=1) :
		# Standard Nitro response status fields.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one resource slot per expected record.
		# (The original code first assigned an empty list here and then
		# immediately overwrote it; the dead assignment is removed.)
		self.policyhttpcallout = [policyhttpcallout() for _ in range(length)]
| |
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import mock
from ec2stack.helpers import read_file, generate_signature
from . import Ec2StackAppTestCase
class SecurityGroupTestCase(Ec2StackAppTestCase):
    def test_authorize_security_group_ingress_by_name(self):
        """Authorizing a TCP ingress rule by GroupName succeeds."""
        data = self.get_example_data()
        data['Action'] = 'AuthorizeSecurityGroupIngress'
        data['GroupName'] = 'test'
        data['FromPort'] = '1000'
        data['ToPort'] = '1024'
        data['IpProtocol'] = 'tcp'
        data['CidrIp'] = '0.0.0.0/0'
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        # Stub the backend HTTP call with a canned success payload.
        get = mock.Mock()
        get.return_value.text = read_file(
            'tests/data/valid_authorize_security_group_ingress.json'
        )
        get.return_value.status_code = 200
        with mock.patch('requests.get', get):
            response = self.post(
                '/',
                data=data
            )
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupIngressResponse' in response.data
    def test_authorize_security_group_ingress_by_id(self):
        """Authorizing an ICMP ingress rule by GroupId succeeds."""
        data = self.get_example_data()
        data['Action'] = 'AuthorizeSecurityGroupIngress'
        data['GroupId'] = '7ae5b92f-3a0d-4977-bc33-f1aaecee5776'
        # -1/-1 is the EC2 convention for "all ICMP types/codes".
        data['FromPort'] = '-1'
        data['ToPort'] = '-1'
        data['IpProtocol'] = 'icmp'
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        # Stub the backend HTTP call with a canned success payload.
        get = mock.Mock()
        get.return_value.text = read_file(
            'tests/data/valid_authorize_security_group_ingress.json'
        )
        get.return_value.status_code = 200
        with mock.patch('requests.get', get):
            response = self.post(
                '/',
                data=data
            )
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupIngressResponse' in response.data
    def test_authorize_security_group_egress_by_name(self):
        """Authorizing a TCP egress rule by GroupName succeeds."""
        data = self.get_example_data()
        data['Action'] = 'AuthorizeSecurityGroupEgress'
        data['GroupName'] = 'test'
        data['FromPort'] = '1000'
        data['ToPort'] = '1024'
        data['IpProtocol'] = 'tcp'
        data['CidrIp'] = '0.0.0.0/0'
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        # Stub the backend HTTP call with a canned success payload.
        get = mock.Mock()
        get.return_value.text = read_file(
            'tests/data/valid_authorize_security_group_egress.json'
        )
        get.return_value.status_code = 200
        with mock.patch('requests.get', get):
            response = self.post(
                '/',
                data=data
            )
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupEgressResponse' in response.data
    def test_authorize_security_group_egress_by_id(self):
        """Authorizing an ICMP egress rule by GroupId succeeds."""
        data = self.get_example_data()
        data['Action'] = 'AuthorizeSecurityGroupEgress'
        data['GroupId'] = '7ae5b92f-3a0d-4977-bc33-f1aaecee5776'
        # -1/-1 is the EC2 convention for "all ICMP types/codes".
        data['FromPort'] = '-1'
        data['ToPort'] = '-1'
        data['IpProtocol'] = 'icmp'
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        # Stub the backend HTTP call with a canned success payload.
        get = mock.Mock()
        get.return_value.text = read_file(
            'tests/data/valid_authorize_security_group_egress.json'
        )
        get.return_value.status_code = 200
        with mock.patch('requests.get', get):
            response = self.post(
                '/',
                data=data
            )
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupEgressResponse' in response.data
    def test_duplicate_authorize_security_group(self):
        """Re-authorizing an existing rule maps to InvalidPermission.Duplicate."""
        data = self.get_example_data()
        data['Action'] = 'AuthorizeSecurityGroupEgress'
        data['GroupName'] = 'test'
        data['FromPort'] = '1000'
        data['ToPort'] = '1024'
        data['IpProtocol'] = 'tcp'
        data['CidrIp'] = '0.0.0.0/0'
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        # Stub the backend HTTP call with a canned duplicate-rule error payload.
        get = mock.Mock()
        get.return_value.text = read_file(
            'tests/data/invalid_authorize_security_group_egress_duplicate.json'
        )
        get.return_value.status_code = 200
        with mock.patch('requests.get', get):
            response = self.post(
                '/',
                data=data
            )
        self.assert_bad_request(response)
        assert 'InvalidPermission.Duplicate' in response.data
def test_invalid_rule_authorize_security_group(self):
    """An out-of-range port in the rule yields an InvalidRequest error."""
    params = self.get_example_data()
    params.update({
        'Action': 'AuthorizeSecurityGroupEgress',
        'GroupName': 'test',
        'FromPort': '1000',
        'ToPort': '99999',
        'IpProtocol': 'tcp',
        'CidrIp': '0.0.0.0/24',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/invalid_authorize_security_group_egress.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidRequest' in response.data
def test_invalid_security_group_authorize_security_group(self):
    """Authorizing against a nonexistent group yields InvalidGroup.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'AuthorizeSecurityGroupEgress',
        'GroupName': 'invalid-security-group',
        'FromPort': '1000',
        'ToPort': '1024',
        'IpProtocol': 'tcp',
        'CidrIp': '0.0.0.0/24',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/'
        'invalid_security_group_authorize_security_group.json'
    )
    mocked_get.return_value.status_code = 431
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.NotFound' in response.data
def test_create_security_group(self):
    """Creating a security group returns a success response."""
    params = self.get_example_data()
    params.update({
        'Action': 'CreateSecurityGroup',
        'GroupName': 'securitygroupname',
        'GroupDescription': 'security group description',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_create_security_group.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'CreateSecurityGroupResponse' in response.data
def test_create_duplicate_security_group(self):
    """Creating a group whose name already exists yields InvalidGroup.Duplicate."""
    params = self.get_example_data()
    params.update({
        'Action': 'CreateSecurityGroup',
        'GroupName': 'securitygroupname',
        'GroupDescription': 'security group description',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/invalid_create_security_group_duplicate.json'
    )
    mocked_get.return_value.status_code = 431
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.Duplicate' in response.data
def test_delete_security_group_by_name(self):
    """Deleting a security group by name returns a success response."""
    params = self.get_example_data()
    params.update({
        'Action': 'DeleteSecurityGroup',
        'GroupName': 'securitygroupname',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_delete_security_group.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'DeleteSecurityGroupResponse' in response.data
def test_delete_security_group_by_id(self):
    """Deleting a security group by id returns a success response."""
    params = self.get_example_data()
    params.update({
        'Action': 'DeleteSecurityGroup',
        'GroupId': 'securitygroupname',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_delete_security_group.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'DeleteSecurityGroupResponse' in response.data
def test_invalid_delete_security_group(self):
    """Deleting without GroupName or GroupId yields MissingParameter.

    No HTTP mock is needed: the request is rejected before any backend call.
    """
    params = self.get_example_data()
    params['Action'] = 'DeleteSecurityGroup'
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'MissingParameter' in response.data
def test_describe_security_groups(self):
    """Describing all security groups returns a success response."""
    params = self.get_example_data()
    params['Action'] = 'DescribeSecurityGroups'
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'DescribeSecurityGroupsResponse' in response.data
def test_describe_security_group_by_id(self):
    """Describing a specific group by id returns that group in the response."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupId': '3b637c2e-b0a8-40ae-a7a3-2bef2871d36d',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'DescribeSecurityGroupsResponse' in response.data
    assert '3b637c2e-b0a8-40ae-a7a3-2bef2871d36d' in response.data
def test_invalid_describe_security_group_by_id(self):
    """Describing an unknown group id yields InvalidGroup.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupId': 'invalid-security-group-id',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.NotFound' in response.data
def test_empty_response_describe_security_group_by_id(self):
    """An empty backend listing for a group id yields InvalidGroup.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupId': 'invalid-security-group-id',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/empty_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.NotFound' in response.data
def test_describe_security_group_by_name(self):
    """Describing a specific group by name returns that group in the response."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupName': 'test',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'DescribeSecurityGroupsResponse' in response.data
    assert 'test' in response.data
def test_invalid_describe_security_group_by_name(self):
    """Describing an unknown group name yields InvalidGroup.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupName': 'invalid-name',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/valid_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.NotFound' in response.data
def test_empty_response_describe_security_group_by_name(self):
    """An empty backend listing for a group name yields InvalidGroup.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'DescribeSecurityGroups',
        'GroupName': 'invalid-name',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/empty_describe_security_groups.json'
    )
    mocked_get.return_value.status_code = 200
    with mock.patch('requests.get', mocked_get):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidGroup.NotFound' in response.data
def test_revoke_security_group_ingress(self):
    """Revoking an existing ingress rule returns a success response.

    The backend rule lookup (describe_item_request) is patched so the rule
    to revoke is found.
    """
    params = self.get_example_data()
    params.update({
        'Action': 'RevokeSecurityGroupIngress',
        'GroupId': '7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
        'FromPort': '1000',
        'ToPort': '1024',
        'IpProtocol': 'tcp',
        'CidrIp': '192.168.0.0/24',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/revoke_security_group_ingress.json'
    )
    mocked_get.return_value.status_code = 200
    mocked_describe = mock.Mock()
    mocked_describe.return_value = json.loads(read_file(
        'tests/data/revoke_security_group_search.json'
    ))
    with mock.patch('requests.get', mocked_get), \
            mock.patch(
                'ec2stack.providers.cloudstack.describe_item_request',
                mocked_describe
            ):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'RevokeSecurityGroupIngressResponse' in response.data
def test_revoke_security_group_egress(self):
    """Revoking an existing egress rule returns a success response."""
    params = self.get_example_data()
    params.update({
        'Action': 'RevokeSecurityGroupEgress',
        'GroupId': '7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
        'FromPort': '-1',
        'ToPort': '-1',
        'IpProtocol': 'icmp',
        'CidrIp': '192.168.0.0/24',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/revoke_security_group_egress.json'
    )
    mocked_get.return_value.status_code = 200
    mocked_describe = mock.Mock()
    mocked_describe.return_value = json.loads(read_file(
        'tests/data/revoke_security_group_search.json'
    ))
    with mock.patch('requests.get', mocked_get), \
            mock.patch(
                'ec2stack.providers.cloudstack.describe_item_request',
                mocked_describe
            ):
        response = self.post('/', data=params)
    self.assert_ok(response)
    assert 'RevokeSecurityGroupEgressResponse' in response.data
def test_invalid_revoke_security_group(self):
    """Revoking a rule that does not exist yields InvalidPermission.NotFound."""
    params = self.get_example_data()
    params.update({
        'Action': 'RevokeSecurityGroupEgress',
        'GroupId': '7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
        'FromPort': '0',
        'ToPort': '0',
        'IpProtocol': 'invalid',
        'CidrIp': '192.168.0.0/24',
    })
    params['Signature'] = generate_signature(params, 'POST', 'localhost', '/')
    mocked_get = mock.Mock()
    mocked_get.return_value.text = read_file(
        'tests/data/revoke_security_group_egress.json'
    )
    mocked_get.return_value.status_code = 200
    mocked_describe = mock.Mock()
    mocked_describe.return_value = json.loads(read_file(
        'tests/data/revoke_security_group_search.json'
    ))
    with mock.patch('requests.get', mocked_get), \
            mock.patch(
                'ec2stack.providers.cloudstack.describe_item_request',
                mocked_describe
            ):
        response = self.post('/', data=params)
    self.assert_bad_request(response)
    assert 'InvalidPermission.NotFound' in response.data
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Provenance) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class Provenance(domainresource.DomainResource):
    """ Who, What, When for a set of resources.

    A Provenance resource records the entities and processes involved in
    producing, delivering or otherwise influencing a resource.  Provenance
    is a foundation for assessing authenticity, enabling trust and allowing
    reproducibility; provenance assertions are contextual metadata that can
    themselves become records with their own provenance.  A provenance
    statement can indicate clinical significance (confidence in authenticity,
    reliability, trustworthiness, integrity and lifecycle stage, e.g. whether
    a document has been legally authenticated), all of which may impact
    security, privacy and trust policies.
    """

    resource_name = "Provenance"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Activity that occurred.  Type `CodeableConcept` (dict in JSON).
        self.activity = None
        # Agents involved in creating resource.  List of `ProvenanceAgent`.
        self.agent = None
        # An entity used in this activity.  List of `ProvenanceEntity`.
        self.entity = None
        # Where the activity occurred, if relevant.  `FHIRReference` to `Location`.
        self.location = None
        # When the activity occurred.  Type `Period`.
        self.period = None
        # Policy or plan the activity was defined by.  List of `str`.
        self.policy = None
        # Reason the activity is occurring.  List of `CodeableConcept`.
        self.reason = None
        # When the activity was recorded / updated.  `FHIRDate` (str in JSON).
        self.recorded = None
        # Signature on target.  List of `Signature`.
        self.signature = None
        # Target Reference(s), usually version specific.  List of `FHIRReference`.
        self.target = None

        super(Provenance, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (attr, json_name, type, is_list, of_many, required).
        js = super(Provenance, self).elementProperties()
        js += [
            ("activity", "activity", codeableconcept.CodeableConcept, False, None, False),
            ("agent", "agent", ProvenanceAgent, True, None, False),
            ("entity", "entity", ProvenanceEntity, True, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("period", "period", period.Period, False, None, False),
            ("policy", "policy", str, True, None, False),
            ("reason", "reason", codeableconcept.CodeableConcept, True, None, False),
            ("recorded", "recorded", fhirdate.FHIRDate, False, None, True),
            ("signature", "signature", signature.Signature, True, None, False),
            ("target", "target", fhirreference.FHIRReference, True, None, True),
        ]
        return js
from . import backboneelement
class ProvenanceAgent(backboneelement.BackboneElement):
    """ Agents involved in creating resource.

    An agent takes a role in an activity such that it can be assigned some
    degree of responsibility for the activity taking place.  An agent can be
    a person, an organization, software, or another entity that may be
    ascribed responsibility.
    """

    resource_name = "ProvenanceAgent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Individual, device or organization playing role.  `FHIRReference`.
        self.actor = None
        # Track delegation between agents.  List of `ProvenanceAgentRelatedAgent`.
        self.relatedAgent = None
        # What the agents involvement was.  Type `Coding`.
        self.role = None
        # Authorization-system identifier for the agent.  Type `Identifier`.
        self.userId = None

        super(ProvenanceAgent, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (attr, json_name, type, is_list, of_many, required).
        js = super(ProvenanceAgent, self).elementProperties()
        js += [
            ("actor", "actor", fhirreference.FHIRReference, False, None, False),
            ("relatedAgent", "relatedAgent", ProvenanceAgentRelatedAgent, True, None, False),
            ("role", "role", coding.Coding, False, None, True),
            ("userId", "userId", identifier.Identifier, False, None, False),
        ]
        return js
class ProvenanceAgentRelatedAgent(backboneelement.BackboneElement):
    """ Track delegation between agents.

    A relationship between two of the agents referenced in this resource,
    allowing explicit description of delegation between them — e.g. this
    human author used this device, or one person acted on another's behest.
    """

    resource_name = "ProvenanceAgentRelatedAgent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Reference to the other agent in this resource, by identifier.  `str`.
        self.target = None
        # Type of relationship between agents.  Type `CodeableConcept`.
        self.type = None

        super(ProvenanceAgentRelatedAgent, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (attr, json_name, type, is_list, of_many, required).
        js = super(ProvenanceAgentRelatedAgent, self).elementProperties()
        js += [
            ("target", "target", str, False, None, True),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ]
        return js
class ProvenanceEntity(backboneelement.BackboneElement):
    """ An entity used in this activity.
    """

    resource_name = "ProvenanceEntity"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Entity is attributed to this agent.  Type `ProvenanceAgent`.
        self.agent = None
        # Human description of entity.  Type `str`.
        self.display = None
        # Identity of entity.  Type `str`.
        self.reference = None
        # derivation | revision | quotation | source.  Type `str`.
        self.role = None
        # The type of resource in this entity.  Type `Coding`.
        self.type = None

        super(ProvenanceEntity, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (attr, json_name, type, is_list, of_many, required).
        js = super(ProvenanceEntity, self).elementProperties()
        js += [
            ("agent", "agent", ProvenanceAgent, False, None, False),
            ("display", "display", str, False, None, False),
            ("reference", "reference", str, False, None, True),
            ("role", "role", str, False, None, True),
            ("type", "type", coding.Coding, False, None, True),
        ]
        return js
from . import codeableconcept
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
from . import signature
| |
import os
import re
import tqdm
import glob
import logging
import networkx
import subprocess
import shellphish_qemu
l = logging.getLogger('fuzzer.input_hierarchy')
class Input(object):
    """A single fuzzer test case (queue entry or crash), parsed from its
    AFL-style filename.

    The filename encodes comma-separated metadata fields (id, src, sync, op,
    rep, orig, pos, val, from, sig, +cov, grease) which _process_filename()
    parses into attributes.  Trace, origin and contribution analyses are
    computed lazily and cached on the instance.
    """

    def __init__(self, filename, instance, hierarchy):
        self.hierarchy = hierarchy
        self.instance = instance
        self.filename = filename

        # Metadata parsed out of the filename.
        self.id = None
        self.source_ids = [ ]
        self.sources = [ ]
        self.cov = False
        self.op = None
        self.synced_from = None
        self.other_fields = { }
        self.val = None
        self.rep = None
        self.pos = None
        self.orig = None
        self.crash = False
        self.sig = None
        self._process_filename(filename)

        self.looped = False
        self.timestamp = os.stat(self.filepath).st_mtime

        # Cached analysis results (filled lazily by the properties below).
        self._trace = None
        self._origins = None
        self._contributing_techniques = None
        self._technique_contributions = None

    def _process_filename(self, filename):
        """Parse the comma-separated AFL metadata fields out of *filename*."""
        fields = filename.split(',')
        for f in fields:
            if f == "+cov":
                self.cov = True
            elif f == "grease":
                assert self.id
                self.orig = "greased_%s" % self.id
            else:
                n, v = f.split(':', 1)
                if n == 'id':
                    assert not self.id
                    self.id = v
                elif n == 'src':
                    assert not self.source_ids
                    self.source_ids = v.split('+')
                elif n == 'sync':
                    assert not self.synced_from
                    self.synced_from = v
                elif n == 'op':
                    assert not self.op
                    self.op = v
                elif n == 'rep':
                    assert not self.rep
                    self.rep = v
                elif n == 'orig':
                    assert not self.orig
                    self.orig = v
                elif n == 'pos':
                    assert not self.pos
                    self.pos = v
                elif n == 'val':
                    assert not self.val
                    self.val = v
                elif n == 'from': # driller uses this instead of synced/src
                    instance, from_id = v[:-6], v[-6:]
                    self.synced_from = instance
                    self.source_ids.append(from_id)
                elif n == 'sig':
                    # Only crashes carry a signal field; crash ids get a 'c' prefix.
                    assert not self.crash
                    assert not self.sig
                    assert self.id
                    self.crash = True
                    self.sig = v
                    self.id = 'c' + self.id
                else:
                    l.warning("Got unexpected field %s with value %s for file %s.", n, v, filename)
                    self.other_fields[n] = v

        assert self.id is not None
        assert self.source_ids or self.orig

    def _resolve_sources(self):
        """Resolve source_ids into actual Input objects (after all inputs load)."""
        try:
            if self.synced_from:
                self.sources = [ self.hierarchy.instance_input(self.synced_from, self.source_ids[0]) ]
            else:
                self.sources = [ self.hierarchy.instance_input(self.instance, i) for i in self.source_ids ]
        except KeyError as e:
            l.warning("Unable to resolve source ID %s for %s", e, self)
            self.sources = [ ]

    @property
    def filepath(self):
        """Absolute path of this testcase on disk (queue/ or crashes/)."""
        return os.path.join(
            self.hierarchy._dir, self.instance,
            'crashes' if self.crash else 'queue', self.filename
        )

    def read(self):
        """Return the raw bytes of the testcase."""
        with open(self.filepath, 'rb') as f:
            return f.read()

    def __repr__(self):
        return "<Input inst:%s,%s>" % (self.instance, self.filename)

    #
    # Lineage analysis.
    #

    @property
    def lineage(self):
        """Yield all (transitive) ancestors, depth-first, then this input."""
        for p in self.sources:
            for pl in p.lineage:
                yield pl
        yield self

    def print_lineage(self, depth=0):
        """Pretty-print the ancestry tree of this input."""
        if depth:
            print(' ' * depth + str(self))
        else:
            print(self)
        for parent in self.sources:
            parent.print_lineage(depth=depth + 1)

    @property
    def origins(self, follow_extensions=False):
        """
        Return the origins of this seed.

        NOTE(review): `follow_extensions` can never actually be passed because
        this is a property; it always takes its default value.
        """
        if self._origins is not None:
            return self._origins

        if not follow_extensions and not self.instance.startswith('fuzzer-'):
            o = { self }
        elif not self.sources:
            o = { self }
        else:
            o = set.union(*(s.origins for s in self.sources))

        self._origins = o
        return self._origins

    @property
    def technique(self):
        """Technique name: 'fuzzer' for AFL instances, otherwise the instance name."""
        return 'fuzzer' if self.instance.startswith('fuzzer-') else self.instance

    @property
    def contributing_techniques(self):
        """Frozenset of techniques that contributed to this input's lineage."""
        if self._contributing_techniques is None:
            # don't count this current technique if we synced it
            if self.synced_from:
                new_technique = frozenset()
            else:
                new_technique = frozenset([self.technique])
            self._contributing_techniques = frozenset.union(
                new_technique, *(i.contributing_techniques for i in self.sources)
            )
        return self._contributing_techniques

    @property
    def contributing_instances(self):
        """Set of instance names appearing anywhere in this input's lineage."""
        return set(i.instance for i in self.lineage)

    @property
    def output(self):
        """Run the testcase through the CGC tracer and return its stdout."""
        # BUGFIX: open the testcase in binary mode -- Popen pipes expect bytes
        # (consistent with `trace` below); text mode fails on Python 3.
        with open('/dev/null', 'w') as devnull, open(self.filepath, 'rb') as sf:
            cmd_args = [
                'timeout', '60', shellphish_qemu.qemu_path('cgc-tracer'),
                self.hierarchy._fuzzer.binary_path
            ]
            process = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=devnull)
            stdout, _ = process.communicate(sf.read())
            return stdout

    @property
    def trace(self):
        """List of basic-block addresses executed by this testcase (cached).

        Runs the input through QEMU with '-d exec' and parses the exec log
        lines that QEMU writes to stderr.
        """
        if self._trace is not None:
            return self._trace

        with open(self.filepath, 'rb') as sf:
            cmd_args = [
                'timeout', '2',
                shellphish_qemu.qemu_path('cgc-tracer'),
                '-d', 'exec',
                self.hierarchy._fuzzer.binary_path
            ]
            process = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, stderr = process.communicate(sf.read())

        trace = [ ]
        for tline in stderr.split(b'\n'):
            result = re.match(br'Trace 0x[0-9a-fA-F]* \[([0-9a-fA-F]*)\]', tline.strip())
            if not result:
                continue
            trace.append(int(result.group(1), base=16))

        self._trace = trace
        return trace

    @property
    def transitions(self):
        """Consecutive (block, block) pairs of the trace."""
        return [ (self.trace[i], self.trace[i+1]) for i in range(len(self.trace)-1) ]

    @property
    def transition_set(self):
        return set(self.transitions)

    @property
    def new_transitions(self):
        """Transitions this input triggers that none of its sources did."""
        if self.sources:
            return self.transition_set - set.union(*(s.transition_set for s in self.sources))
        else:
            return self.transition_set

    @property
    def block_set(self):
        return set(self.trace)

    @property
    def new_blocks(self):
        """Blocks this input triggers that none of its sources did."""
        if self.sources:
            return self.block_set - set.union(*(s.block_set for s in self.sources))
        else:
            return self.block_set

    @property
    def technique_contributions(self):
        """Map of contributing-technique frozensets to the transitions they found."""
        if self._technique_contributions is not None:
            return self._technique_contributions

        results = {
            self.contributing_techniques: self.new_transitions
        }
        if self.sources:
            for s in self.sources:
                for k, v in s.technique_contributions.items():
                    results.setdefault(k, set()).update(v)

        self._technique_contributions = results
        return results

    @property
    def contribution_counts(self):
        """Map of contributing-technique frozensets to transition counts."""
        # BUGFIX: dict.iteritems() is Python-2-only and crashes here on
        # Python 3; .items() matches technique_contributions above.
        return {
            k: len(v) for k, v in self.technique_contributions.items()
        }
class InputHierarchy(object):
    """
    This class deals with the AFL input hierarchy and analyses done on it.

    It loads every instance's queue (and optionally crashes) from the fuzzer
    job directory, resolves parent/child links between testcases, and offers
    graph/coverage analyses over the result.
    """

    def __init__(self, fuzzer=None, fuzzer_dir=None, load_crashes=False):
        self._fuzzer = fuzzer
        self._dir = fuzzer_dir if fuzzer_dir is not None else fuzzer.job_dir
        self.inputs = { }
        self.instance_inputs = { }
        self.instances = [ ]

        self.reload(load_crashes)
        # Break any (bogus) cycles in the parent/child graph before analysis.
        while self._remove_cycles():
            pass

    def _remove_cycles(self):
        """
        Really hacky way to remove cycles in hierarchies (wtf): mark one node
        of the first cycle as looped and cut its source edges.  Returns True
        while cycles remain, False once the graph is acyclic.
        """
        G = self.make_graph()
        cycles = list(networkx.simple_cycles(G))
        if not cycles:
            return False
        else:
            cycles[0][0].looped = True
            cycles[0][0].sources[:] = [ ]
            return True

    def triggered_blocks(self):
        """
        Gets the triggered blocks by all the testcases.
        """
        return set.union(*(i.block_set for i in tqdm.tqdm(self.inputs.values())))

    def crashes(self):
        """
        Returns the crashes, if they are loaded.
        """
        return [ i for i in self.inputs.values() if i.crash ]

    def technique_contributions(self):
        """
        Get coverage and crashes by technique:
        {technique: [transition_count, crash_count]}.
        """
        results = { }
        for s, (b, c) in self.seed_contributions():
            results.setdefault(s.instance.split('-')[0], [0, 0])[0] += b
            results.setdefault(s.instance.split('-')[0], [0, 0])[1] += c
        return results

    def seed_contributions(self):
        """
        Get the seeds (including inputs introduced by extensions) that
        resulted in coverage and crashes, as (seed, [n_transitions, n_crashes])
        pairs sorted by seed timestamp.
        """
        sorted_inputs = sorted((
            i for i in self.inputs.values() if i.instance.startswith('fuzzer-')
        ), key=lambda j: j.timestamp)

        found = set()
        contributions = { }
        for s in tqdm.tqdm(sorted_inputs):
            o = max(s.origins, key=lambda i: i.timestamp)
            if s.crash:
                contributions.setdefault(o, (set(), set()))[1].add(s)
            else:
                c = o.transition_set - found
                if not c:
                    continue
                contributions.setdefault(o, (set(), set()))[0].update(c)
                found |= c

        # BUGFIX: dict.iteritems() does not exist on Python 3; .items() is
        # equivalent and matches the rest of this file.
        return sorted(((k, list(map(len, v))) for k, v in contributions.items()), key=lambda x: x[0].timestamp)

    def reload(self, load_crashes):
        """Re-scan the job directory and (re)load all inputs."""
        self._load_instances()
        for i in self.instances:
            self._load_inputs(i)
            if load_crashes:
                self._load_inputs(i, input_type="crashes")
        self._resolve_sources()
        return self

    def _load_instances(self):
        """Discover fuzzer instances: any directory containing a queue/."""
        self.instances = [
            os.path.basename(os.path.dirname(n))
            for n in glob.glob(os.path.join(self._dir, "*", "queue"))
        ]
        self.instance_inputs = { i: { } for i in self.instances }
        l.debug("Instances: %s", self.instances)

    def _load_inputs(self, instance, input_type="queue"):
        """Load every id* file of *instance* from queue/ or crashes/."""
        l.info("Loading inputs from instance %s", instance)
        for fp in glob.glob(os.path.join(self._dir, instance, input_type, "id*")):
            f = os.path.basename(fp)
            l.debug("Adding input %s (type %s)", f, input_type)
            i = Input(f, instance, self)
            self.inputs[i.instance + ':' + i.id] = i
            self.instance_inputs[i.instance][i.id] = i

    def _resolve_sources(self):
        """Link every input to its parent Input objects."""
        for i in self.inputs.values():
            i._resolve_sources()

    def instance_input(self, instance, id): #pylint:disable=redefined-builtin
        """Look up one input by instance name and testcase id."""
        return self.instance_inputs[instance][id]

    def make_graph(self):
        """Build the parent->child DiGraph over all loaded inputs."""
        G = networkx.DiGraph()
        for child in self.inputs.values():
            for parent in child.sources:
                G.add_edge(parent, child)
        return G

    def plot(self, output=None):
        """Draw the hierarchy graph; save to *output* if given, else show it."""
        import matplotlib.pyplot as plt #pylint:disable=import-error
        plt.close()
        networkx.draw(self.make_graph())
        if output:
            plt.savefig(output)
        else:
            plt.show()
| |
import redis
import logging
import json
import time
import datetime
import itertools
import random
import urllib2
import HTMLParser
import hashlib
import re
import deckstats
# Shared code used by most of the modules (and future modules): redis
# storage helpers plus the recommendation-engine utilities.

# Redis configuration.
# Note: 6379 is the default Redis port, so if you have any other apps
# hitting against redis, you might want to stand up your own.
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

# NOTE(review): this sets the ROOT logger level at import time, which
# affects every module that imports this one -- confirm that is intended.
logging.getLogger().setLevel(logging.DEBUG)

# A global variable that pools a Redis connection (db=2); always access it
# through get_redis() so dead connections are re-established.
_REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=2)

# User-Agent header the scraper sends with every outbound HTTP request.
USER_AGENT = "reddit.com/r/edh recommender by /u/orangeoctopus v2.0"
############# UTILITY FUNCTIONS ###############
# Returns a redis instance. This checks to see if the connetion is open
# and if not creates a new one. Using this function makes it so we don't
# have to recreate the redis connection every time we want to use it.
def get_redis():
    '''Return the pooled redis instance, reconnecting first if the
    connection has gone stale.  Use this instead of touching _REDIS so the
    connection never has to be recreated by callers.'''
    global _REDIS
    try:
        _REDIS.ping()
    except redis.ConnectionError:
        _REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=2)
    return _REDIS
# In-memory card database, loaded once at import time from the CARDS_JSON
# redis hash and parsed from JSON blobs into plain dictionaries.
CARDS = {
    name: json.loads(blob)
    for name, blob in get_redis().hgetall('CARDS_JSON').items()
}
def hash_pyobj(python_obj):
    '''Return the SHA-256 hex digest of *python_obj*'s JSON serialization.

    The JSON text is explicitly encoded to UTF-8 bytes: hashlib refuses
    text input on Python 3, and since json.dumps emits ASCII by default the
    encode is a byte-identical no-op on Python 2.'''
    return hashlib.sha256(json.dumps(python_obj).encode('utf-8')).hexdigest()
# Nasty hack of a function that removes all the characters that annoy me in
# magic cards.
def strip_accents(s):
    '''Replace the accented/special characters that show up in Magic card
    names with plain-ASCII stand-ins.'''
    # Applied in the same order as the original replace() chain.
    replacements = (
        (u'\xc3', 'ae'), (u'\xe6', 'ae'), (u'\xc6', 'ae'),
        (u'\xe9', 'e'), (u'\xf6', 'o'),
        (u'\xfb', 'u'), (u'\xe2', 'a'), (u'\xe1', 'a'), (u'\xe0', 'a'),
        (u'\xae', 'r'), (u'\xfa', 'u'), (u'\xed', 'i'),
    )
    for accented, plain in replacements:
        s = s.replace(accented, plain)
    return s
# The official sanitization function. Any cardnames should be sent through this before
# hitting the data store or whatever.
def sanitize_cardname(cn):
    '''Official cardname sanitizer: run any cardname through this before it
    hits the data store.

    NOTE(review): u_to_str() drops non-ASCII characters first, so by the time
    strip_accents() runs there may be nothing left for it to replace --
    confirm whether that ordering is intended.'''
    ascii_name = u_to_str(cn)
    lowered = strip_accents(ascii_name.strip().lower())
    return HTMLParser.HTMLParser().unescape(lowered).encode('utf-8')
def u_to_str(ustr):
    '''Drop every non-ASCII character from *ustr* and return the remainder
    encoded as UTF-8.'''
    ascii_only = [ch for ch in ustr if ord(ch) < 128]
    return ''.join(ascii_only).encode('utf-8')
def date_from_str(dstr):
    '''Parse a "YYYY-MM-DD HH:MM:SS.ffffff"-style string into a datetime.

    The string is split on space/dot/colon/dash and the final field (the
    fractional seconds) is discarded.'''
    parts = [int(piece) for piece in re.split('[ \.:-]', dstr)[:-1]]
    return datetime.datetime(*parts)
# Undoes most of what sanitize_cardname does. This is used for presentation purposes.
def cap_cardname(cn):
    '''Roughly undo sanitize_cardname for presentation: title-case the name
    while keeping "'s", " the" and " of" lowercased.'''
    name = cn.strip().lower().title()
    for wrong, right in (("'S", "'s"), (' The', ' the'), (' Of', ' of')):
        name = name.replace(wrong, right)
    return name
# looks up the cardname cn in the redis data store. It turns a nice dictionary object that maps the json object.
def lookup_card(cn):
    '''Look up cardname *cn* in the in-memory CARDS database.

    Returns the card's dict (mapping the card JSON) or None if unknown.'''
    sanitized = sanitize_cardname(cn)
    key = str(sanitized)
    if key not in CARDS:
        logging.warn('I couldn\'t find this card: ' + key)
        return None
    return CARDS[key]
# figures out the color identitify for a particular card
def color_identity(cn):
    '''Compute the color identity for card *cn* as a sorted list of color
    names (e.g. ['BLUE', 'WHITE']).

    A color counts if its mana symbol appears in the card's mana cost or in
    its rules text; the text check strips parenthesized reminder text by
    splitting on '(' and keeping only the even-indexed chunks.  Hybrid
    symbols are handled by rewriting '/' into '}{' first.

    :raises ValueError: if the card does not exist.'''
    card = lookup_card(cn)
    if card is None:
        raise ValueError('Card doesnt exist ' + str(cn))
    colors = { '{W}' : 'WHITE' , '{B}' : 'BLACK' , '{U}' : 'BLUE', '{R}' : 'RED', '{G}' : 'GREEN' }
    oc = set()
    for colorsig in colors:
        # BUGFIX: dict.has_key() was removed in Python 3; `in` is identical
        # on Python 2 as well.
        if 'manaCost' in card and colorsig in card['manaCost'].replace('/', '}{'):
            oc.add(colors[colorsig])
        elif 'text' in card and colorsig in ' '.join(card['text'].replace(')', '(').split('(')[::2]).replace('/', '}{'):
            oc.add(colors[colorsig])
    return sorted(list(oc))
# returns true if the card is banned
def is_banned(cn):
    '''Return True if card *cn* is on the BANNED set in redis.'''
    sanitized = sanitize_cardname(cn)
    return get_redis().sismember('BANNED', sanitized)
# adds a deck to the redis data store
def add_deck(deck_dict):
    '''Add a deck to the redis data store.

    Decks live in lists keyed "DECKS_" + the sorted color identity of the
    commander.  Returns True on success, False for an exact duplicate, and
    None when the deck is rejected (unknown or missing commander).'''
    commander = deck_dict['commander']
    try:
        # redis key: DECKS_ followed by the sorted, all-caps color identity
        color_str = 'DECKS_' + '_'.join(color_identity(commander))
    except ValueError:
        logging.warn("This commander doesn't exist, not adding it to my corpus: " + commander)
        return
    logging.debug('Adding the deck with the commander ' + commander)
    # 'jedit ojanen' is the placeholder commander for deckless submissions
    if commander == 'jedit ojanen':
        logging.warn('jedit ojanen means someone submitted a deck without a commander. Im not going to add it')
        return
    # reject exact duplicates (same card list in the same color identity)
    for deck in get_decks(color_identity(commander)):
        if deck['cards'] == deck_dict['cards']:
            logging.debug('this deck is an exact dup. I\'m not going to add it at all.')
            return False
    # add it to the beginning of the list
    get_redis().lpush(color_str, json.dumps(deck_dict))
    return True
# Fetch every stored deck for a color identity. `colors` is either a ready
# Redis key string or an iterable of color names. Pass dedup=True to drop
# near-duplicate lists from the result.
def get_decks(colors, dedup=False):
 if type(colors) is str:
  key = colors
 else:
  key = 'DECKS_' + '_'.join(sorted(c.upper() for c in colors))
 logging.debug('Retrieving all decks from ' + key)
 decks = [ json.loads(raw) for raw in get_redis().lrange(key, 0, -1) ]
 return dedup_decks(decks) if dedup else decks
# Fetch every deck across all DECKS_* keys; optionally dedup each key's list.
def get_all_decks(dedup=False):
 keys = get_redis().keys('DECKS_*')
 logging.debug('Retrieving ALLLL decks')
 out = []
 for key in keys:
  batch = [ json.loads(raw) for raw in get_redis().lrange(key, 0, -1) ]
  if dedup:
   batch = dedup_decks(batch)
  out.extend(batch)
 return out
# URL GET wrapped with a Redis-backed cache.
# TODO: everything lives under one hash key, URL_CACHE; Redis can only expire
# top-level keys (not hash fields), so the cache must be flushed manually.
def urlopen(url):
 cache = get_redis()
 if not cache.hexists('URL_CACHE', url):
  logging.debug("Cache miss on " + url)
  request = urllib2.Request(url, headers={'User-Agent' : USER_AGENT})
  body = urllib2.urlopen(request).read()
  cache.hset('URL_CACHE', url, body)
  return body
 logging.debug("Cache hit on " + url)
 return cache.hget('URL_CACHE', url)
# flushes the entire cache
def flush_cache():
 # Dropping the top-level hash key empties every cached URL at once.
 get_redis().delete('URL_CACHE')
# Record a deck in the RECENT list (capped at 100 entries), deduplicating by
# URL. An entry that carries a reddit reference is preferred over one without.
def add_recent(url_ref, commander, reddit_ref = None):
 r = get_redis()
 out = {'url' : url_ref.strip('/'), 'commander' : commander}
 if reddit_ref is not None:
  s = json.dumps(out)
  r.lrem('RECENT', s, 0) # try to remove the non-reddit ref if it exists
  out['reddit'] = reddit_ref
 else:
  # see if there is already a reddit based one...
  for it in get_redis().lrange('RECENT', 0, -1):
   o = json.loads(it)
   if o.has_key('reddit') and o['url'] == out['url']:
    # Keep the existing reddit-annotated entry; drop both copies so the
    # lpush below re-adds it at the head of the list.
    r.lrem('RECENT', json.dumps(out), 0)
    out = o
    r.lrem('RECENT', it, 0)
    break
 s = json.dumps(out)
 # Remove any stale copy, push to the front, and trim to the newest 100.
 r.lrem('RECENT', s, 0)
 r.lpush('RECENT', s)
 r.ltrim('RECENT', 0, 99)
# Returns the RECENT list as a JSON array of (already-JSON-encoded) strings.
def get_recent_json():
 return json.dumps(get_redis().lrange('RECENT', 0, -1))
################# RECOMMENDATION ENGINE ######################
# This is one of the most important functions.
# It says how close two decks are. The higher the number,
# the closer deck1 and deck2 are. The recommendation
# engine uses this closeness score to compute nearest
# neighbors.
# Closeness score between two decks; higher means more similar. The
# recommendation engine uses this as its nearest-neighbor metric.
def rec_deck_closeness(deck1, deck2):
 d1stat = deckstats.tally([deck1])
 d2stat = deckstats.tally([deck2])
 # Overlap ratio of the card-type histograms (sum of mins / sum of maxes).
 minsum = 0
 maxsum = 0
 for d1t, d2t in zip(sorted(d1stat['types'].items()), sorted(d2stat['types'].items())):
  minsum += min(d1t[1], d2t[1])
  maxsum += max(d1t[1], d2t[1])
 typescore = float(minsum) / maxsum
 # Overlap ratio of the color histograms.
 minsum = 0
 maxsum = 0
 for d1c, d2c in zip(sorted(d1stat['colors'].items()), sorted(d2stat['colors'].items())):
  minsum += min(d1c[1], d2c[1])
  maxsum += max(d1c[1], d2c[1])
 colorscore = float(minsum) / maxsum
 # Overlap ratio of the mana curves.
 # NOTE(review): unlike 'types'/'colors' this iterates sorted(...['curve'])
 # without .items() and then indexes [1] -- confirm 'curve' really is a
 # sequence of pairs rather than a dict.
 minsum = 0
 maxsum = 0
 for d1m, d2m in zip(sorted(d1stat['curve']), sorted(d2stat['curve'])):
  minsum += min(d1m[1], d2m[1])
  maxsum += max(d1m[1], d2m[1])
 curvescore = float(minsum) / maxsum
 # Fraction of each deck's (unique) cards contained in the other.
 cards1 = set(deck1['cards'])
 cards2 = set(deck2['cards'])
 common = cards1.intersection(cards2)
 d1ind2 = len(common) / float(len(cards1))
 d2ind1 = len(common) / float(len(cards2))
 # If they share the same commander, give the score a bit of a boost
 # The rationale is that we want decks with the same commander first,
 # with perhaps some help from other similar commanders if we can't
 # find enough.
 if deck1['commander'] == deck2['commander']:
  same_cmdr_bonus = 1.0
 else:
  same_cmdr_bonus = 0.0
 # Give a bit of a bonus if the decks are similar in age, so recommendations
 # aren't dominated by years-old, out-of-date lists.
 if deck1.has_key('scrapedate') and deck2.has_key('scrapedate'):
  d1date = datetime.datetime.strptime(deck1['scrapedate'], '%Y-%m-%d %H:%M:%S.%f')
  d2date = datetime.datetime.strptime(deck2['scrapedate'], '%Y-%m-%d %H:%M:%S.%f')
  ddelta = abs(d1date - d2date).days
  if ddelta < 30: #if decks are within a month (super recent):
   dscore = 1.0
  elif ddelta < 90: #if decks are within 90 days (most recent set):
   # BUG FIX: this branch used a bare 'if', which clobbered the 1.0
   # assigned above for decks less than 30 days apart.
   dscore = .9
  elif ddelta < 365: #if decks are within one year (most recent block):
   dscore = .5
  else:
   dscore = 0.0
 else:
  dscore = 0.0
 # Weighted average of all component scores.
 weights = ((1.0, d1ind2), (1.0, d2ind1), (.15, same_cmdr_bonus), (.35, typescore), (.35, colorscore), (.1, curvescore), (.1, dscore))
 out = sum(w * s for w, s in weights) / sum(w for w, s in weights)
 return out
# Heuristic duplicate test for two decks. Decks are dups when they share a
# commander AND (same source URL, or the fraction of cards in common exceeds
# `threshold` -- raise the threshold to be more aggressive).
def decks_are_dups(deck1, deck2, threshold = .7):
 if deck1['commander'] != deck2['commander']:
  return False
 try:
  if deck1['url'] == deck2['url']:
   return True
 except KeyError:
  pass
 # Compare shared card count against the mean deck size.
 mean_size = (len(deck1['cards']) + len(deck2['cards'])) / 2.0
 shared = len(set(deck1['cards']).intersection(set(deck2['cards'])))
 return shared / mean_size > threshold
# For a list of decks, remove ones that are near duplicates of others in the
# list. Decks are sorted newest-first so that of any duplicate pair the OLDER
# deck is the one dropped.
def dedup_decks(decks, threshold = .7):
 sdecks = sorted( decks, key= lambda x: int(x.get('date', 0)), reverse=True )
 dropped = set()
 for (i1, d1), (i2, d2) in itertools.combinations(enumerate(sdecks), 2):
  # BUG FIX: the original tested `d1 in badlist` / `d2 in badlist`, comparing
  # deck dicts against a list of *indices* -- always False, so comparisons
  # against already-removed decks were never skipped. Compare indices instead.
  if i1 in dropped or i2 in dropped:
   continue
  if decks_are_dups(d1, d2, threshold = threshold):
   dropped.add(i2)
 return [ d for i, d in enumerate(sdecks) if i not in dropped ]
# This function generates the recommendations for a deck.
# The optional parameter k tells you how far out to cast your net
# for similar decks. Smaller numbers will have more variance and bias,
# but larger numbers will degenerate into "goodstuff.dec" for those particular colors.
# See "Collaborative Filtering" on the Google. This approach is based on that.
def recommend(deck, k=15, returnk=False):
 nn = datetime.datetime.now()
 logging.debug("Finding recommendations for deck with general " + str(deck['commander']))
 # Go calculate all of the closeness scores for this deck to all of the other decks in the corpus.
 scores = []
 for deck2 in get_decks(color_identity(deck['commander'])):
  if decks_are_dups(deck, deck2):
   logging.debug("The deck submitted here is a duplicate of another deck in my corpus...")
   continue
  d = rec_deck_closeness(deck, deck2) ** 2 # notice that I square the score.
  # squaring the score makes closer decks weighted higher. I found empirically this gives better results.
  # Keep the score around but also keep the cards that were different.
  scores.append((d, deck2, set(deck2['cards']) - set(deck['cards']), set(deck['cards']) - set(deck2['cards'])))
 # Pull off the top K highest scores. Break ties randomly.
 topk = sorted(scores, key=lambda x: (x[0], random.random()), reverse=True)[:k]
 for dd in topk:
  logging.debug("Deck similar to this one: " + str(strip_accents(dd[1]['commander'])) + ' score: %.2f' % dd[0] )
 total_score = float(sum(ee[0] for ee in topk))
 # card_counts: cards the neighbors play that this deck doesn't (add candidates)
 # uniq_counts: cards this deck plays that the neighbors don't (cut candidates)
 card_counts = {}
 uniq_counts = {}
 # go through each deck in the top k and tally some cards
 for dist, deck2, newcards, uniqcards in topk:
  for nc in newcards:
   if is_banned(nc):
    continue
   if not nc in card_counts:
    card_counts[nc] = 0.0
   card_counts[nc] += ( dist / total_score ) # dist / total score is what gives weight.
  for uc in uniqcards:
   if uc == deck['commander']:
    continue
   if not uc in uniq_counts:
    uniq_counts[uc] = 0.0
   uniq_counts[uc] += ( dist / total_score )
 # Get ordered lists of card counts
 newrecs = sorted(card_counts.items(), key=lambda x:x[1], reverse=True)
 outrecs = sorted(uniq_counts.items(), key=lambda x:x[1], reverse=True)
 logging.debug("Done finding recommendations for deck with general " + str(deck['commander']) + " (took %s time)" % str(datetime.datetime.now() - nn))
 if returnk:
  return newrecs, outrecs, [ deck for _, deck, _, _ in topk ]
 else:
  return newrecs, outrecs
| |
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import ddt
import mock
from oslo_config import cfg
from oslo_utils import importutils
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_share
from manila.tests import fake_utils
CONF = cfg.CONF
# Shared fixture constants used across the test cases below.
fake_local_share_path = '/mnt/nfs/testvol/fakename'
fake_path_to_private_key = '/fakepath/to/privatekey'
fake_remote_server_password = 'fakepassword'
# Build a fake access rule whose to_dict() mirrors its values mapping.
def fake_access(kwargs):
 rule = fake_share.fake_access(**kwargs)
 rule.to_dict = lambda: rule.values
 return rule
class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase):
 # Minimal concrete driver used as the fixture for the tests below.
 supported_layouts = ('layout_fake.FakeLayout',
      'layout_something.SomeLayout')
 # BUG FIX: ('NFS,') is just the string 'NFS,' (the parentheses don't make a
 # tuple), so membership checks against it were substring tests. Use a real
 # one-element tuple, matching the sibling attributes below.
 supported_protocols = ('NFS',)
 _supported_access_types = ('ip',)
 _supported_access_levels = ('rw',)
@ddt.ddt
class GlusterfsShareDriverBaseTestCase(test.TestCase):
 """Tests GlusterfsShareDriverBase."""
 def setUp(self):
  super(GlusterfsShareDriverBaseTestCase, self).setUp()
  CONF.set_default('driver_handles_share_servers', False)
  fake_conf, __ = self._setup()
  self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf)
  self.fake_share = mock.Mock(name='fake_share')
  self.fake_context = mock.Mock(name='fake_context')
  self.fake_access = mock.Mock(name='fake_access')
 def _setup(self):
  # Stub importutils.import_object so layout construction returns a mock;
  # returns (configuration, mocked layout).
  fake_conf = config.Configuration(None)
  fake_layout = mock.Mock()
  self.mock_object(importutils, "import_object",
      mock.Mock(return_value=fake_layout))
  return fake_conf, fake_layout
 def test_init(self):
  # The abstract base has no supported_layouts, so construction fails.
  self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False,
       configuration=config.Configuration(None))
 @ddt.data({'has_snap': None, 'layout_name': None},
    {'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'},
    {'has_snap': True, 'layout_name': 'layout_something.SomeLayout'})
 @ddt.unpack
 def test_init_subclass(self, has_snap, layout_name):
  conf, _layout = self._setup()
  if layout_name is not None:
   conf.glusterfs_share_layout = layout_name
  if has_snap is None:
   del(_layout._snapshots_are_supported)
  else:
   _layout._snapshots_are_supported = has_snap
  _driver = GlusterfsFakeShareDriver(False, configuration=conf)
  # None means "unset": snapshot support defaults to False and the layout
  # defaults to the first supported layout.
  snap_result = {None: False}.get(has_snap, has_snap)
  layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name,
         layout_name)
  importutils.import_object.assert_called_once_with(
   'manila.share.drivers.glusterfs.%s' % layout_result,
   _driver, configuration=conf)
  self.assertEqual(_layout, _driver.layout)
  self.assertEqual(snap_result, _driver.snapshots_are_supported)
 def test_init_nosupp_layout(self):
  conf = config.Configuration(None)
  conf.glusterfs_share_layout = 'nonsense_layout'
  self.assertRaises(exception.GlusterfsException,
       GlusterfsFakeShareDriver, False, configuration=conf)
 def test_setup_via_manager(self):
  self.assertIsNone(self._driver._setup_via_manager(mock.Mock()))
 def test_supported_access_types(self):
  self.assertEqual(('ip',), self._driver.supported_access_types)
 def test_supported_access_levels(self):
  self.assertEqual(('rw',), self._driver.supported_access_levels)
 def test_access_rule_validator(self):
  rule = mock.Mock()
  abort = mock.Mock()
  valid = mock.Mock()
  self.mock_object(layout.ganesha_utils, 'validate_access_rule',
      mock.Mock(return_value=valid))
  ret = self._driver._access_rule_validator(abort)(rule)
  self.assertEqual(valid, ret)
  layout.ganesha_utils.validate_access_rule.assert_called_once_with(
   ('ip',), ('rw',), rule, abort)
 # Each case maps (existing, add, delete) input rules to the (add, delete)
 # rules expected downstream, plus whether it's a recovery run.
 @ddt.data({'inset': ([], ['ADD'], []), 'outset': (['ADD'], []),
    'recovery': False},
   {'inset': ([], [], ['DELETE']), 'outset': ([], ['DELETE']),
    'recovery': False},
   {'inset': (['EXISTING'], ['ADD'], ['DELETE']),
    'outset': (['ADD'], ['DELETE']), 'recovery': False},
   {'inset': (['EXISTING'], [], []), 'outset': (['EXISTING'], []),
    'recovery': True})
 @ddt.unpack
 def test_update_access(self, inset, outset, recovery):
  conf, _layout = self._setup()
  gluster_mgr = mock.Mock(name='gluster_mgr')
  self.mock_object(_layout, '_share_manager',
      mock.Mock(return_value=gluster_mgr))
  _driver = GlusterfsFakeShareDriver(False, configuration=conf)
  self.mock_object(_driver, '_update_access_via_manager', mock.Mock())
  rulemap = {t: fake_access({'access_type': "ip",
        'access_level': "rw",
        'access_to': t}) for t in (
   'EXISTING', 'ADD', 'DELETE')}
  in_rules, out_rules = (
   [
    [
     rulemap[t] for t in r
    ] for r in rs
   ] for rs in (inset, outset))
  _driver.update_access(self.fake_context, self.fake_share, *in_rules)
  _layout._share_manager.assert_called_once_with(self.fake_share)
  _driver._update_access_via_manager.assert_called_once_with(
   gluster_mgr, self.fake_context, self.fake_share,
   *out_rules, recovery=recovery)
 def test_update_access_via_manager(self):
  self.assertRaises(NotImplementedError,
       self._driver._update_access_via_manager,
       mock.Mock(), self.fake_context, self.fake_share,
       [self.fake_access], [self.fake_access])
 @ddt.data('NFS', 'PROTATO')
 def test_check_proto_baseclass(self, proto):
  self.assertRaises(exception.ShareBackendException,
       layout.GlusterfsShareDriverBase._check_proto,
       {'share_proto': proto})
 def test_check_proto(self):
  GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'})
 def test_check_proto_notsupported(self):
  self.assertRaises(exception.ShareBackendException,
       GlusterfsFakeShareDriver._check_proto,
       {'share_proto': 'PROTATO'})
 @ddt.data('', '_from_snapshot')
 def test_create_share(self, variant):
  conf, _layout = self._setup()
  _driver = GlusterfsFakeShareDriver(False, configuration=conf)
  self.mock_object(_driver, '_check_proto', mock.Mock())
  getattr(_driver, 'create_share%s' % variant)(self.fake_context,
             self.fake_share)
  _driver._check_proto.assert_called_once_with(self.fake_share)
  getattr(_layout,
    'create_share%s' % variant).assert_called_once_with(
   self.fake_context, self.fake_share)
 @ddt.data(True, False)
 def test_update_share_stats(self, internal_exception):
  data = mock.Mock()
  conf, _layout = self._setup()
  def raise_exception(*args, **kwargs):
   raise NotImplementedError
  layoutstats = mock.Mock()
  mock_kw = ({'side_effect': raise_exception} if internal_exception
     else {'return_value': layoutstats})
  self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw))
  self.mock_object(driver.ShareDriver, '_update_share_stats',
      mock.Mock())
  _driver = GlusterfsFakeShareDriver(False, configuration=conf)
  _driver._update_share_stats(data)
  # A layout without stats support must not pollute the stats dict.
  if internal_exception:
   self.assertFalse(data.update.called)
  else:
   data.update.assert_called_once_with(layoutstats)
  driver.ShareDriver._update_share_stats.assert_called_once_with(
   data)
 @ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot',
    'ensure_share', 'manage_existing', 'unmanage', 'extend_share',
    'shrink_share')
 def test_delegated_methods(self, method):
  # These driver entry points simply delegate to the layout verbatim.
  conf, _layout = self._setup()
  _driver = GlusterfsFakeShareDriver(False, configuration=conf)
  fake_args = (mock.Mock(), mock.Mock(), mock.Mock())
  getattr(_driver, method)(*fake_args)
  getattr(_layout, method).assert_called_once_with(*fake_args)
@ddt.ddt
class GlusterfsShareLayoutBaseTestCase(test.TestCase):
 """Tests GlusterfsShareLayoutBaseTestCase."""
 def setUp(self):
  super(GlusterfsShareLayoutBaseTestCase, self).setUp()
  fake_utils.stub_out_utils_execute(self)
  self._execute = fake_utils.fake_execute
  self.addCleanup(fake_utils.fake_execute_set_repliers, [])
  self.addCleanup(fake_utils.fake_execute_clear_log)
  self.fake_driver = mock.Mock()
  self.mock_object(self.fake_driver, '_execute',
      self._execute)
 # Concrete no-op subclass: implements every abstract method so the base
 # layout class can be instantiated in the tests below.
 class FakeLayout(layout.GlusterfsShareLayoutBase):
  def _share_manager(self, share):
   """Return GlusterManager object representing share's backend."""
  def do_setup(self, context):
   """Any initialization the share driver does while starting."""
  def create_share(self, context, share, share_server=None):
   """Is called to create share."""
  def create_share_from_snapshot(self, context, share, snapshot,
         share_server=None):
   """Is called to create share from snapshot."""
  def create_snapshot(self, context, snapshot, share_server=None):
   """Is called to create snapshot."""
  def delete_share(self, context, share, share_server=None):
   """Is called to remove share."""
  def delete_snapshot(self, context, snapshot, share_server=None):
   """Is called to remove snapshot."""
  def ensure_share(self, context, share, share_server=None):
   """Invoked to ensure that share is exported."""
  def manage_existing(self, share, driver_options):
   """Brings an existing share under Manila management."""
  def unmanage(self, share):
   """Removes the specified share from Manila management."""
  def extend_share(self, share, new_size, share_server=None):
   """Extends size of existing share."""
  def shrink_share(self, share, new_size, share_server=None):
   """Shrinks size of existing share."""
 def test_init_invalid(self):
  # The abstract base cannot be instantiated directly.
  self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
       mock.Mock())
 def test_subclass(self):
  fake_conf = mock.Mock()
  _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
  self.assertEqual(fake_conf, _layout.configuration)
  self.assertRaises(NotImplementedError, _layout._update_share_stats)
 def test_check_mount_glusterfs(self):
  fake_conf = mock.Mock()
  _driver = mock.Mock()
  _driver._execute = mock.Mock()
  _layout = self.FakeLayout(_driver, configuration=fake_conf)
  _layout._check_mount_glusterfs()
  _driver._execute.assert_called_once_with(
   'mount.glusterfs',
   check_exit_code=False)
 # ENOENT (helper missing) is wrapped; other OS errors propagate.
 @ddt.data({'_errno': errno.ENOENT,
    '_exception': exception.GlusterfsException},
   {'_errno': errno.EACCES, '_exception': OSError})
 @ddt.unpack
 def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
  fake_conf = mock.Mock()
  _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
  def exec_runner(*ignore_args, **ignore_kwargs):
   raise OSError(_errno, os.strerror(_errno))
  expected_exec = ['mount.glusterfs']
  fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
  self.assertRaises(_exception, _layout._check_mount_glusterfs)
| |
from __future__ import absolute_import
from pony.py23compat import buffer, int_types
import os
os.environ["NLS_LANG"] = "AMERICAN_AMERICA.UTF8"
import re
from datetime import datetime, date, time, timedelta
from decimal import Decimal
from uuid import UUID
import cx_Oracle
from pony.orm import core, dbapiprovider, sqltranslation
from pony.orm.core import log_orm, log_sql, DatabaseError, TranslationError
from pony.orm.dbschema import DBSchema, DBObject, Table, Column
from pony.orm.ormtypes import Json
from pony.orm.sqlbuilding import SQLBuilder
from pony.orm.dbapiprovider import DBAPIProvider, wrap_dbapi_exceptions, get_version_tuple
from pony.utils import throw, is_ident
NoneType = type(None)
class OraTable(Table):
 # Oracle has no AUTOINCREMENT column clause; an auto PK is emulated with a
 # sequence plus a BEFORE INSERT trigger (see OraSequence/OraTrigger).
 def get_objects_to_create(table, created_tables=None):
  # Extend the base DDL object list with the sequence and trigger backing
  # the auto PK column, if the table has one (at most one -- note the break).
  result = Table.get_objects_to_create(table, created_tables)
  for column in table.column_list:
   if column.is_pk == 'auto':
    sequence_name = column.converter.attr.kwargs.get('sequence_name')
    sequence = OraSequence(table, sequence_name)
    trigger = OraTrigger(table, column, sequence)
    result.extend((sequence, trigger))
    break
  return result
class OraSequence(DBObject):
 # DDL object for the sequence that feeds an auto PK column.
 typename = 'Sequence'
 def __init__(sequence, table, name=None):
  sequence.table = table
  table_name = table.name
  # Default name is "<table>_SEQ"; a tuple name means (owner, table).
  # NOTE(review): the tuple branch uses table_name[0] (the owner part) to
  # build the sequence name -- table_name[-1] looks intended; cf. OraTrigger,
  # which uses table_name[-1]. Confirm before relying on schema-qualified
  # sequence names.
  if name is not None: sequence.name = name
  elif isinstance(table_name, str): sequence.name = table_name + '_SEQ'
  else: sequence.name = tuple(table_name[:-1]) + (table_name[0] + '_SEQ',)
 def exists(sequence, provider, connection, case_sensitive=True):
  # Probe ALL_SEQUENCES; returns the stored name (truthy) or None.
  if case_sensitive: sql = 'SELECT sequence_name FROM all_sequences ' \
         'WHERE sequence_owner = :so and sequence_name = :sn'
  else: sql = 'SELECT sequence_name FROM all_sequences ' \
     'WHERE sequence_owner = :so and upper(sequence_name) = upper(:sn)'
  owner_name, sequence_name = provider.split_table_name(sequence.name)
  cursor = connection.cursor()
  cursor.execute(sql, dict(so=owner_name, sn=sequence_name))
  row = cursor.fetchone()
  return row[0] if row is not None else None
 def get_create_command(sequence):
  schema = sequence.table.schema
  seq_name = schema.provider.quote_name(sequence.name)
  return schema.case('CREATE SEQUENCE %s NOCACHE') % seq_name
# DDL template for the BEFORE INSERT trigger that fills an auto PK from its
# sequence. The %s slots are: trigger name, table, PK column, sequence, PK
# column (in that order -- see OraTrigger.get_create_command).
trigger_template = """
CREATE TRIGGER %s
BEFORE INSERT ON %s
FOR EACH ROW
BEGIN
IF :new.%s IS NULL THEN
SELECT %s.nextval INTO :new.%s FROM DUAL;
END IF;
END;""".strip()
class OraTrigger(DBObject):
 # DDL object for the BEFORE INSERT trigger that populates an auto PK column
 # from its companion OraSequence.
 typename = 'Trigger'
 def __init__(trigger, table, column, sequence):
  trigger.table = table
  trigger.column = column
  trigger.sequence = sequence
  table_name = table.name
  # For (owner, table) tuple names, use only the bare table name.
  if not isinstance(table_name, str): table_name = table_name[-1]
  trigger.name = table_name + '_BI' # Before Insert
 def exists(trigger, provider, connection, case_sensitive=True):
  # Probe ALL_TRIGGERS; returns the stored name (truthy) or None.
  if case_sensitive: sql = 'SELECT trigger_name FROM all_triggers ' \
         'WHERE table_name = :tbn AND table_owner = :o ' \
         'AND trigger_name = :trn AND owner = :o'
  else: sql = 'SELECT trigger_name FROM all_triggers ' \
     'WHERE table_name = :tbn AND table_owner = :o ' \
     'AND upper(trigger_name) = upper(:trn) AND owner = :o'
  owner_name, table_name = provider.split_table_name(trigger.table.name)
  cursor = connection.cursor()
  cursor.execute(sql, dict(tbn=table_name, trn=trigger.name, o=owner_name))
  row = cursor.fetchone()
  return row[0] if row is not None else None
 def get_create_command(trigger):
  # Fill trigger_template: (trigger, table, column, sequence, column).
  schema = trigger.table.schema
  quote_name = schema.provider.quote_name
  trigger_name = quote_name(trigger.name)
  table_name = quote_name(trigger.table.name)
  column_name = quote_name(trigger.column.name)
  seq_name = quote_name(trigger.sequence.name)
  return schema.case(trigger_template) % (trigger_name, table_name, column_name, seq_name, column_name)
class OraColumn(Column):
 # No inline autoincrement DDL in Oracle; handled by OraSequence/OraTrigger.
 auto_template = None
class OraSchema(DBSchema):
 # Schema wired up with the Oracle-specific table/column DDL classes.
 dialect = 'Oracle'
 table_class = OraTable
 column_class = OraColumn
class OraNoneMonad(sqltranslation.NoneMonad):
 # Oracle stores the empty string as NULL, so '' is folded into None here.
 def __init__(monad, value=None):
  assert value in (None, '')
  sqltranslation.ConstMonad.__init__(monad, None)
class OraConstMonad(sqltranslation.ConstMonad):
 @staticmethod
 def new(value):
  # Same ''-is-NULL normalization as OraNoneMonad, for constants.
  if value == '': value = None
  return sqltranslation.ConstMonad.new(value)
class OraTranslator(sqltranslation.SQLTranslator):
 # Oracle-dialect translator: has ROWID support, wildcard JSON paths, but
 # JSON values cannot be compared directly (CLOB comparison limits).
 dialect = 'Oracle'
 rowid_support = True
 json_path_wildcard_syntax = True
 json_values_are_comparable = False
 NoneMonad = OraNoneMonad
 ConstMonad = OraConstMonad
class OraBuilder(SQLBuilder):
 # Oracle SQL builder. The main divergences from the base builder are
 # ROWNUM-based LIMIT/OFFSET emulation, RETURNING ... INTO for inserts,
 # and the JSON_* function family.
 dialect = 'Oracle'
 def INSERT(builder, table_name, columns, values, returning=None):
  # Oracle returns generated keys via "RETURNING col INTO :new_id".
  result = SQLBuilder.INSERT(builder, table_name, columns, values)
  if returning is not None:
   result.extend((' RETURNING ', builder.quote_name(returning), ' INTO :new_id'))
  return result
 def SELECT_FOR_UPDATE(builder, nowait, skip_locked, *sections):
  assert not builder.indent
  nowait = ' NOWAIT' if nowait else ''
  skip_locked = ' SKIP LOCKED' if skip_locked else ''
  last_section = sections[-1]
  if last_section[0] != 'LIMIT':
   return builder.SELECT(*sections), 'FOR UPDATE', nowait, skip_locked, '\n'
  # FOR UPDATE cannot be combined with the ROWNUM subquery directly, so the
  # query is rewritten to lock rows selected by ROWID in a subquery.
  from_section = sections[1]
  assert from_section[0] == 'FROM'
  if len(from_section) > 2: throw(NotImplementedError,
   'Table joins are not supported for Oracle queries which have both FOR UPDATE and ROWNUM')
  order_by_section = None
  for section in sections:
   if section[0] == 'ORDER_BY': order_by_section = section
  table_ast = from_section[1]
  assert len(table_ast) == 3 and table_ast[1] == 'TABLE'
  table_alias = table_ast[0]
  rowid = [ 'COLUMN', table_alias, 'ROWID' ]
  sql_ast = [ 'SELECT', sections[0], [ 'FROM', table_ast ], [ 'WHERE', [ 'IN', rowid,
     ('SELECT', [ 'ROWID', ['AS', rowid, 'row-id' ] ]) + sections[1:] ] ] ]
  if order_by_section: sql_ast.append(order_by_section)
  result = builder(sql_ast)
  return result, 'FOR UPDATE', nowait, skip_locked, '\n'
 def SELECT(builder, *sections):
  # Emulates LIMIT/OFFSET with nested ROWNUM subqueries (pre-12c style):
  # LIMIT only -> one wrapper with ROWNUM <= limit; OFFSET -> two wrappers
  # with a materialized "row-num" column.
  prev_suppress_aliases = builder.suppress_aliases
  builder.suppress_aliases = False
  try:
   last_section = sections[-1]
   limit = offset = None
   if last_section[0] == 'LIMIT':
    limit = last_section[1]
    if len(last_section) > 2: offset = last_section[2]
    sections = sections[:-1]
   result = builder._subquery(*sections)
   indent = builder.indent_spaces * builder.indent
   if sections[0][0] == 'ROWID':
    indent0 = builder.indent_spaces
    x = 't."row-id"'
   else:
    indent0 = ''
    x = 't.*'
   if not limit and not offset:
    pass
   elif not offset:
    result = [ indent0, 'SELECT * FROM (\n' ]
    builder.indent += 1
    result.extend(builder._subquery(*sections))
    builder.indent -= 1
    result.extend((indent, ') WHERE ROWNUM <= %d\n' % limit))
   else:
    indent2 = indent + builder.indent_spaces
    result = [ indent0, 'SELECT %s FROM (\n' % x, indent2, 'SELECT t.*, ROWNUM "row-num" FROM (\n' ]
    builder.indent += 2
    result.extend(builder._subquery(*sections))
    builder.indent -= 2
    if limit is None:
     result.append('%s) t\n' % indent2)
     result.append('%s) t WHERE "row-num" > %d\n' % (indent, offset))
    else:
     result.append('%s) t WHERE ROWNUM <= %d\n' % (indent2, limit + offset))
     result.append('%s) t WHERE "row-num" > %d\n' % (indent, offset))
   if builder.indent:
    indent = builder.indent_spaces * builder.indent
    return '(\n', result, indent + ')'
   return result
  finally:
   builder.suppress_aliases = prev_suppress_aliases
 def ROWID(builder, *expr_list):
  return builder.ALL(*expr_list)
 def LIMIT(builder, limit, offset=None):
  # LIMIT is consumed inside SELECT above and must never be emitted here.
  assert False # pragma: no cover
 def TO_REAL(builder, expr):
  return 'CAST(', builder(expr), ' AS NUMBER)'
 def TO_STR(builder, expr):
  return 'TO_CHAR(', builder(expr), ')'
 def DATE(builder, expr):
  # TRUNC drops the time-of-day portion of an Oracle DATE.
  return 'TRUNC(', builder(expr), ')'
 def RANDOM(builder):
  return 'dbms_random.value'
 def MOD(builder, a, b):
  return 'MOD(', builder(a), ', ', builder(b), ')'
 def DATE_ADD(builder, expr, delta):
  return '(', builder(expr), ' + ', builder(delta), ')'
 def DATE_SUB(builder, expr, delta):
  return '(', builder(expr), ' - ', builder(delta), ')'
 def DATE_DIFF(builder, expr1, expr2):
  return builder(expr1), ' - ', builder(expr2)
 def DATETIME_ADD(builder, expr, delta):
  return '(', builder(expr), ' + ', builder(delta), ')'
 def DATETIME_SUB(builder, expr, delta):
  return '(', builder(expr), ' - ', builder(delta), ')'
 def DATETIME_DIFF(builder, expr1, expr2):
  return builder(expr1), ' - ', builder(expr2)
 def build_json_path(builder, path):
  # Oracle JSON path literals cannot carry bind parameters.
  path_sql, has_params, has_wildcards = SQLBuilder.build_json_path(builder, path)
  if has_params: throw(TranslationError, "Oracle doesn't allow parameters in JSON paths")
  return path_sql, has_params, has_wildcards
 def JSON_QUERY(builder, expr, path):
  # WITH WRAPPER always returns an array; for non-wildcard paths the
  # REGEXP_REPLACE strips the wrapping brackets back off.
  expr_sql = builder(expr)
  path_sql, has_params, has_wildcards = builder.build_json_path(path)
  if has_wildcards: return 'JSON_QUERY(', expr_sql, ', ', path_sql, ' WITH WRAPPER)'
  return 'REGEXP_REPLACE(JSON_QUERY(', expr_sql, ', ', path_sql, " WITH WRAPPER), '(^\\[|\\]$)', '')"
 json_value_type_mapping = {bool: 'NUMBER', int: 'NUMBER', float: 'NUMBER'}
 def JSON_VALUE(builder, expr, path, type):
  if type is Json: return builder.JSON_QUERY(expr, path)
  path_sql, has_params, has_wildcards = builder.build_json_path(path)
  type_name = builder.json_value_type_mapping.get(type, 'VARCHAR2')
  return 'JSON_VALUE(', builder(expr), ', ', path_sql, ' RETURNING ', type_name, ')'
 def JSON_NONZERO(builder, expr):
  return 'COALESCE(', builder(expr), ''', 'null') NOT IN ('null', 'false', '0', '""', '[]', '{}')'''
 def JSON_CONTAINS(builder, expr, path, key):
  # JSON_EXISTS handles object keys; the REGEXP_LIKE fallback detects the
  # key as a string element inside an array at the same path.
  # NOTE(review): json_item_re.match() is unanchored at the end and the
  # pattern can match empty, so this branch is taken for every key --
  # possibly fullmatch semantics were intended; confirm.
  assert key[0] == 'VALUE' and isinstance(key[1], str)
  path_sql, has_params, has_wildcards = builder.build_json_path(path)
  path_with_key_sql, _, _ = builder.build_json_path(path + [ key ])
  expr_sql = builder(expr)
  result = 'JSON_EXISTS(', expr_sql, ', ', path_with_key_sql, ')'
  if json_item_re.match(key[1]):
   item = r'"([^"]|\\")*"'
   list_start = r'\[\s*(%s\s*,\s*)*' % item
   list_end = r'\s*(,\s*%s\s*)*\]' % item
   pattern = r'%s"%s"%s' % (list_start, key[1], list_end)
   if has_wildcards:
    sublist = r'\[[^]]*\]'
    item_or_sublist = '(%s|%s)' % (item, sublist)
    wrapper_list_start = r'^\[\s*(%s\s*,\s*)*' % item_or_sublist
    wrapper_list_end = r'\s*(,\s*%s\s*)*\]$' % item_or_sublist
    pattern = r'%s%s%s' % (wrapper_list_start, pattern, wrapper_list_end)
    result += ' OR REGEXP_LIKE(JSON_QUERY(', expr_sql, ', ', path_sql, " WITH WRAPPER), '%s')" % pattern
   else:
    pattern = '^%s$' % pattern
    result += ' OR REGEXP_LIKE(JSON_QUERY(', expr_sql, ', ', path_sql, "), '%s')" % pattern
  return result
 def JSON_ARRAY_LENGTH(builder, value):
  throw(TranslationError, 'Oracle does not provide `length` function for JSON arrays')
 def GROUP_CONCAT(builder, distinct, expr, sep=None):
  # LISTAGG; DISTINCT inside LISTAGG requires Oracle 19+.
  assert distinct in (None, True, False)
  if distinct and builder.provider.server_version >= (19,):
   distinct = 'DISTINCT '
  else:
   distinct = ''
  result = 'LISTAGG(', distinct, builder(expr)
  if sep is not None:
   result = result, ', ', builder(sep)
  else:
   result = result, ", ','"
  return result, ') WITHIN GROUP(ORDER BY 1)'
# Accepts keys made only of word characters and whitespace (see JSON_CONTAINS).
# FIX: use a raw string -- '\w'/'\s' in a normal string are invalid escape
# sequences (DeprecationWarning, SyntaxError in future Python versions).
# NOTE(review): .match() with a pattern that can match empty succeeds for any
# key; fullmatch-style anchoring may have been intended -- confirm.
json_item_re = re.compile(r'[\w\s]*')
class OraBoolConverter(dbapiprovider.BoolConverter):
 # Booleans are stored as NUMBER(1) 0/1.
 def py2sql(converter, val):
  # Fixes cx_Oracle 5.1.3 Python 3 bug:
  # "DatabaseError: OCI-22062: invalid input string [True]"
  return int(val)
 def sql2py(converter, val):
  return bool(val) # TODO: True/False, T/F, Y/N, Yes/No, etc.
 def sql_type(converter):
  return "NUMBER(1)"
class OraStrConverter(dbapiprovider.StrConverter):
 def validate(converter, val, obj=None):
  # Oracle treats '' as NULL, so the empty string is stored as None.
  if val == '': return None
  return dbapiprovider.StrConverter.validate(converter, val)
 def sql2py(converter, val):
  # CLOB columns come back as LOB handles; materialize them to str.
  if isinstance(val, cx_Oracle.LOB):
   val = val.read()
  return val
 def sql_type(converter):
  # TODO: Add support for NVARCHAR2 and NCLOB datatypes
  if converter.max_len:
   return 'VARCHAR2(%d CHAR)' % converter.max_len
  return 'CLOB'
class OraIntConverter(dbapiprovider.IntConverter):
 # NUMBER(p) precisions sized to hold each integer bit width.
 signed_types = {None: 'NUMBER(38)', 8: 'NUMBER(3)', 16: 'NUMBER(5)', 24: 'NUMBER(7)', 32: 'NUMBER(10)', 64: 'NUMBER(19)'}
 unsigned_types = {None: 'NUMBER(38)', 8: 'NUMBER(3)', 16: 'NUMBER(5)', 24: 'NUMBER(8)', 32: 'NUMBER(10)', 64: 'NUMBER(20)'}
 def init(self, kwargs):
  dbapiprovider.IntConverter.init(self, kwargs)
  # 'sequence_name' only makes sense for the Oracle auto PK emulation.
  sequence_name = kwargs.pop('sequence_name', None)
  if sequence_name is not None and not (self.attr.auto and self.attr.is_pk):
   throw(TypeError, "Parameter 'sequence_name' can be used only for PrimaryKey attributes with auto=True")
class OraRealConverter(dbapiprovider.RealConverter):
 # Note that Oracle has different representation of float numbers
 def sql_type(converter):
  return 'NUMBER'
class OraDecimalConverter(dbapiprovider.DecimalConverter):
 def sql_type(converter):
  return 'NUMBER(%d, %d)' % (converter.precision, converter.scale)
class OraBlobConverter(dbapiprovider.BlobConverter):
 def sql2py(converter, val):
  # BLOBs come back as LOB handles; read them into a buffer.
  return buffer(val.read())
class OraDateConverter(dbapiprovider.DateConverter):
 def sql2py(converter, val):
  # Oracle DATE always carries a time component; strip it for date attrs.
  if isinstance(val, datetime): return val.date()
  if not isinstance(val, date): throw(ValueError,
   'Value of unexpected type received from database: instead of date got %s', type(val))
  return val
class OraTimeConverter(dbapiprovider.TimeConverter):
 # time values are stored as day-to-second intervals from midnight.
 sql_type_name = 'INTERVAL DAY(0) TO SECOND'
 def __init__(converter, provider, py_type, attr=None):
  dbapiprovider.TimeConverter.__init__(converter, provider, py_type, attr)
  if attr is not None and converter.precision > 0:
   # cx_Oracle 5.1.3 corrupts microseconds for values of DAY TO SECOND type
   converter.precision = 0
 def sql2py(converter, val):
  # Convert the interval back to a time-of-day.
  # NOTE(review): if total_seconds is outside [0, 24h] the timedelta falls
  # through and is returned unchanged, and exactly 24h would call
  # time(24, ...) which raises ValueError -- confirm these edges are
  # intended/unreachable.
  if isinstance(val, timedelta):
   total_seconds = val.days * (24 * 60 * 60) + val.seconds
   if 0 <= total_seconds <= 24 * 60 * 60:
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return time(hours, minutes, seconds, val.microseconds)
  elif not isinstance(val, time): throw(ValueError,
   'Value of unexpected type received from database%s: instead of time or timedelta got %s'
   % ('for attribute %s' % converter.attr if converter.attr else '', type(val)))
  return val
 def py2sql(converter, val):
  return timedelta(hours=val.hour, minutes=val.minute, seconds=val.second, microseconds=val.microsecond)
class OraTimedeltaConverter(dbapiprovider.TimedeltaConverter):
    sql_type_name = 'INTERVAL DAY TO SECOND'
    def __init__(converter, provider, py_type, attr=None):
        dbapiprovider.TimedeltaConverter.__init__(converter, provider, py_type, attr)
        if attr is not None and converter.precision > 0:
            # cx_Oracle 5.1.3 corrupts microseconds for values of DAY TO SECOND type
            converter.precision = 0
class OraDatetimeConverter(dbapiprovider.DatetimeConverter):
    # TIMESTAMP (not DATE) so fractional seconds are preserved.
    sql_type_name = 'TIMESTAMP'
class OraUuidConverter(dbapiprovider.UuidConverter):
    def sql_type(converter):
        # UUIDs are stored as 16 raw bytes.
        return 'RAW(16)'
class OraJsonConverter(dbapiprovider.JsonConverter):
    # Compact, deterministic serialization so stored JSON compares stably.
    json_kwargs = {'separators': (',', ':'), 'sort_keys': True, 'ensure_ascii': False}
    optimistic = False  # CLOBs cannot be compared with strings, and TO_CHAR(CLOB) returns first 4000 chars only
    def sql2py(converter, dbval):
        # CLOB values come back as LOB handles; materialize before parsing.
        if hasattr(dbval, 'read'): dbval = dbval.read()
        return dbapiprovider.JsonConverter.sql2py(converter, dbval)
    def sql_type(converter):
        return 'CLOB'
class OraProvider(DBAPIProvider):
    """Pony ORM database provider for Oracle, built on the cx_Oracle driver."""
    dialect = 'Oracle'
    paramstyle = 'named'
    max_name_len = 30  # classic Oracle identifier length limit
    table_if_not_exists_syntax = False
    index_if_not_exists_syntax = False
    varchar_default_max_len = 1000
    uint64_support = True

    dbapi_module = cx_Oracle
    dbschema_cls = OraSchema
    translator_cls = OraTranslator
    sqlbuilder_cls = OraBuilder

    # Qualified names are written as owner.table
    name_before_table = 'owner'

    # Python type -> converter class, used for value translation and DDL types.
    converter_classes = [
        (NoneType, dbapiprovider.NoneConverter),
        (bool, OraBoolConverter),
        (str, OraStrConverter),
        (int_types, OraIntConverter),
        (float, OraRealConverter),
        (Decimal, OraDecimalConverter),
        (datetime, OraDatetimeConverter),
        (date, OraDateConverter),
        (time, OraTimeConverter),
        (timedelta, OraTimedeltaConverter),
        (UUID, OraUuidConverter),
        (buffer, OraBlobConverter),
        (Json, OraJsonConverter),
    ]

    @wrap_dbapi_exceptions
    def inspect_connection(provider, connection):
        # Capture the server version and current schema right after connect;
        # both are needed later for name qualification and feature checks.
        cursor = connection.cursor()
        cursor.execute('SELECT version FROM product_component_version '
                       "WHERE product LIKE 'Oracle Database %'")
        provider.server_version = get_version_tuple(cursor.fetchone()[0])
        cursor.execute("SELECT sys_context( 'userenv', 'current_schema' ) FROM DUAL")
        provider.default_schema_name = cursor.fetchone()[0]

    def should_reconnect(provider, exc):
        # Reconnect only for errors that mean the server session is gone.
        reconnect_error_codes = (
            3113,  # ORA-03113: end-of-file on communication channel
            3114,  # ORA-03114: not connected to ORACLE
        )
        return isinstance(exc, cx_Oracle.OperationalError) \
               and exc.args[0].code in reconnect_error_codes

    def normalize_name(provider, name):
        # Oracle folds unquoted identifiers to upper case; also truncate to
        # the maximum identifier length.
        return name[:provider.max_name_len].upper()

    def normalize_vars(provider, vars, vartypes):
        # Oracle treats '' as NULL, so empty-string parameters are converted
        # to None (and their type to NoneType) before binding.
        DBAPIProvider.normalize_vars(provider, vars, vartypes)
        for key, value in vars.items():
            if value == '':
                vars[key] = None
                vartypes[key] = NoneType

    @wrap_dbapi_exceptions
    def set_transaction_mode(provider, connection, cache):
        """Apply db_session isolation settings at the start of a transaction."""
        assert not cache.in_transaction
        db_session = cache.db_session
        if db_session is not None and db_session.serializable:
            cursor = connection.cursor()
            sql = 'SET TRANSACTION ISOLATION LEVEL SERIALIZABLE'
            if core.local.debug: log_orm(sql)
            cursor.execute(sql)
        cache.immediate = True
        if db_session is not None and (db_session.serializable or db_session.ddl):
            cache.in_transaction = True

    @wrap_dbapi_exceptions
    def execute(provider, cursor, sql, arguments=None, returning_id=False):
        """Execute a statement; a list of argument dicts means executemany.

        With returning_id=True a string output bind 'new_id' is added and
        its (int-converted) value is returned, i.e. the generated PK.
        """
        if type(arguments) is list:
            assert arguments and not returning_id
            set_input_sizes(cursor, arguments[0])
            cursor.executemany(sql, arguments)
        else:
            if arguments is not None: set_input_sizes(cursor, arguments)
            if returning_id:
                # Fetch the generated id via a RETURNING ... INTO :new_id bind.
                var = cursor.var(cx_Oracle.STRING, 40, cursor.arraysize, outconverter=int)
                arguments['new_id'] = var
                # NOTE(review): arguments cannot be None here (the line above
                # would have raised), so the None branch looks vestigial.
                if arguments is None: cursor.execute(sql)
                else: cursor.execute(sql, arguments)
                value = var.getvalue()
                if isinstance(value, list):
                    assert len(value) == 1
                    value = value[0]
                return value
            if arguments is None: cursor.execute(sql)
            else: cursor.execute(sql, arguments)

    def get_pool(provider, *args, **kwargs):
        """Build an OraPool from 'user/password@dsn' or positional/keyword args."""
        user = password = dsn = None
        if len(args) == 1:
            conn_str = args[0]
            if '/' in conn_str:
                user, tail = conn_str.split('/', 1)
                if '@' in tail: password, dsn = tail.split('@', 1)
            if None in (user, password, dsn): throw(ValueError,
                "Incorrect connection string (must be in form of 'user/password@dsn')")
        elif len(args) == 2: user, password = args
        elif len(args) == 3: user, password, dsn = args
        elif args: throw(ValueError, 'Invalid number of positional arguments')
        def setdefault(kwargs, key, value):
            # Reject conflicting positional vs. keyword credentials.
            kwargs_value = kwargs.setdefault(key, value)
            if value is not None and value != kwargs_value:
                throw(ValueError, 'Ambiguous value for ' + key)
        setdefault(kwargs, 'user', user)
        setdefault(kwargs, 'password', password)
        setdefault(kwargs, 'dsn', dsn)
        kwargs.setdefault('threaded', True)
        kwargs.setdefault('min', 1)
        kwargs.setdefault('max', 10)
        kwargs.setdefault('increment', 1)
        return OraPool(**kwargs)

    def table_exists(provider, connection, table_name, case_sensitive=True):
        # Look the table up in ALL_TABLES under its owner schema.
        owner_name, table_name = provider.split_table_name(table_name)
        cursor = connection.cursor()
        if case_sensitive: sql = 'SELECT table_name FROM all_tables WHERE owner = :o AND table_name = :tn'
        else: sql = 'SELECT table_name FROM all_tables WHERE owner = :o AND upper(table_name) = upper(:tn)'
        cursor.execute(sql, dict(o=owner_name, tn=table_name))
        row = cursor.fetchone()
        return row[0] if row is not None else None

    def index_exists(provider, connection, table_name, index_name, case_sensitive=True):
        # Look the index up in ALL_INDEXES, scoped to owner and table.
        owner_name, table_name = provider.split_table_name(table_name)
        if not isinstance(index_name, str): throw(NotImplementedError)
        if case_sensitive: sql = 'SELECT index_name FROM all_indexes WHERE owner = :o ' \
                                 'AND index_name = :i AND table_owner = :o AND table_name = :t'
        else: sql = 'SELECT index_name FROM all_indexes WHERE owner = :o ' \
                    'AND upper(index_name) = upper(:i) AND table_owner = :o AND table_name = :t'
        cursor = connection.cursor()
        cursor.execute(sql, dict(o=owner_name, i=index_name, t=table_name))
        row = cursor.fetchone()
        return row[0] if row is not None else None

    def fk_exists(provider, connection, table_name, fk_name, case_sensitive=True):
        # Foreign keys are constraints of type 'R' in USER_CONSTRAINTS.
        owner_name, table_name = provider.split_table_name(table_name)
        if not isinstance(fk_name, str): throw(NotImplementedError)
        if case_sensitive:
            sql = "SELECT constraint_name FROM user_constraints WHERE constraint_type = 'R' " \
                  'AND table_name = :tn AND constraint_name = :cn AND owner = :o'
        else: sql = "SELECT constraint_name FROM user_constraints WHERE constraint_type = 'R' " \
                    'AND table_name = :tn AND upper(constraint_name) = upper(:cn) AND owner = :o'
        cursor = connection.cursor()
        cursor.execute(sql, dict(tn=table_name, cn=fk_name, o=owner_name))
        row = cursor.fetchone()
        return row[0] if row is not None else None

    def table_has_data(provider, connection, table_name):
        # ROWNUM = 1 makes this an O(1) existence probe.
        cursor = connection.cursor()
        cursor.execute('SELECT 1 FROM %s WHERE ROWNUM = 1' % provider.quote_name(table_name))
        return cursor.fetchone() is not None

    def drop_table(provider, connection, table_name):
        # CASCADE CONSTRAINTS drops dependent foreign keys as well.
        cursor = connection.cursor()
        sql = 'DROP TABLE %s CASCADE CONSTRAINTS' % provider.quote_name(table_name)
        cursor.execute(sql)
provider_cls = OraProvider  # module-level entry point (presumably looked up by the ORM under this name -- confirm)
def to_int_or_decimal(val):
    """Convert Oracle NUMBER text to int, or Decimal when fractional.

    Oracle may emit ',' as the decimal separator depending on NLS settings,
    so it is normalized to '.' first.
    """
    normalized = val.replace(',', '.')
    return Decimal(normalized) if '.' in normalized else int(normalized)
def to_decimal(val):
    """Parse Oracle NUMBER text as Decimal, normalizing ',' to '.'."""
    normalized = val.replace(',', '.')
    return Decimal(normalized)
def output_type_handler(cursor, name, defaultType, size, precision, scale):
    """cx_Oracle output type handler: fetch NUMBER columns as strings and
    convert them to exact Python values (int or Decimal) instead of floats.

    Returning None tells cx_Oracle to use its default handling.
    """
    if defaultType == cx_Oracle.NUMBER:
        if scale == 0:
            # Declared integer precision -> int; unconstrained -> may be
            # either int or Decimal depending on the actual value.
            converter = int if precision else to_int_or_decimal
            return cursor.var(cx_Oracle.STRING, 40, cursor.arraysize,
                              outconverter=converter)
        if scale != -127:  # -127 marks a binary FLOAT, left to the driver
            return cursor.var(cx_Oracle.STRING, 100, cursor.arraysize,
                              outconverter=to_decimal)
    elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
        return cursor.var(str, size, cursor.arraysize)  # from cx_Oracle example
    return None
class OraPool(object):
    """Thin wrapper around cx_Oracle.SessionPool with fork awareness."""
    # Pools inherited across os.fork() are parked here (presumably kept
    # referenced so the parent's pool is not torn down -- confirm).
    forked_pools = []
    def __init__(pool, **kwargs):
        pool.kwargs = kwargs
        pool.cx_pool = cx_Oracle.SessionPool(**kwargs)
        pool.pid = os.getpid()
    def connect(pool):
        # A cx_Oracle pool cannot be shared across fork: if the pid changed,
        # set the inherited pool aside and create a fresh one.
        pid = os.getpid()
        if pool.pid != pid:
            pool.forked_pools.append((pool.cx_pool, pool.pid))
            pool.cx_pool = cx_Oracle.SessionPool(**pool.kwargs)
            pool.pid = os.getpid()
        if core.local.debug: log_orm('GET CONNECTION')
        con = pool.cx_pool.acquire()
        # Route NUMBER/CHAR fetches through output_type_handler for exact types.
        con.outputtypehandler = output_type_handler
        return con, True
    def release(pool, con):
        pool.cx_pool.release(con)
    def drop(pool, con):
        pool.cx_pool.drop(con)
    def disconnect(pool):
        # Nothing to do: session lifetime is managed by the cx_Oracle pool.
        pass
def get_inputsize(arg):
    """Return the cx_Oracle bind type for *arg*, or None for default handling.

    datetime values must be bound as TIMESTAMP so fractional seconds survive.
    """
    return cx_Oracle.TIMESTAMP if isinstance(arg, datetime) else None
def set_input_sizes(cursor, arguments):
    """Declare bind-variable types via cursor.setinputsizes before execute.

    Named parameters (dict) only declare the entries that need an explicit
    type; positional parameters (tuple) pass None placeholders to keep
    positions aligned.
    """
    if type(arguments) is dict:
        sizes = {}
        for name, arg in arguments.items():
            bind_type = get_inputsize(arg)
            if bind_type is not None:
                sizes[name] = bind_type
        cursor.setinputsizes(**sizes)
    elif type(arguments) is tuple:
        cursor.setinputsizes(*map(get_inputsize, arguments))
    else: assert False, type(arguments)  # pragma: no cover
| |
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute log-probabilities under a mixture of Gaussians.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one per row.
    means : array_like, shape (n_components, n_features)
        Mean vector of each Gaussian, one per row.
    covars : array_like
        Per-component covariance parameters; shape depends on
        `covariance_type`:
        (n_components, n_features)             if 'spherical',
        (n_features, n_features)               if 'tied',
        (n_components, n_features)             if 'diag',
        (n_components, n_features, n_features) if 'full'
    covariance_type : string
        One of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log-probability of every sample under every component.
    """
    density_funcs = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    # Dict lookup doubles as validation: an unknown type raises KeyError,
    # exactly as before.
    compute = density_funcs[covariance_type]
    return compute(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single Gaussian.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like
        Covariance; shape depends on `covariance_type`:
        scalar if 'spherical', (n_features,) if 'diag',
        (n_features, n_features) if 'tied' or 'full'.
    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
    n_samples : int, optional
        Number of samples to draw. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated samples (squeezed to 1-D when n_samples == 1).
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    noise = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        noise.shape = (n_dim,)

    if covariance_type == 'spherical':
        noise *= np.sqrt(covar)
    elif covariance_type == 'diag':
        noise = np.dot(np.diag(np.sqrt(covar)), noise)
    else:
        # 'tied'/'full': color the noise with the matrix square root of covar.
        from scipy import linalg
        U, s, V = linalg.svd(covar)
        sqrt_covar = np.dot(U, np.dot(np.diag(np.sqrt(s)), V))
        noise = np.dot(sqrt_covar, noise)

    return (noise.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model

    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.

    Initializes parameters such that every mixture component has zero
    mean and identity covariance.

    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.

    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.

    thresh : float, optional
        Convergence threshold.

    n_iter : int, optional
        Number of EM iterations to perform.

    n_init : int, optional
        Number of initializations to perform. The best result is kept.

    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.

    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.

    Attributes
    ----------
    `weights_` : array, shape (`n_components`,)
        This attribute stores the mixing weights for each mixture component.

    `means_` : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.

    `covars_` : array
        Covariance parameters for each mixture component. The shape
        depends on `covariance_type`::

            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    `converged_` : bool
        True when convergence was reached in fit(), False otherwise.

    See Also
    --------
    DPGMM : Infinite gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm

    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
        n_components=2, n_init=1, n_iter=100, params='wmc',
        random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.75,  0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [  0.06]])
    >>> np.round(g.covars_, 2) #doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
    array([1, 1, 0, 0]...)
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
        n_components=2, n_init=1, n_iter=100, params='wmc',
        random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.5,  0.5])

    """

    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=1e-2, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc'):
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params

        # `x not in y` is the idiomatic membership test (was `not x in y`).
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)

        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')

        self.weights_ = np.ones(self.n_components) / self.n_components

        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False

    def _get_covars(self):
        """Covariance parameters for each mixture component.

        The shape depends on `cvtype`::

            (`n_states`, 'n_features')                if 'spherical',
            (`n_features`, `n_features`)              if 'tied',
            (`n_states`, `n_features`)                if 'diag',
            (`n_states`, `n_features`, `n_features`)  if 'full'
        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]

    def _set_covars(self, covars):
        """Provide values for covariance"""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars

    def eval(self, X):
        """Evaluate the model on data

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            # Empty input: empty logprob and an empty responsibility matrix.
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('the shape of X is not compatible with self')

        lpr = (log_multivariate_normal_density(
            X, self.means_, self.covars_, self.covariance_type)
            + np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities

    def score(self, X):
        """Compute the log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.eval(X)
        return logprob

    def predict(self, X):
        """Predict label for data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = (n_samples,)
        """
        logprob, responsibilities = self.eval(X)
        return responsibilities.argmax(axis=1)

    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.eval(X)
        return responsibilities

    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        weight_cdf = np.cumsum(self.weights_)

        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        # (range instead of xrange: works on both Python 2 and 3)
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X

    def fit(self, X):
        """Estimate model parameters with the expectation-maximization
        algorithm.

        An initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating the
        GMM object. Likewise, if you would like just to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        """
        ## initialization step
        # `float` instead of the deprecated `np.float` alias (removed in
        # NumPy 1.24); identical behavior.
        X = np.asarray(X, dtype=float)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))

        # `np.inf` instead of the removed `np.infty` alias.
        max_log_prob = -np.inf

        for _ in range(self.n_init):
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_

            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)

            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)

            # EM algorithms
            log_likelihood = []
            # reset self.converged_ to False
            self.converged_ = False
            # (range instead of xrange: works on both Python 2 and 3)
            for i in range(self.n_iter):
                # Expectation step
                curr_log_likelihood, responsibilities = self.eval(X)
                log_likelihood.append(curr_log_likelihood.sum())

                # Check for convergence.
                if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
                        self.thresh:
                    self.converged_ = True
                    break

                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)

            # if the results are better, keep it
            if self.n_iter:
                if log_likelihood[-1] > max_log_prob:
                    max_log_prob = log_likelihood[-1]
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")
        # self.n_iter == 0 occurs when using GMM within HMM
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        return self

    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class weights.
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)

        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights

    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        ndim = self.means_.shape[1]
        if self.covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self.covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)

    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        bic: float (the lower the better)
        """
        return (- 2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))

    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a diagonal model"""
    # Vectorized expansion of -0.5 * [d*log(2*pi) + log|C| + (x-mu)' C^-1 (x-mu)]
    # for diagonal C; the quadratic form is split into its three dot-product
    # terms so no per-component loop is needed.
    n_samples, n_dim = X.shape
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
                  + np.sum((means ** 2) / covars, 1)
                  - 2 * np.dot(X, (means / covars).T)
                  + np.dot(X ** 2, (1.0 / covars).T))
    return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model.

    A spherical covariance is expanded to a per-feature diagonal and the
    diagonal implementation is reused.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        cv = cv[:, np.newaxis]
    # Fix: test the (possibly reshaped) `cv`, not the original `covars` --
    # `covars.shape[1]` raises IndexError for the 1-D input the branch above
    # explicitly supports.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
import itertools
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probabily stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Expand a single template covariance matrix into the per-component
    layout required by the given covariance type."""
    if covariance_type == 'spherical':
        per_feature = tied_cv.mean() * np.ones(tied_cv.shape[1])
        return np.tile(per_feature, (n_components, 1))
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    # A spherical covariance is the per-component mean of the diagonal one,
    # broadcast back across all features.
    diag_cv = _covar_mstep_diag(*args)
    per_component = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    # range instead of xrange: works identically on Python 2 and 3
    # (xrange does not exist on Python 3).
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        # Underflow Errors in doing post * X.T are not important
        np.seterr(under='ignore')
        avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
        mu = gmm.means_[c][np.newaxis]
        cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
# Dispatch table mapping covariance_type -> covariance M-step implementation;
# consumed by GMM._do_mstep.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| |
# Copyright (c) 2013, Albert Zeyer, www.az2000.de
# All rights reserved.
# Code under 2-clause BSD licence.
import dis
import types
def _modified_jumps(codestr, jumprel=None, jumpaddrmap=None, start=None, end=None):
    """Rewrite jump targets in a CPython 2.x bytecode string.

    Either shifts every absolute jump by the constant `jumprel`, or maps
    each jump target through `jumpaddrmap(addr, op, target)` (relative
    jumps are converted to absolute addresses around the call).  Only the
    byte region [start, end) is scanned.  Returns the patched code string.
    """
    if jumprel is not None:
        assert jumpaddrmap is None, "specify only one of jumprel and jumpaddrmap"
        jumpaddrmap = lambda addr, op, n: n + jumprel
    assert jumpaddrmap is not None, "one of jumprel and jumpaddrmap must be specified"
    codestr = bytearray(codestr)
    if start is None: start = 0
    if end is None: end = len(codestr)
    i = start
    while i < end:
        op = codestr[i]
        i += 1
        if op >= dis.HAVE_ARGUMENT:
            # Pre-3.6 bytecode layout: a 2-byte little-endian argument
            # follows every opcode >= HAVE_ARGUMENT.
            b1 = codestr[i]
            b2 = codestr[i+1]
            num = b2 * 256 + b1
            del b1,b2
            i += 2
        else:
            num = 0
        if op in dis.hasjabs:
            assert op >= dis.HAVE_ARGUMENT
            num = jumpaddrmap(i, op, num)
            # NOTE(review): assigning chr() into a bytearray slot works on
            # Python 2 only; Python 3 would require plain ints here.
            codestr[i-2] = chr(num & 255)
            codestr[i-1] = chr(num >> 8)
        if op in dis.hasjrel and jumprel is None:
            # Relative jumps are deliberately untouched in jumprel mode:
            # a constant shift does not change relative offsets.
            assert op >= dis.HAVE_ARGUMENT
            num += i  # because it is a relative jump
            num = jumpaddrmap(i, op, num)  # map
            num -= i  # convert back to relative jump
            codestr[i-2] = chr(num & 255)
            codestr[i-1] = chr(num >> 8)
    return str(codestr)
def _codestr_without_final_return(codestr):
assert len(codestr) >= 4
assert codestr[-4] == dis.opmap["LOAD_CONST"]
assert codestr[-1] == dis.opmap["RETURN_VALUE"]
return codestr[:-4]
def _prefix_codestr(codestr1, codestr2):
	"""Concatenate two code strings, fixing up jumps in the tail.

	Absolute jump targets inside `codestr2` are shifted by len(codestr1)
	so they still point at the same instructions after the prefix has
	been inserted (relative jumps need no fixup in this mode).
	See dis.findlinestarts() regarding co_firstlineno and co_lnotab.
	"""
	prefix_len = len(codestr1)
	combined = codestr1 + codestr2
	return _modified_jumps(combined, jumprel=prefix_len,
	                       start=prefix_len, end=len(combined))
def _modified_code(c, **kwargs):
	"""Return a copy of code object `c` with selected co_* fields replaced.

	Keyword names are the co_-less field names ("code", "consts",
	"lnotab", ...); each must name an existing field.  CodeArgs lists the
	positional constructor arguments of the *Python 2* types.CodeType --
	the order matters and does NOT match Python 3 (which adds
	kwonlyargcount, posonlyargcount, etc.).
	"""
	CodeArgs = [
		"argcount", "nlocals", "stacksize", "flags", "code",
		"consts", "names", "varnames", "filename", "name",
		"firstlineno", "lnotab", "freevars", "cellvars"]
	c_dict = dict([(arg, getattr(c, "co_" + arg)) for arg in CodeArgs])
	for key,value in kwargs.items():
		assert key in c_dict
		c_dict[key] = value
	c = types.CodeType(*[c_dict[arg] for arg in CodeArgs])
	return c
def _find_setup_blocks(codestr, start, end):
	"""Return the block-stack entries still open at address `end`.

	Scans classic (pre-3.6) bytecode in [start, end), pushing an entry
	for each SETUP_LOOP / SETUP_EXCEPT / SETUP_FINALLY and popping on
	POP_BLOCK.  Each entry is a list [setup-op, absolute target
	instraddr, loop-type], where loop-type becomes the FOR_ITER opcode
	if one was seen while that block was innermost, else None.
	(Note: despite the old one-line doc saying "Yields", this returns a
	list, not a generator.)
	"""
	i = start
	SETUPS = [dis.opmap[opname] for opname in ["SETUP_LOOP", "SETUP_EXCEPT", "SETUP_FINALLY"]]
	POP_BLOCK = dis.opmap["POP_BLOCK"]
	FOR_ITER = dis.opmap["FOR_ITER"]
	blockstack = []
	while i < end:
		op = ord(codestr[i])
		i += 1
		if op >= dis.HAVE_ARGUMENT:
			b1 = ord(codestr[i])
			b2 = ord(codestr[i+1])
			num = b2 * 256 + b1
			del b1,b2
			i += 2
		else:
			num = 0
		if op in SETUPS:
			assert op >= dis.HAVE_ARGUMENT
			# Setup target is relative to the next instruction (i).
			blockstack += [[op, i + num, None]]
		elif op == FOR_ITER:
			# A FOR_ITER marks the innermost open block as a for-loop.
			assert len(blockstack) > 0
			blockstack[-1][2] = op
		elif op == POP_BLOCK:
			assert len(blockstack) > 0
			blockstack.pop(len(blockstack) - 1)
	return blockstack
def replace_code(codeobj, instaddr, removelen=0, addcodestr=""):
	"""Return a copy of `codeobj` with a bytecode region replaced.

	Replaces co_code[instaddr:instaddr+removelen] by `addcodestr`, and
	keeps co_lnotab and all jump targets in the surrounding code
	consistent.  `instaddr` and `instaddr + removelen` must fall on
	instruction boundaries, `addcodestr` must itself decode cleanly, and
	jumps from the remaining code into the removed region are rejected
	via assert.  Classic (pre-3.6, 1-/3-byte instruction) bytecode only.
	"""
	assert isinstance(codeobj, types.CodeType)
	assert removelen >= 0
	if removelen == 0 and len(addcodestr) == 0: return codeobj
	# Search right place in lnotab.
	lnotab = codeobj.co_lnotab
	assert len(lnotab) % 2 == 0
	lnotab_instaddr = 0
	lnotab_idx = 0
	lnotab_len = len(lnotab)
	while lnotab_idx < lnotab_len:
		if lnotab_instaddr >= instaddr: break
		addrincr, lineincr = map(ord, lnotab[lnotab_idx:lnotab_idx+2])
		lnotab_instaddr += addrincr
		lnotab_idx += 2
	assert lnotab_instaddr >= instaddr, "instaddr %i not in lnotab" % instaddr
	# If we skipped it, insert a dummy entry to lnotab.
	if lnotab_instaddr > instaddr:
		# Insert. addrincr, lineincr are from the last entry.
		lnotab = \
			lnotab[:lnotab_idx-2] + \
			chr(addrincr - (lnotab_instaddr - instaddr)) + chr(0) + \
			chr(lnotab_instaddr - instaddr) + chr(lineincr) + \
			lnotab[lnotab_idx:]
		lnotab_instaddr = instaddr
	# NOTE(review): if instaddr == 0 the loop above never runs, so
	# addrincr/lineincr are unbound and this del raises NameError --
	# confirm callers never pass instaddr 0.
	del addrincr, lineincr
	assert lnotab_instaddr == instaddr
	# And lnotab_idx is right where the upcoming lnotab-data starts.
	# Check whether instaddr is sane.
	codestr = codeobj.co_code
	codeidx = 0
	codelen = len(codestr)
	while codeidx < codelen:
		if codeidx >= instaddr: break
		op = ord(codestr[codeidx])
		codeidx += 1
		if op >= dis.HAVE_ARGUMENT: codeidx += 2
	assert codeidx == instaddr, "instaddr %i doesn't align in code" % instaddr
	# Check whether removelen is sane.
	while codeidx < codelen:
		if codeidx >= instaddr + removelen: break
		op = ord(codestr[codeidx])
		codeidx += 1
		if op >= dis.HAVE_ARGUMENT: codeidx += 2
	assert codeidx == instaddr + removelen, "removelen %i doesn't align in code" % removelen
	# Update lnotab for removed code.
	start_lnotab_idx = lnotab_idx
	lineincrcount = 0
	# Search for the end pos of the removed code.
	while lnotab_idx < lnotab_len:
		if lnotab_instaddr >= instaddr + removelen: break
		addrincr, lineincr = map(ord, lnotab[lnotab_idx:lnotab_idx+2])
		lnotab_instaddr += addrincr
		lineincrcount += lineincr
		lnotab_idx += 2
	assert lnotab_instaddr >= instaddr + removelen, "lnotab is invalid"
	# If we skipped it, insert a dummy entry to lnotab.
	if lnotab_instaddr > instaddr + removelen:
		# Insert. addrincr, lineincr are from the last entry.
		lnotab = \
			lnotab[:lnotab_idx-2] + \
			chr(addrincr - (lnotab_instaddr - (instaddr + removelen))) + chr(lineincr) + \
			chr(lnotab_instaddr - (instaddr + removelen)) + chr(0) + \
			lnotab[lnotab_idx:]
		lnotab_instaddr = instaddr + removelen
	assert lnotab_instaddr == instaddr + removelen
	# And lnotab_idx is right where the upcoming lnotab-data starts.
	# Remove lnotab[start_lnotab_idx:lnotab] but add the lineincrcount.
	if start_lnotab_idx < lnotab_idx:
		lnotab = \
			lnotab[:start_lnotab_idx] + \
			lnotab[lnotab_idx:]
		lnotab_idx = start_lnotab_idx
		while lineincrcount > 0:
			# Line increments > 255 must be split over several entries.
			lnotab = \
				lnotab[:start_lnotab_idx] + \
				chr(0) + chr(lineincrcount & 255) + \
				lnotab[start_lnotab_idx:]
			lineincrcount -= lineincrcount & 255
	# Update lnotab for new code.
	codelendiff = len(addcodestr)
	while codelendiff > 0:
		# Address increments > 255 must be split over several entries.
		lnotab = \
			lnotab[:lnotab_idx] + \
			chr(codelendiff & 255) + chr(0) + \
			lnotab[lnotab_idx:]
		codelendiff -= codelendiff & 255
		lnotab_idx += 2
	assert codelendiff == 0
	del codelendiff
	# Check whether addcodestr is sane.
	codeidx = 0
	codelen = len(addcodestr)
	while codeidx < codelen:
		op = ord(addcodestr[codeidx])
		codeidx += 1
		if op >= dis.HAVE_ARGUMENT: codeidx += 2
	assert codeidx == codelen, "addcodestr is not sane. %r" % ((codeidx, codelen),)
	# Update jumps in code start.
	def codestr_start_jumpaddrmap(addr, op, n):
		if n <= instaddr: return n
		if n >= instaddr + removelen: return n - removelen + len(addcodestr)
		assert False, "invalid jump %i in code" % n
	codestr_start = _modified_jumps(
		codestr[:instaddr],
		jumpaddrmap=codestr_start_jumpaddrmap)
	# Update jumps in code end.
	def codestr_end_jumpaddrmap(addr, op, n):
		if op in dis.hasjrel:
			assert n > 0 # I think this is true?
			return n # Never modify this!
		if n <= instaddr: return n
		if n >= instaddr + removelen: return n - removelen + len(addcodestr)
		assert False, "invalid jump %i in code" % n
	codestr_end = _modified_jumps(
		codestr[instaddr+removelen:],
		jumpaddrmap=codestr_end_jumpaddrmap)
	# Update codestr.
	codestr = codestr_start + addcodestr + codestr_end
	# Return new code object.
	new_code = _modified_code(
		codeobj,
		code=codestr,
		lnotab=lnotab,
	)
	return new_code
def restart_func(func, instraddr, localdict):
	"""
	Returns a new modified version of `func` which jumps right to instraddr
	with the localdict in place.

	:param func: Python 2 function to restart (func_code etc. attributes)
	:param instraddr: bytecode address in func.func_code.co_code to resume
		at; must lie on an instruction boundary
	:param localdict: maps local-variable name -> value; every key must
		appear in func.func_code.co_varnames

	Note that the way this currently works is limited because it uses
	only standard Python functions and objects. Only while-loops are supported.
	For-loops are not supported and are not possible this way.
	Except-blocks and with-blocks are not tested and will probably crash.
	"""
	# Another possibility to do this:
	# We need ctypes. Then we just save the full stack and the current op.
	# We can just load the full stack via LOAD_CONST.
	# This again has some problems such as that we don't want to resume
	# right at the op (because that might be the one raising an exception).
	# And if you want to resume somewhere else, you again need to manually
	# recalculate the stack so that it stays same.
	# And yet another:
	# We could also return a modified version of the func which automatically
	# replaces all for-loops with while-loops and store the iterator object
	# in a temporary local variable. However, still left is the problem that
	# many iterator objects (e.g. listiterator) are not pickable, so
	# serializing doesn't work. This again could be hacked via ctypes.
	# Build a preamble: LOAD_CONST value / STORE_FAST var for each local.
	preload_code = ""
	code_consts = func.func_code.co_consts
	LOAD_CONST = chr(dis.opmap["LOAD_CONST"])
	STORE_FAST = chr(dis.opmap["STORE_FAST"])
	for key,value in localdict.items():
		co_const_idx = len(code_consts)
		code_consts += (value,)
		preload_code += LOAD_CONST + chr(co_const_idx & 255) + chr(co_const_idx >> 8)
		varidx = func.func_code.co_varnames.index(key)
		preload_code += STORE_FAST + chr(varidx & 255) + chr(varidx >> 8)
	# Re-enter every block (SETUP_*) that was open at instraddr.
	setup_blocks = _find_setup_blocks(func.func_code.co_code, start=0, end=instraddr)
	preload_code_len = len(localdict) * 6 + len(setup_blocks) * 3 + 3
	for op,targetaddr,looptype in setup_blocks:
		# Note on the loop-type:
		# Supporting for-loops is really complicated! We need to examine the stack of
		# the frame to get the iterator object. This cannot be done with the
		# normal Python APIs - we need ctypes to get raw access to PyFrameObject.
		# Then, even more complicated is to get the right stack address. You
		# have to go back from the opaddr where you are to the last FOR_ITER and count
		# the stack modifications.
		# Then, if you raise an exception and catch it outside, you already have lost
		# the stack and thus the iterator object - so this is not an option.
		# You need to do this while it is still active - e.g. from within an
		# exception trace function.
		assert looptype is None, "only while-loops supported at the moment"
		targetaddr += preload_code_len
		reladdr = targetaddr - (len(preload_code) + 3)
		preload_code += chr(op) + chr(reladdr & 255) + chr(reladdr >> 8)
	instraddr += preload_code_len
	preload_code += chr(dis.opmap["JUMP_ABSOLUTE"])
	preload_code += chr(instraddr & 255) + chr(instraddr >> 8)
	# Just a check. LoadConst+StoreFast, then .. and then JumpAbs.
	assert preload_code_len == len(preload_code)
	codestr = _prefix_codestr(preload_code, func.func_code.co_code)
	# Shift lnotab so original line numbers still map past the preamble.
	lnotab = func.func_code.co_lnotab
	lnotab_moverel = len(preload_code)
	while lnotab_moverel > 0:
		lnotab = chr(lnotab_moverel & 255) + chr(0) + lnotab
		lnotab_moverel -= lnotab_moverel & 255
	new_code = _modified_code(
		func.func_code,
		consts=code_consts,
		code=codestr,
		lnotab=lnotab,
		argcount=0
	)
	new_func = types.FunctionType(
		new_code,
		func.func_globals,
		func.func_name,
		func.func_defaults,
		func.func_closure,
	)
	return new_func
def _get_varnameprefix_startidx(varnames, varnameprefix, start=1, incr=1):
varnameprefix += "_"
relidx = len(varnameprefix)
varnames = [name[relidx:] for name in varnames if name.startswith(varnameprefix)]
def map_postfix(s):
try: return int(s)
except ValueError: return -1
varnames = map(map_postfix, varnames)
varnames.sort()
if len(varnames) == 0:
return start
return varnames[-1] + incr
def _opiter(codestr):
i = 0
codelen = len(codestr)
while i < codelen:
codeaddr = i
op = ord(codestr[i])
i += 1
if op >= dis.HAVE_ARGUMENT:
b1,b2 = map(ord, codestr[i:i+2])
arg = b2 * 256 + b1
del b1,b2
i += 2
else:
arg = None
yield (codeaddr, op, arg)
def _codeops_compile(codeops):
codestr = ""
for op,arg in codeops:
if isinstance(op, str):
op = dis.opmap[op]
codestr += chr(op)
if op >= dis.HAVE_ARGUMENT:
assert arg is not None
assert arg >= 0 and arg < 256 * 256
codestr += chr(arg & 255) + chr(arg >> 8)
else:
assert arg is None
return codestr
def _list_getobjoradd(consts, obj, equalop=lambda a,b: a is b):
for i in range(len(consts)):
if equalop(consts[i], obj):
return consts, i
return consts + (obj,), len(consts)
def simplify_loops(func):
	"""
	Returns a new modified version of `func` which behaves exactly the same but
	which has all for-loops replaced with while-loops. This makes it compatible
	for `restart_func`.

	Python 2 only.  Each FOR_ITER is rewritten to: store the iterator in a
	fresh local __loopiter_<n>, then call next() on it inside a
	SETUP_EXCEPT block, jumping to the original loop-exit target when
	StopIteration is raised.  The hand-computed jump offsets below are
	tied to the exact instruction sequence in `codeops`.
	"""
	codeobj = func.func_code
	names = codeobj.co_names
	names, names_next_idx = _list_getobjoradd(names, "next")
	names, names_StopIter_idx = _list_getobjoradd(names, "StopIteration")
	codeobj = _modified_code(
		codeobj,
		names=names,
		stacksize=codeobj.co_stacksize + 3 # not sure how much more we need, but e.g. there is a SETUP_EXCEPT
	)
	varnames = codeobj.co_varnames
	nlocals = codeobj.co_nlocals
	varidx = _get_varnameprefix_startidx(varnames, "__loopiter")
	oplist = list(_opiter(codeobj.co_code))
	codeaddrdiff = 0  # how much the rewritten code has grown so far
	for i in range(len(oplist)):
		codeaddr, op, arg = oplist[i]
		codeaddr += codeaddrdiff
		if op == dis.opmap["FOR_ITER"]:
			# Get a new unique variable name for the iterator object.
			varnameidx = len(varnames)
			varnames += ("__loopiter_%i" % varidx,)
			varidx += 1
			nlocals += 1
			codeobj = _modified_code(
				codeobj,
				varnames=varnames,
				nlocals=nlocals
			)
			# We expect that the loop jumps back to the FOR_ITER and thus expects
			# to have one item (the iter) on the stack. We don't want that for
			# simple code resuming (via `restart_func`) because it is not what we have
			# in a simple `while` loop.
			# Thus, right in front of the FOR_ITER, insert the STORE_FAST for the iter.
			# The way `replace_code` works, we need to replace the previous op because
			# we want that jumps to the FOR_ITER keeps pointing there.
			assert i > 0
			codestr = _codeops_compile([
				(oplist[i-1][1], oplist[i-1][2]),
				("STORE_FAST", varnameidx)
			])
			codeobj = replace_code(
				codeobj,
				instaddr=oplist[i-1][0] + codeaddrdiff, # at the last op
				removelen=oplist[i][0] - oplist[i-1][0], # just the last op
				addcodestr=codestr)
			codeaddrdiff += 3 # the STORE_FAST
			codeaddr += 3
			forIterAddr = codeaddr
			forIterAbsJumpTarget = codeaddr + 3 + arg
			# We expect the next op to be STORE_FAST, where we store the result of `next(iter)`.
			assert i < len(oplist) - 1 and oplist[i+1][1] == dis.opmap["STORE_FAST"]
			nextvar_varnameidx = oplist[i+1][2]
			# Now call `next()` on it and catch StopIteration.
			# Note that all jump-constants here are carefully adjusted.
			# If you change something here, probably all of them need to be updated!
			codeops = [
				("SETUP_EXCEPT", 16), # in case of exception, jump to DUP_TOP
				("LOAD_GLOBAL", names_next_idx),
				("LOAD_FAST", varnameidx),
				("CALL_FUNCTION", 1),
				("STORE_FAST", nextvar_varnameidx),
				("POP_BLOCK", None),
				("JUMP_FORWARD", 17), # jump outside of `try/except`, one after END_FINALLY
				("DUP_TOP", None),
				("LOAD_GLOBAL", names_StopIter_idx),
				("COMPARE_OP", 10), # exception match
				("POP_JUMP_IF_FALSE", 35 + forIterAddr), # jump to END_FINALLY
				("POP_TOP", None),
				("POP_TOP", None),
				("POP_TOP", None),
				("JUMP_ABSOLUTE", forIterAbsJumpTarget + 30), # the FOR_ITER target (adjusted with diff)
				("END_FINALLY", None),
			]
			codestr = _codeops_compile(codeops)
			removelen = 6 # FOR_ITER and STORE_FAST
			codeaddrdiff += len(codestr) - removelen
			codeobj = replace_code(codeobj, instaddr=codeaddr, removelen=removelen, addcodestr=codestr)
	new_func = types.FunctionType(
		codeobj,
		func.func_globals,
		func.func_name,
		func.func_defaults,
		func.func_closure,
	)
	return new_func
def add_debug_prints_after_stores(func):
	"""
	Returns a new modified version of `func` which prints
	every value after a STORE_FAST.

	Python 2 only: relies on the PRINT_ITEM / PRINT_NEWLINE opcodes and
	the func_code / func_globals function attributes.
	"""
	codeobj = func.func_code
	consts = codeobj.co_consts
	# Make every local's name (and the "=" separator) available as a
	# constant so it can be printed via LOAD_CONST.
	varnameindexes = [None] * len(codeobj.co_varnames)
	for i in range(len(varnameindexes)):
		consts, varnameindexes[i] = _list_getobjoradd(consts, codeobj.co_varnames[i], lambda a,b: a == b)
	consts, equalstridx = _list_getobjoradd(consts, "=", lambda a,b: a == b)
	codeobj = _modified_code(
		codeobj,
		consts=consts,
	)
	oplist = list(_opiter(codeobj.co_code))
	codeaddrdiff = 0  # how much the rewritten code has grown so far
	for i in range(len(oplist)):
		codeaddr, op, arg = oplist[i]
		codeaddr += codeaddrdiff
		if op == dis.opmap["STORE_FAST"]:
			varidx = arg
			assert 0 <= varidx < len(varnameindexes)
			# Replace the STORE_FAST by the following code. All jumps to
			# it stay intact while jumps to the next instr will also stay
			# intact (and not become jumps to this code).
			addcodestr = _codeops_compile([
				(op, arg),
				("LOAD_CONST", varnameindexes[varidx]),
				("PRINT_ITEM", None),
				("LOAD_CONST", equalstridx),
				("PRINT_ITEM", None),
				("LOAD_FAST", varidx), # load the same var
				("PRINT_ITEM", None),
				("PRINT_NEWLINE", None)
			])
			codeobj = replace_code(
				codeobj,
				instaddr=codeaddr,
				removelen=3, # remove the STORE_FAST
				addcodestr=addcodestr
			)
			codeaddrdiff += len(addcodestr) - 3
	new_func = types.FunctionType(
		codeobj,
		func.func_globals,
		func.func_name,
		func.func_defaults,
		func.func_closure,
	)
	return new_func
| |
# Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
from six.moves.urllib.parse import urlencode
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api.contrib import test_snapshot_manage as test_contrib
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
# Global oslo.config handle used to read API limits in assertions below.
CONF = cfg.CONF
def app():
    """Build a minimal WSGI app exposing only the v3 API router.

    No auth middleware -- tests inject the request context directly via
    environ['cinder.context'].
    """
    api = router_v3.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v3'] = api
    return mapper
@ddt.ddt
@mock.patch('cinder.volume.api.API.get', test_contrib.volume_get)
class SnapshotManageTest(test.TestCase):
    """Test cases for cinder/api/v3/snapshot_manage.py

    Mostly routing/microversion checks; full behavioural coverage lives
    in the contrib test module this one delegates to.
    """
    def setUp(self):
        super(SnapshotManageTest, self).setUp()
        # Admin request context reused by every request helper below.
        self._admin_ctxt = context.RequestContext(fake.USER_ID,
                                                  fake.PROJECT_ID,
                                                  True)
    def _get_resp_post(self, body, version=mv.MANAGE_EXISTING_LIST):
        """Helper to execute a POST manageable_snapshots API call."""
        req = webob.Request.blank('/v3/%s/manageable_snapshots' %
                                  fake.PROJECT_ID)
        req.method = 'POST'
        req.headers = mv.get_mv_header(version)
        req.headers['Content-Type'] = 'application/json'
        req.environ['cinder.context'] = self._admin_ctxt
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(app())
        return res
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
    @mock.patch('cinder.volume.api.API.create_snapshot_in_db')
    @mock.patch('cinder.objects.service.Service.get_by_id')
    def test_manage_snapshot_route(self, mock_service_get,
                                   mock_create_snapshot, mock_rpcapi):
        """Test call to manage snapshot.

        There is currently no change between the API in contrib and the API in
        v3, so here we simply check that the call is routed properly, rather
        than copying all the tests.
        """
        mock_service_get.return_value = fake_service.fake_service_obj(
            self._admin_ctxt,
            binary='cinder-volume')
        body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
        res = self._get_resp_post(body)
        self.assertEqual(http_client.ACCEPTED, res.status_int, res)
    def test_manage_snapshot_previous_version(self):
        """Manage raises 404 for a microversion older than the feature."""
        body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
        res = self._get_resp_post(
            body, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST))
        self.assertEqual(http_client.NOT_FOUND, res.status_int, res)
    def _get_resp_get(self, host, detailed, paging,
                      version=mv.MANAGE_EXISTING_LIST, **kwargs):
        """Helper to execute a GET os-snapshot-manage API call."""
        params = {'host': host} if host else {}
        params.update(kwargs)
        if paging:
            params.update({'marker': '1234', 'limit': 10,
                           'offset': 4, 'sort': 'reference:asc'})
        query_string = "?%s" % urlencode(params)
        detail = ""
        if detailed:
            detail = "/detail"
        req = webob.Request.blank('/v3/%s/manageable_snapshots%s%s' %
                                  (fake.PROJECT_ID, detail, query_string))
        req.method = 'GET'
        req.headers = mv.get_mv_header(version)
        req.headers['Content-Type'] = 'application/json'
        req.environ['cinder.context'] = self._admin_ctxt
        res = req.get_response(app())
        return res
    @mock.patch('cinder.volume.api.API.get_manageable_snapshots',
                wraps=test_contrib.api_get_manageable_snapshots)
    def test_get_manageable_snapshots_route(self, mock_api_manageable):
        """Test call to get manageable volumes.

        There is currently no change between the API in contrib and the API in
        v3, so here we simply check that the call is routed properly, rather
        than copying all the tests.
        """
        res = self._get_resp_get('fakehost', False, False)
        self.assertEqual(http_client.OK, res.status_int)
    def test_get_manageable_snapshots_previous_version(self):
        """Listing raises 404 for a microversion older than the feature."""
        res = self._get_resp_get(
            'fakehost', False, False,
            version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST))
        self.assertEqual(http_client.NOT_FOUND, res.status_int)
    @mock.patch('cinder.volume.api.API.get_manageable_snapshots',
                wraps=test_contrib.api_get_manageable_snapshots)
    def test_get_manageable_snapshots_detail_route(self, mock_api_manageable):
        """Test call to get manageable volumes (detailed).

        There is currently no change between the API in contrib and the API in
        v3, so here we simply check that the call is routed properly, rather
        than copying all the tests.
        """
        res = self._get_resp_get('fakehost', True, True)
        self.assertEqual(http_client.OK, res.status_int)
    def test_get_manageable_snapshots_detail_previous_version(self):
        """Detail listing raises 404 for an older microversion."""
        res = self._get_resp_get(
            'fakehost', True, True,
            version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST))
        self.assertEqual(http_client.NOT_FOUND, res.status_int)
    @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'),
              (False, True, 'detail_list'), (False, False, 'summary_list'))
    @ddt.unpack
    @mock.patch('cinder.objects.Service.is_up', True)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
    @mock.patch('cinder.objects.Service.get_by_id')
    def test_get_manageable_detail(self, clustered, is_detail, view_method,
                                   get_service_mock, get_cctxt_mock):
        """Check RPC/view wiring for host- and cluster-scoped listings."""
        if clustered:
            host = None
            cluster_name = 'mycluster'
            version = mv.MANAGE_EXISTING_CLUSTER
            kwargs = {'cluster': cluster_name}
        else:
            host = 'fakehost'
            cluster_name = None
            version = mv.MANAGE_EXISTING_LIST
            kwargs = {}
        service = objects.Service(disabled=False, host='fakehost',
                                  cluster_name=cluster_name)
        get_service_mock.return_value = service
        snaps = [mock.sentinel.snap1, mock.sentinel.snap2]
        get_cctxt_mock.return_value.call.return_value = snaps
        view_data = {'manageable-snapshots': [{'vol': 'mock.sentinel.snap1'},
                                              {'vol': 'mock.sentinel.snap2'}]}
        view_path = ('cinder.api.views.manageable_snapshots.ViewBuilder.' +
                     view_method)
        with mock.patch(view_path, return_value=view_data) as detail_view_mock:
            res = self._get_resp_get(host, is_detail, False, version=version,
                                     **kwargs)
        self.assertEqual(http_client.OK, res.status_int)
        get_cctxt_mock.assert_called_once_with(service.service_topic_queue,
                                               version=('3.10', '3.0'))
        get_cctxt_mock.return_value.call.assert_called_once_with(
            mock.ANY, 'get_manageable_snapshots', marker=None,
            limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'],
            sort_dirs=['desc'], want_objects=True)
        detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps))
        get_service_mock.assert_called_once_with(
            mock.ANY, None, host=host, binary='cinder-volume',
            cluster_name=cluster_name)
    @ddt.data(mv.MANAGE_EXISTING_LIST, mv.MANAGE_EXISTING_CLUSTER)
    def test_get_manageable_missing_host(self, version):
        """A listing without host (or cluster) is a 400."""
        res = self._get_resp_get(None, True, False, version=version)
        self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    def test_get_manageable_both_host_cluster(self):
        """Supplying both host and cluster is rejected with a 400."""
        res = self._get_resp_get('host', True, False,
                                 version=mv.MANAGE_EXISTING_CLUSTER,
                                 cluster='cluster')
        self.assertEqual(http_client.BAD_REQUEST, res.status_int)
| |
# -*- coding: utf-8 -*-
"""
sphinx.pycode
~~~~~~~~~~~~~
Utilities parsing and analyzing Python code.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import sys
from os import path
from six import iteritems, text_type, BytesIO, StringIO
from sphinx import package_dir
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
# load the Python grammar matching the running interpreter
_grammarfile = path.join(package_dir, 'pycode',
                         'Grammar-py%d.txt' % sys.version_info[0])
pygrammar = driver.load_grammar(_grammarfile)
pydriver = driver.Driver(pygrammar, convert=nodes.convert)
# an object with attributes corresponding to token and symbol names
class sym:
    pass
for k, v in iteritems(pygrammar.symbol2number):
    setattr(sym, k, v)
for k, v in iteritems(token.tok_name):
    setattr(sym, v, k)
# a dict mapping terminal and nonterminal numbers to their names
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
# prebuilt "=" leaf, used to detect assignments inside expr_stmt nodes
_eq = nodes.Leaf(token.EQUAL, '=')
# matches empty or comment-only lines; raw string fixes the invalid
# "\s" escape warning the old non-raw literal produced (same pattern)
emptyline_re = re.compile(r'^\s*(#.*)?$')
class AttrDocVisitor(nodes.NodeVisitor):
    """
    Visitor that collects docstrings for attribute assignments on toplevel and
    in classes (class attributes and attributes set in __init__).

    The docstrings can either be in special '#:' comments before the assignment
    or in a docstring after it.
    """
    def init(self, scope, encoding):
        # Called from the NodeVisitor base constructor; `scope` is the
        # dotted-name prefix collected results are restricted to.
        self.scope = scope
        self.in_init = 0  # > 0 while inside an __init__ funcdef
        self.encoding = encoding  # source encoding, for decoding comment bytes
        self.namespace = []  # current dotted-name stack
        self.collected = {}  # (namespace, attrname) -> docstring
        self.tagnumber = 0
        self.tagorder = {}  # fullname -> definition-order index
    def add_tag(self, name):
        """Record the definition order of `name` in the current namespace."""
        name = '.'.join(self.namespace + [name])
        self.tagorder[name] = self.tagnumber
        self.tagnumber += 1
    def visit_classdef(self, node):
        """Visit a class."""
        self.add_tag(node[1].value)
        self.namespace.append(node[1].value)
        self.generic_visit(node)
        self.namespace.pop()
    def visit_funcdef(self, node):
        """Visit a function (or method)."""
        # usually, don't descend into functions -- nothing interesting there
        self.add_tag(node[1].value)
        if node[1].value == '__init__':
            # however, collect attributes set in __init__ methods
            self.in_init += 1
            self.generic_visit(node)
            self.in_init -= 1
    def visit_expr_stmt(self, node):
        """Visit an assignment which may have a special comment before (or
        after) it.
        """
        if _eq not in node.children:
            # not an assignment (we don't care for augmented assignments)
            return
        # look *after* the node; there may be a comment prefixing the NEWLINE
        # of the simple_stmt
        parent = node.parent
        idx = parent.children.index(node) + 1
        while idx < len(parent):
            if parent[idx].type == sym.SEMI:
                idx += 1
                continue  # skip over semicolon
            if parent[idx].type == sym.NEWLINE:
                prefix = parent[idx].get_prefix()
                if not isinstance(prefix, text_type):
                    prefix = prefix.decode(self.encoding)
                docstring = prepare_commentdoc(prefix)
                if docstring:
                    self.add_docstring(node, docstring)
                    return  # don't allow docstrings both before and after
            break
        # now look *before* the node
        pnode = node[0]
        prefix = pnode.get_prefix()
        # if the assignment is the first statement on a new indentation
        # level, its preceding whitespace and comments are not assigned
        # to that token, but the first INDENT or DEDENT token
        while not prefix:
            pnode = pnode.get_prev_leaf()
            if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
                break
            prefix = pnode.get_prefix()
        if not isinstance(prefix, text_type):
            prefix = prefix.decode(self.encoding)
        docstring = prepare_commentdoc(prefix)
        self.add_docstring(node, docstring)
    def visit_simple_stmt(self, node):
        """Visit a docstring statement which may have an assignment before."""
        if node[0].type != token.STRING:
            # not a docstring; but still need to visit children
            return self.generic_visit(node)
        prev = node.get_prev_sibling()
        if not prev:
            return
        if prev.type == sym.simple_stmt and \
           prev[0].type == sym.expr_stmt and _eq in prev[0].children:
            # need to "eval" the string because it's returned in its
            # original form
            docstring = literals.evalString(node[0].value, self.encoding)
            docstring = prepare_docstring(docstring)
            self.add_docstring(prev[0], docstring)
    def add_docstring(self, node, docstring):
        """Attach `docstring` to every target of the assignment `node`."""
        # add an item for each assignment target
        for i in range(0, len(node) - 1, 2):
            target = node[i]
            # NOTE(review): self.number2name is presumably set by the
            # nodes.NodeVisitor base __init__ from the ctor argument
            # (ModuleAnalyzer passes the module-level dict) -- confirm.
            if self.in_init and self.number2name[target.type] == 'power':
                # maybe an attribute assignment -- check necessary conditions
                if (  # node must have two children
                        len(target) != 2 or
                        # first child must be "self"
                        target[0].type != token.NAME or target[0].value != 'self' or
                        # second child must be a "trailer" with two children
                        self.number2name[target[1].type] != 'trailer' or
                        len(target[1]) != 2 or
                        # first child must be a dot, second child a name
                        target[1][0].type != token.DOT or
                        target[1][1].type != token.NAME):
                    continue
                name = target[1][1].value
            elif target.type != token.NAME:
                # don't care about other complex targets
                continue
            else:
                name = target.value
            self.add_tag(name)
            if docstring:
                namespace = '.'.join(self.namespace)
                if namespace.startswith(self.scope):
                    self.collected[namespace, name] = docstring
class ModuleAnalyzer(object):
    """Analyzes a Python module: attribute docs and definition locations.

    Instances are created through the for_string / for_file / for_module
    factory classmethods.  for_file and for_module memoize results (and
    raised PycodeErrors) in `cache`; for_string does not cache.
    """
    # cache for analyzer objects -- caches both by module and file name
    cache = {}
    @classmethod
    def for_string(cls, string, modname, srcname='<string>'):
        """Build an analyzer from in-memory source (bytes or text)."""
        if isinstance(string, bytes):
            return cls(BytesIO(string), modname, srcname)
        return cls(StringIO(string), modname, srcname, decoded=True)
    @classmethod
    def for_file(cls, filename, modname):
        """Build (and cache) an analyzer for a source file on disk."""
        if ('file', filename) in cls.cache:
            return cls.cache['file', filename]
        try:
            fileobj = open(filename, 'rb')
        except Exception as err:
            raise PycodeError('error opening %r' % filename, err)
        obj = cls(fileobj, modname, filename)
        cls.cache['file', filename] = obj
        return obj
    @classmethod
    def for_module(cls, modname):
        """Build (and cache) an analyzer for an importable module.

        A previously failed analysis is cached as the PycodeError itself
        and re-raised on subsequent calls.
        """
        if ('module', modname) in cls.cache:
            entry = cls.cache['module', modname]
            if isinstance(entry, PycodeError):
                raise entry
            return entry
        try:
            type, source = get_module_source(modname)
            if type == 'string':
                obj = cls.for_string(source, modname)
            else:
                obj = cls.for_file(source, modname)
        except PycodeError as err:
            cls.cache['module', modname] = err
            raise
        cls.cache['module', modname] = obj
        return obj
    def __init__(self, source, modname, srcname, decoded=False):
        # name of the module
        self.modname = modname
        # name of the source file
        self.srcname = srcname
        # file-like object yielding source lines
        self.source = source
        # cache the source code as well
        pos = self.source.tell()
        if not decoded:
            # detect the declared encoding, then rewind and decode
            self.encoding = detect_encoding(self.source.readline)
            self.source.seek(pos)
            self.code = self.source.read().decode(self.encoding)
            self.source.seek(pos)
            self.source = TextIOWrapper(self.source, self.encoding)
        else:
            self.encoding = None
            self.code = self.source.read()
            self.source.seek(pos)
        # will be filled by tokenize()
        self.tokens = None
        # will be filled by parse()
        self.parsetree = None
        # will be filled by find_attr_docs()
        self.attr_docs = None
        self.tagorder = None
        # will be filled by find_tags()
        self.tags = None
    def tokenize(self):
        """Generate tokens from the source."""
        if self.tokens is not None:
            return
        try:
            self.tokens = list(tokenize.generate_tokens(self.source.readline))
        except tokenize.TokenError as err:
            raise PycodeError('tokenizing failed', err)
        self.source.close()
    def parse(self):
        """Parse the generated source tokens."""
        if self.parsetree is not None:
            return
        self.tokenize()
        try:
            self.parsetree = pydriver.parse_tokens(self.tokens)
        except parse.ParseError as err:
            raise PycodeError('parsing failed', err)
    def find_attr_docs(self, scope=''):
        """Find class and module-level attributes and their documentation."""
        if self.attr_docs is not None:
            return self.attr_docs
        self.parse()
        attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
        attr_visitor.visit(self.parsetree)
        self.attr_docs = attr_visitor.collected
        self.tagorder = attr_visitor.tagorder
        # now that we found everything we could in the tree, throw it away
        # (it takes quite a bit of memory for large modules)
        self.parsetree = None
        return attr_visitor.collected
    def find_tags(self):
        """Find class, function and method definitions and their location."""
        if self.tags is not None:
            return self.tags
        self.tokenize()
        result = {}
        namespace = []
        stack = []
        indent = 0
        defline = False
        expect_indent = False
        emptylines = 0
        def tokeniter(ignore = (token.COMMENT,)):
            # yield all tokens except ignored (comment) ones
            for tokentup in self.tokens:
                if tokentup[0] not in ignore:
                    yield tokentup
        tokeniter = tokeniter()
        for type, tok, spos, epos, line in tokeniter:
            if expect_indent:
                if type != token.INDENT:
                    # no suite -- one-line definition
                    assert stack
                    dtype, fullname, startline, _ = stack.pop()
                    endline = epos[0]
                    namespace.pop()
                    result[fullname] = (dtype, startline, endline - emptylines)
                expect_indent = False
            if tok in ('def', 'class'):
                name = next(tokeniter)[1]
                namespace.append(name)
                fullname = '.'.join(namespace)
                stack.append((tok, fullname, spos[0], indent))
                defline = True
            elif type == token.INDENT:
                expect_indent = False
                indent += 1
            elif type == token.DEDENT:
                indent -= 1
                # if the stacklevel is the same as it was before the last
                # def/class block, this dedent closes that block
                if stack and indent == stack[-1][3]:
                    dtype, fullname, startline, _ = stack.pop()
                    endline = spos[0]
                    namespace.pop()
                    result[fullname] = (dtype, startline, endline - emptylines)
            elif type == token.NEWLINE:
                # if this line contained a definition, expect an INDENT
                # to start the suite; if there is no such INDENT
                # it's a one-line definition
                if defline:
                    defline = False
                    expect_indent = True
                emptylines = 0
            elif type == token.NL:
                # count up if line is empty or comment only
                if emptyline_re.match(line):
                    emptylines += 1
                else:
                    emptylines = 0
        self.tags = result
        return result
if __name__ == '__main__':
    # Ad-hoc benchmark: analyze a known sphinx module and print how long
    # each stage (tokenize / parse / find_tags) takes.
    import time
    import pprint
    x0 = time.time()
    # ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html')
    ma = ModuleAnalyzer.for_file('sphinx/environment.py',
                                 'sphinx.environment')
    ma.tokenize()
    x1 = time.time()
    ma.parse()
    x2 = time.time()
    # for (ns, name), doc in iteritems(ma.find_attr_docs()):
    #     print '>>', ns, name
    #     print '\n'.join(doc)
    pprint.pprint(ma.find_tags())
    x3 = time.time()
    # print nodes.nice_repr(ma.parsetree, number2name)
    print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
| |
# Class/function to process verilog file
import re, string, os
import pprint
import functools
# Regular expressions for SystemVerilog declarations.  The general shape is:
#   start of line followed by 1 to 4 words (qualifiers and type),
#   an optional array/bus size,
#   an optional list of already-declared names,
#   then the signal itself (matched by the caller, not by these patterns).
# re_bw matches the inside of a bus/array range, e.g. `WIDTH-1:0
re_bw = r'[\w\*\(\)\/><\:\-\+`\$\s]+'
# Variable declaration: qualifiers, type, optional range, optional name list
re_var = r'^\s*(\w+\s+)?(\w+\s+)?([A-Za-z_][\w\:\.]*\s+)(\['+re_bw+r'\])?\s*([A-Za-z_][\w=,\s]*,\s*)?\b'
# General declaration (also matches after ',', '(', ';'): qualifiers, type,
# packed ranges, then a comma-separated list of names with optional init values
re_decl = r'(?:^|,|(?:\w|\)|#)\s*\(|;)\s*(?:const\s+)?(\w+\s+)?(\w+\s+)?(\w+\s+)?([A-Za-z_][\w\:\.]*\b\s*)((?:\['+re_bw+r'\]\s*)*)((?:[A-Za-z_]\w*(?:\s*\[[^=\^\&\|,;]*?\]\s*)?(?:\=\s*[\w\.\:]+\s*)?,\s*)*)\b'
# Enum declaration: optional typedef, base type, optional range, {...} body
re_enum = r'^\s*(typedef\s+)?(enum)\s+(\w+\s*)?(\['+re_bw+r'\])?\s*(\{[^\}]+\})\s*([A-Za-z_][\w=,\s]*,\s*)?\b'
# Struct/union declaration with optional packed/signed qualifiers and {...} body
re_union = r'^\s*(typedef\s+)?(struct|union|`\w+)\s+(packed\s+)?(signed|unsigned)?\s*(\{[\w,;\s`\[\:\]\/\*\+\-><\(\)\$]+\})\s*([A-Za-z_][\w=,\s]*,\s*)?\b'
# Simple typedef of an (optionally parameterized) existing type
re_tdp = r'^\s*(typedef\s+)(\w+)\s*(#\s*\(.*?\))?\s*()\b'
# Module/interface instantiation: optional 'virtual', type, optional #(...) params
re_inst = r'^\s*(virtual)?(\s*)()(\w+)\s*(#\s*\([^;]+\))?\s*()\b'
# Non-ANSI parameter statement: "parameter [type] name = value, ... ;"
re_param = r'^\s*parameter\b((?:\s*(?:\w+\s+)?(?:[A-Za-z_]\w+)\s*=\s*(?:[^,;]*)\s*,)*)(\s*(\w+\s+)?([A-Za-z_]\w+)\s*=\s*([^,;]*)\s*;)'
# Port direction list constant
port_dir = ['input', 'output','inout', 'ref']
def clean_comment(text):
    """Blank out comments and attributes from SystemVerilog source text.

    Line comments (//...), block comments (/*...*/) and attribute
    instances ((*...*)) are each replaced by a single space so that
    character offsets stay roughly aligned.  String literals are left
    untouched, and the expression "( * )" is preserved since it is not
    an attribute.
    """
    pattern = re.compile(
        r'//.*?$|/\*.*?\*/|\(\s*(\*)\s*\)|\(\*.*?\*\)|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )

    def _sub(m):
        matched = m.group(0)
        # "( * )" is a real expression, not an attribute: keep it as-is
        if m.group(1) == '*':
            return matched
        # comments and attributes start with '/' or '('; strings do not
        return " " if matched[0] in '/(' else matched

    return pattern.sub(_sub, text)
# Extract declaration of var_name from a file
def get_type_info_file(fname, var_name):
    """Look up the declaration of var_name inside file fname.

    Delegates to the lru_cached helper; the file's modification time is
    part of the cache key, so editing the file invalidates the cache.
    """
    return get_type_info_file_cache(fname, var_name, os.path.getmtime(fname))
@functools.lru_cache(maxsize=32)
def get_type_info_file_cache(fname, var_name, fdate):
    """Cached body of get_type_info_file.

    fdate (the file mtime) is unused in the body; it only participates
    in the lru_cache key so stale results are not returned after edits.
    """
    with open(fname) as f:
        source = f.read()
    return get_type_info(source, var_name)
# Extract the declaration of var_name from txt
def get_type_info(txt,var_name,search_decl=True):
    """Return a type-info dict for var_name found inside txt.

    The text is scanned for, in this priority order: enum, struct/union,
    typedef, clocking block, plain signal declaration, and finally
    module/interface instantiation; the first match wins.  When
    search_decl is False only the type definitions (enum/struct/typedef)
    are considered.  Returns a dict with keys 'decl', 'type', 'array',
    'bw', 'name', 'tag' and 'value'; 'decl'/'type' are None on no match.
    """
    ti_not_found = {'decl':None,'type':None,'array':"",'bw':"", 'name':var_name, 'tag': '', 'value':None}
    txt = clean_comment(txt)
    # Enum
    m = re.search(r'(?s)'+re_enum+r'('+var_name+r')\b.*$', txt, flags=re.MULTILINE)
    # print('[get_type_info] var = {}'.format(var_name))
    # print('[get_type_info] text = {}'.format(txt))
    # print('[get_type_info] RE = {}'.format(re_enum+r'('+var_name+r')\b.*$'))
    if m:
        # print('[get_type_info] {} type is Enum'.format(var_name))
        return get_type_info_from_match(var_name,m,1,3,5,-1,'enum')[0]
    # Struct
    m = re.search(re_union+r'('+var_name+r')\b.*$', txt, flags=re.MULTILINE)
    if m:
        # print('[get_type_info] {} type is struct'.format(var_name))
        return get_type_info_from_match(var_name,m,1,3,5,-1,'struct')[0]
    # Typedef
    m = re.search(re_tdp+r'('+var_name+r')\b\s*;.*$', txt, flags=re.MULTILINE)
    if m:
        # print('[get_type_info] {} type is typedef'.format(var_name))
        return get_type_info_from_match(var_name,m,1,3,3,-1,'typedef')[0]
    #
    if not search_decl:
        return ti_not_found
    # Clocking block
    m = re.search(r'(?s)\b(clocking)\s+('+var_name+r')(.*?)endclocking\b',txt)
    if m :
        # print('[get_type_info] {} type is Clocking'.format(var_name))
        return get_clocking_info(var_name,m.group(3))
    # Signal declaration
    re_str = re_decl+r'('+var_name+r'\b\s*((?:\[[^=\^\&\|,;]*?\]\s*)*))(\s*=\s*(\'\{.+?\}|\{.+?\}|[^,;]+))?[^\.]*?($|,|;)'
    # print('[get_type_info] RE Decl = {}'.format(re_str))
    m = re.search(re_str, txt, flags=re.MULTILINE)
    if m:
        # print('[get_type_info] {} type is a declaration'.format(var_name))
        return get_type_info_from_match(var_name,m,3,4,5,9,'decl')[0]
    # Instances
    m = re.search(re_inst+r'('+var_name+r')\b.*$', txt, flags=re.MULTILINE)
    if m:
        # print('[get_type_info] {} type is an instance'.format(var_name))
        return get_type_info_from_match(var_name,m,3,4,5,9,'inst')[0]
    return ti_not_found
# Extract the macro content from `define name macro_content
def get_macro(txt, name):
    """Extract a `define's body (and parameter list) from txt.

    Returns the empty string when the macro is not found; otherwise a
    tuple (macro_body, param_list) where param_list is None for a macro
    without arguments.  NOTE(review): the not-found and found return
    types differ ('' vs tuple) -- kept for caller compatibility.
    """
    txt = clean_comment(txt)
    m = re.search(r'(?s)^\s*`define\s+'+name+r'\b[ \t]*(?:\((.*?)\)[ \t]*)?(.*?)(?<!\\)\n',txt,re.MULTILINE)
    if not m:
        return ''
    args = m.group(1)
    # join continuation lines and unescape `" used inside macro bodies
    body = m.group(2).replace('\\\n', '').replace('`"', '"')
    if args:
        args = args.replace('\\\n', '')
    # TODO: Expand macro if there is some arguments
    return body, args
# Extract all signal declaration
def get_all_type_info(txt,no_inst=False):
    """Extract every declaration found in txt (txt is assumed comment-free).

    Function/task bodies and constraint blocks are stripped first, then the
    text is scanned for modports, clocking blocks, enum/struct/typedef
    definitions, plain signal declarations and (unless no_inst is True)
    module/interface instantiations.  Matched regions are removed after
    each pass so the same text is never reported twice.  Returns a list
    of type-info dicts (same shape as get_type_info's result).
    """
    # print('[get_all_type_info] \n'+txt)
    # Cleanup function contents since this can contains some signal declaration
    # print('[get_all_type_info] Cleanup functions/task')
    txt = re.sub(r'(?s)^[ \t]*(import|export)[ \t]*(\".*?\"[ \t]*)?(pure)?[ \t]*(?P<block>function|task)\b.*?;','',txt, flags=re.MULTILINE)
    txt = re.sub(r'(?s)^[ \t\w]*extern\b[^;]+;','',txt, flags=re.MULTILINE)
    txt = re.sub(r'(?s)^[ \t\w]*(?P<block>function|task)\b.*?\bend(?P=block)\b.*?$','',txt, flags=re.MULTILINE)
    # Cleanup constraint definition
    # print('[get_all_type_info] Cleanup constraint')
    # txt = re.sub(r'(?s)constraint\s+\w+\s*\{\s*(?:[^\{\}]+(?:\{[^\{\}]*\})?)*?\s*\}','',txt, flags=re.MULTILINE)
    constraint = [(m.group('name'),m.start(),m.end()) for m in re.finditer(r'(?s)constraint\s+(?P<name>\w+)\s*\{',txt)]
    for name,start,end in reversed(constraint):
        # Walk forward counting curly brackets to find the matching close
        # brace of the constraint body (nesting bounded at 64 levels).
        # NOTE(review): pos advances by m.end()+1, i.e. one char past the
        # bracket -- a bracket immediately following another ("}{") would
        # be skipped and miscounted; confirm intent.
        cnt = 1
        pos = end+1
        while cnt > 0 and cnt < 64:
            m = re.search(r'{|}',txt[pos:])
            if not m:
                print('[SV] Error parsing constraint {}, unbalanced curly bracket !'.format(name))
                cnt = -1
            else:
                pos = pos + m.end()+1
                cnt = cnt + 1 if m.group(0)=='{' else cnt - 1
        if cnt > 64 :
            print('[SV] Too many nested bracket in constraint {} !'.format(name))
            cnt = -1
        # print('Constraint {} going from {} to {} (cnt={})'.format(name,start,pos,cnt))
        if pos>start and cnt==0:
            txt = txt[0:start]+txt[pos:]
    # print('[get_all_type_info] \n'+txt)
    # Suppose text has already been cleaned
    ti = []
    # Look all modports
    # print('[get_all_type_info] Look for modports')
    r = re.compile(r'(?s)modport\s+(\w+)\s*\((.*?)\);', flags=re.MULTILINE)
    modports = r.findall(txt)
    if modports:
        for modport in modports:
            ti.append({'decl':modport[1].replace('\n',''),'type':'','array':'','bw':'', 'name':modport[0], 'tag':'modport'})
        # remove modports before looking for I/O and field to avoid duplication of signals
        txt = r.sub('',txt)
    # Look for clocking block
    # print('[get_all_type_info] Look for clocking block')
    r = re.compile(r'(?s)(default\s+)?(clocking)\s+(\w+)(.*?)endclocking(\s*:\s*\w+)?', flags=re.MULTILINE)
    cbs = r.findall(txt)
    if cbs:
        for cb in cbs:
            ti.append(get_clocking_info(cb[2],cb[3]))
        # print('[get_all_type_info] Clocking: {}'.format(ti))
        # remove clocking block before looking for I/O and field to avoid duplication of signals
        txt = r.sub('',txt)
    # Look for enum declaration
    # print('[get_all_type_info] Look for enum declaration')
    r = re.compile(re_enum+r'(\w+\b(\s*\[[^=\^\&\|,;]*?\]\s*)?)\s*;',flags=re.MULTILINE)
    for m in r.finditer(txt):
        ti_tmp = get_type_info_from_match('',m,1,3,5,-1,'enum')
        # print('[get_all_type_info] enum groups=%s => ti=%s' %(str(m.groups()),str(ti_tmp)))
        ti += [x for x in ti_tmp if x['type']]
    # remove enum declaration since the content could be interpreted as signal declaration
    txt = r.sub('',txt)
    # Look for struct declaration
    # print('[get_all_type_info] Look for struct declaration')
    r = re.compile(re_union+r'(\w+\b(\s*\[[^=\^\&\|,;]*?\]\s*)?)\s*;',flags=re.MULTILINE)
    # print('[get_all_type_info] struct re="{0}"'.format(r.pattern))
    for m in r.finditer(txt):
        ti_tmp = get_type_info_from_match('',m,1,3,5,-1,'struct')
        # print('[get_all_type_info] struct groups=%s => ti=%s' %(str(m.groups()),str(ti_tmp)))
        ti += [x for x in ti_tmp if x['type']]
    # remove struct declaration since the content could be interpreted as signal declaration
    txt = r.sub('',txt)
    # Look for typedef declaration
    # print('[get_all_type_info] Look for typedef declaration')
    r = re.compile(re_tdp+r'(\w+\b(\s*\[[^=\^\&\|,;]*?\]\s*)?)\s*;',flags=re.MULTILINE)
    for m in r.finditer(txt):
        ti_tmp = get_type_info_from_match('',m,1,3,3,-1,'typedef')
        # print('[get_all_type_info] typedef groups=%s => ti=%s' %(str(m.groups()),str(ti_tmp)))
        ti += [x for x in ti_tmp if x['type']]
    # remove typedef declaration since the content could be interpreted as signal declaration
    txt = r.sub('',txt)
    # Look for signal declaration
    # print('[get_all_type_info] Look for signal declaration')
    # TODO: handle init value
    re_str = re_decl+r'(\w+\b(\s*\[[^=\^\&\|,;\[\]]*?\]\s*)*)\s*(?:\=\s*(\'\{.+\}|[^;,]+)\s*)?(?=;|,|\)\s*;)'
    # print('[get_all_type_info] decl re="{0}"'.format(re_str))
    r = re.compile(re_str,flags=re.MULTILINE)
    for m in r.finditer(txt):
        ti_tmp = get_type_info_from_match('',m,3,4,5,8,'decl')
        # print('[get_all_type_info] decl groups=%s => ti=%s' %(str(m.groups()),str(ti_tmp)))
        ti += [x for x in ti_tmp if x['type']]
    # Look for interface instantiation
    if not no_inst:
        # print('[get_all_type_info] Look for interface instantiation')
        re_str = re_inst+r'(\w+\b(\s*\[[^=\^\&\|,;]*?\]\s*)?)\s*\('
        r = re.compile(re_str,flags=re.MULTILINE)
        # print('[get_all_type_info] inst re="{0}"'.format(re_str))
        for m in r.finditer(txt):
            ti_tmp = get_type_info_from_match('',m,3,4,5,-1,'inst')
            # print('[get_all_type_info] inst groups=%s => ti=%s' %(str(m.groups()),str(ti_tmp)))
            ti += [x for x in ti_tmp if x['type']]
    # print('[get_all_type_info] {0}'.format(ti))
    # Look for non-ansi declaration where a signal is declared twice (I/O then reg/wire) and merge it into one declaration
    ti_dict = {}
    pop_list = []
    for (i,x) in enumerate(ti[:]) :
        if x['name'] in ti_dict:
            ti_index = ti_dict[x['name']][1]
            # print('[get_all_type_info] Duplicate found for %s => %s and %s' %(x['name'],ti_dict[x['name']],x))
            if ti[ti_index]['type'].split()[0] in port_dir:
                # keep the port direction but take the type from the second declaration
                ti[ti_index]['decl'] = ti[ti_index]['decl'].replace(ti[ti_index]['type'],ti[ti_index]['type'].split()[0] + ' ' + x['type'])
                ti[ti_index]['type'] = x['type']
                pop_list.append(i)
        else :
            ti_dict[x['name']] = (x,i)
    for i in sorted(pop_list,reverse=True):
        ti.pop(i)
    # pprint.pprint(ti, width=200)
    return ti
# Get type info from a match object
def get_type_info_from_match(var_name,m,idx_type,idx_bw,idx_max,idx_val,tag):
    """Build type-info dict(s) from a regex match on a declaration.

    var_name -- the signal name searched for, or '' to extract every
                name found in the declaration's name list.
    m        -- match object from one of the re_* declaration patterns.
    idx_type -- group index holding the type word.
    idx_bw   -- group index holding the packed bitwidth.
    idx_max  -- groups 0..idx_max-1 are concatenated to form 'decl';
                idx_max / idx_max+1 hold the declared name list(s).
    idx_val  -- group index of the init value (-1 when not applicable).
    tag      -- tag string stored in each result ('decl', 'inst', ...).

    Returns a list of dicts (one per declared signal); on failure a
    single-element list holding the not-found dict.  The group-index
    conventions are tightly coupled to the re_* patterns above.
    """
    ti_not_found = {'decl':None,'type':None,'array':"",'bw':"", 'name':var_name, 'tag':tag, 'value':None}
    #return a tuple of None if not found
    if not m:
        return [ti_not_found]
    if not m.groups()[idx_type]:
        return [ti_not_found]
    line = m.group(0).strip()
    # print("[SV:get_type_info_from_match] varname={0} str='{7}' m={1} idx_type={2} idx_bw={3} idx_max={4},idx_val={5} tag={6}".format(var_name,m.groups(),idx_type,idx_bw,idx_max,idx_val,tag,line))
    # Extract the type itself: should be the mandatory word, except if is a sign qualifier
    t = str.rstrip(m.groups()[idx_type])
    # Remove potential false positive
    if t in ['begin', 'end', 'endcase', 'endspecify', 'else', 'posedge', 'negedge', 'timeunit', 'timeprecision','assign', 'disable', 'property', 'initial', 'assert', 'cover', 'always_comb'] or t.endswith('.'):
        return [ti_not_found]
    t = t.split('.')[0] # Handle interface with portmod (maybe add more checks)
    if t=="unsigned" or t=="signed": # TODO check if other cases might happen
        # the real type is one of the preceding qualifier groups
        if m.groups()[2] is not None:
            t = str.rstrip(m.groups()[2]) + ' ' + t
        elif m.groups()[1] is not None:
            t = str.rstrip(m.groups()[1]) + ' ' + t
        elif m.groups()[0] is not None and not m.groups()[0].startswith('end'):
            t = str.rstrip(m.groups()[0]) + ' ' + t
    elif t=="const": # identifying a variable as simply const is typical of a struct/union : look for it
        m = re.search( re_union+var_name+r'.*$', line, flags=re.MULTILINE)
        if m is None:
            return [ti_not_found]
        t = m.groups()[1]
        idx_bw = 3
    # print("[SV:get_type_info_from_match] type={} Group => {}".format(t,str(m.groups())))
    value = None
    ft = ''
    bw = ''
    if var_name!='':
        # extract (name, ranges) pairs for the requested signal only
        signal_list = re.findall(r'('+var_name + r')\b\s*((?:\[(.*?)\]\s*)*)', m.groups()[idx_max+1], flags=re.MULTILINE)
        if idx_val > 0 and len(m.groups())>idx_val and m.groups()[idx_val]:
            value = str.rstrip(m.groups()[idx_val])
    else:
        # no name given: collect every declared name with ranges/init value
        signal_list = []
        re_str = r'(\w+)\b\s*((?:\[(.*)\]\s*)*)(?:\=\s*(\'\{.+?\}|[^;,]+)\s*)?,?'
        if m.groups()[idx_max]:
            signal_list = re.findall(re_str, m.groups()[idx_max], flags=re.MULTILINE)
            # print("[SV:get_type_info_from_match] idxmax => signal_list = " + str(signal_list))
        if m.groups()[idx_max+1]:
            s = m.groups()[idx_max+1]
            # print("[SV:get_type_info_from_match] idxmax+1 => s = " + str(s))
            if idx_val > 0 and len(m.groups())>idx_val and m.groups()[idx_val]:
                s += ' = ' + m.groups()[idx_val]
            signal_list += re.findall(re_str, s, flags=re.MULTILINE)
            # print("[SV:get_type_info_from_match] idxmax+1 => signal_list = " + str(signal_list))
        # remove reserved keyword that could end up in the list
        signal_list = [s for s in signal_list if s[0] not in ['if','case', 'casex', 'casez', 'for', 'foreach', 'generate', 'input', 'output', 'inout', 'return']]
    if not signal_list:
        return [ti_not_found]
    # print("[SV:get_type_info_from_match] signal_list = " + str(signal_list) + ' for line ' + line)
    #Concat the first 5 word if not None (basically all signal declaration until signal list)
    for i in range(0,idx_max):
        # print('[get_type_info_from_match] tag='+tag+ ' name='+str(signal_list)+ ' match (' + str(i) + ') = ' + str(m.groups()[i]).strip())
        if m.groups()[i] is not None:
            tmp = m.groups()[i].strip()
            if tmp:
                # Cleanup space in enum/struct declaration
                if i==4 and t in ['enum','struct']:
                    tmp = re.sub(r'\s+',' ',tmp,flags=re.MULTILINE)
                #Cleanup spaces in bitwidth
                if i==idx_bw:
                    tmp = re.sub(r'\s+','',tmp,flags=re.MULTILINE)
                    bw = tmp
                # regex can catch more than wanted, so filter based on a list
                if not tmp.startswith('end'):
                    ft += tmp + ' '
    if not ft.strip():
        return [ti_not_found]
    ti = []
    if t=='class' and len(signal_list)==1 :
        # For class try to create a valid complete class declaration by adding at least the endclass
        # If the declaration was on more than one line assume it was because of parameters, and close parenthesis
        l = line.strip()
        if not l.endswith(';'):
            if l.endswith(','):
                l = l[:-1]
            l+= ');'
        l+='\nendclass'
        ti.append(parse_class(l))
        if ti[0]:
            ti[0]['tag'] = 'decl'
        else:
            ti[0] = ti_not_found
    else :
        for signal in signal_list :
            # print("signal: " + str(signal) )
            fts = ft + signal[0]
            # Check if the variable is an array and the type of array (fixed, dynamic, queue, associative)
            at = ""
            if signal[1]!='':
                fts += signal[1].strip()
                if signal[1].count('[')>1:
                    at='multidimension'
                elif signal[2] =="":
                    at='dynamic'
                elif signal[2]=='$':
                    at='queue'
                elif signal[2]=='*':
                    at='associative'
                else:
                    # a bare identifier inside [] means an associative index type
                    ma= re.match(r'[A-Za-z_][\w]*$',signal[2])
                    if ma:
                        at='associative'
                    else:
                        at='fixed'
            if not value and len(signal)>=4:
                value = signal[3]
            d = {'decl':fts,'type':t,'array':at,'bw':bw, 'name':signal[0], 'tag':tag, 'value': value}
            if at:
                d['array_dim'] = signal[1].strip()
            ft0 = ft.split()[0]
            if ft0 in ['local','protected']:
                d['access'] = ft0
            # TODO: handle init value inside list
            # print("Array: " + str(m) + "=>" + str(at))
            ti.append(d)
    return ti
def get_clocking_info(name, content):
    """Build the type-info dict for a clocking block.

    name    -- the clocking block's identifier.
    content -- the text between 'clocking <name>' and 'endclocking',
               scanned for 'input ...;' and 'output ...;' port lists.

    Returns a dict with tag/type 'clocking' whose 'port' entry is a list
    of {'name', 'type'} dicts for each input and output port.

    Bug fix: the output loop previously read m_port.group(1) -- the stale
    match from the *input* loop -- so output ports got input names (or a
    NameError when the block had no inputs).  It now uses its own match.
    """
    ports = []
    for m_port in re.finditer(r'input\s+([^;]+);', content):
        ports += [{'name': x.strip(), 'type': 'input'} for x in m_port.group(1).split(',')]
    for m_oport in re.finditer(r'output\s+([^;]+);', content):
        ports += [{'name': x.strip(), 'type': 'output'} for x in m_oport.group(1).split(',')]
    ti = {'decl': 'clocking '+name, 'type': 'clocking', 'array': '', 'bw': '', 'name': name, 'tag': 'clocking',
          'port': ports}
    # print('[get_clocking_info] {}'.format(ti))
    return ti
###############################################################################
# Parse a module for port/signal/instance/... information
def parse_module_file(fname, mname=r'\w+', inst_only=False, no_inst=False):
    """Parse module/interface mname from file fname.

    Thin wrapper over the lru_cached implementation; the file's
    modification time participates in the cache key for invalidation.
    """
    return parse_module_file_cache(fname, mname, os.path.getmtime(fname),
                                   inst_only, no_inst)
@functools.lru_cache(maxsize=32)
def parse_module_file_cache(fname, mname, fdate, inst_only=False, no_inst=False):
    """Cached body of parse_module_file (fdate is a cache-key component only)."""
    with open(fname) as f:
        source = f.read()
    return parse_module(source, mname, inst_only, no_inst)
def parse_module(flines,mname=r'\w+',inst_only=False,no_inst=False):
    """Parse the first module/interface matching mname in flines.

    Returns a dict with keys 'name', 'param', 'port', 'inst', 'type' and
    'signal' (plus 'modport'/'clocking' when present), or None when no
    module matched.  With inst_only=True only instance names/types are
    collected (much faster); no_inst=True skips instance extraction.
    """
    flines = clean_comment(flines)
    re_str = r"(?s)(?P<type>module|interface)\s+(?P<name>"+mname+r")(?P<import>\s+import\s+.*?;)?\s*(#\s*\((?P<param>.*?)\))?\s*(\((?P<port>.*?)\))?\s*;(?P<content>.*?)(?P<ending>endmodule|endinterface)"
    # print("[SV:parse_module] name={} -> re = {}".format(mname,re_str))
    # print("Parsing for module " + mname + ' in \n' + flines)
    m = re.search(re_str, flines, re.MULTILINE)
    if m is None:
        return None
    mname = m.group('name')
    txt = m.group(0)
    if inst_only:
        # Fast path: only list instantiations, filtering obvious keywords
        minfo = {'name': mname, 'param':[], 'port':[], 'inst':[], 'type':m.group('type'), 'signal' : []}
        re_str = r'^[ \t]*(\w+)\s*(?:#\s*\([^;]+\))?\s*\b(\w+)\b(?:\s*\[[^=\^\&\|,;]*?\]\s*)?\s*\('
        li = re.findall(re_str,txt,flags=re.MULTILINE)
        for l in li:
            if l[0] not in ['module', 'class','interface', 'begin', 'end', 'endcase', 'endspecify', 'else', 'posedge', 'negedge', 'timeunit', 'timeprecision','assign', 'disable', 'property', 'initial', 'assert', 'cover','generate']:
                minfo['inst'].append({'type':l[0],'name':l[1]})
        return minfo
    # Extract list of param if any
    params_name = []
    params = extract_params(m)
    if params:
        params_name = [param['name'] for param in params]
    # Extract all type information inside the module : signal/port declaration, interface/module instantiation
    if m.group('param'):
        # strip the parameter list so it is not re-parsed as declarations
        txt = txt.replace(m.group('param'),'')
    ati = []
    if m.group('port'):
        ati += get_all_type_info(m.group('port')+';')
    if m.group('content'):
        ati += get_all_type_info(m.group('content'))
    # print('[SV.parse_module] ati = ')
    # pprint.pprint(ati,width=200)
    # Extract port name
    ports = []
    ports_name = []
    if m.group('port'):
        s = m.group('port')
        ports_name = re.findall(r"(\w+)\s*(?=,|$|=|\[[^=\^\&\|,;]*?\]\s*(?=,|$|=))",s)
        # get type for each port
        ports = []
        ports = [ti for ti in ati if ti['name'] in ports_name]
    ports_name += params_name
    # Extract instances name
    inst = [ti for ti in ati if ti['type']!='module' and ti['type']!='interface' and ti['tag']=='inst']
    # Extract signal name
    signals = [ti for ti in ati if ti['type'] not in ['module','interface'] and ti['tag'] not in ['inst','modport','clocking'] and ti['name'] not in ports_name ]
    minfo = {'name': mname, 'param':params, 'port':ports, 'inst':inst, 'type':m.group('type'), 'signal' : signals}
    modports = [ti for ti in ati if ti['tag']=='modport']
    if modports:
        minfo['modport'] = modports
    clocking = [ti for ti in ati if ti['tag']=='clocking']
    if clocking:
        minfo['clocking'] = clocking
    # print('[SV.parse_module] minfo = ')
    # pprint.pprint(minfo,width=200)
    return minfo
# Extract params using a matching group already containing groups for params and content
def extract_params(m):
    """Extract parameter declarations from a module/class match object.

    m must provide the named groups 'param' (the ANSI-style ``#(...)``
    parameter list, possibly None) and 'content' (the body, scanned for
    inline ``parameter ...;`` statements).

    Returns a list of dicts with keys 'decl', 'name', 'value' and
    'position' (0-based declaration order across both styles).

    Refactor: the identical collection logic that was duplicated for the
    ANSI and inline cases is now a single helper.
    """
    params = []
    pos = 0
    # One "name = value" item with an optional leading type; compiled once
    # and reused for both parameter styles below.
    r = re.compile(r"(parameter\s+)?(?P<decl>\b\w+\b\s*(\[[\w\:\-\+`\s]+\]\s*)?)?(?P<name>\w+)\s*=\s*(?P<value>[^,;\n]+)")

    def _collect(s, param_type, pos):
        """Append every parameter found in s; an item without an explicit
        type inherits the type of the previous one in the same list."""
        for mp in r.finditer(s):
            p = mp.groupdict()
            if not p['decl']:
                p['decl'] = param_type
            else:
                p['decl'] = p['decl'].strip()
                param_type = p['decl']
            p['value'] = p['value'].strip()
            p['position'] = pos
            params.append(p)
            pos += 1
        return param_type, pos

    ## Parameter define in ANSI style
    if m.group('param'):
        _, pos = _collect(clean_comment(m.group('param')), '', pos)
    ## look for parameter defined inline
    if m.group('content'):
        s = clean_comment(m.group('content'))
        for mpl in re.finditer(re_param, s, flags=re.MULTILINE):
            # each 'parameter ...;' statement starts with no inherited type
            _, pos = _collect(mpl.group(0), '', pos)
    return params
# Parse a package for port information
def parse_package_file(fname, pname=r'\w+'):
    """Parse package pname from file fname (memoized, keyed on mtime)."""
    return parse_package_file_cache(fname, pname, os.path.getmtime(fname))
@functools.lru_cache(maxsize=32)
def parse_package_file_cache(fname, pname, fdate):
    """Cached body of parse_package_file (fdate is a cache-key component only)."""
    with open(fname) as f:
        source = f.read()
    return parse_package(source, pname)
def parse_package(flines, pname=r'\w+'):
    """Collect functions and type declarations defined in package pname.

    Returns a list of info dicts, or None when no matching package is
    found in flines.
    """
    m = re.search(r"(?s)(?P<type>package)\s+(?P<name>"+pname+")\s*;\s*(?P<content>.+?)(?P<ending>endpackage)", flines, re.MULTILINE)
    if m is None:
        return None
    body = clean_comment(m.group('content'))
    info = get_all_function(body)
    info += get_all_type_info(body, no_inst=True)
    return info
def parse_function(flines, funcname):
    """Return the info dict of function/task funcname, or None if absent."""
    matches = get_all_function(flines, funcname)
    return matches[0] if matches else None
# Parse a class for function and members
def parse_class_file(fname, cname=r'\w+'):
    """Parse class cname from file fname (memoized, keyed on mtime)."""
    return parse_class_file_cache(fname, cname, os.path.getmtime(fname))
@functools.lru_cache(maxsize=32)
def parse_class_file_cache(fname, cname, fdate):
    """Cached body of parse_class_file (fdate is a cache-key component only).

    Comments are stripped from the file contents before parsing.
    """
    with open(fname) as f:
        raw = f.read()
    return parse_class(clean_comment(raw), cname)
def parse_class(flines,cname=r'\w+'):
    """Parse the first class matching cname in flines.

    Returns a dict with keys 'type', 'name', 'extend', 'decl', 'param',
    'function' and 'member', or None when no class matched.  flines is
    expected to be comment-free already (callers run clean_comment).
    """
    #print("Parsing for class " + cname + ' in \n' + flines)
    re_str = r"(?s)(?P<type>class)\s+(?P<name>"+cname+r")\s*(#\s*\((?P<param>.*?)\))?\s*(extends\s+(?P<extend>\w+(?:\s*#\(.*?\))?))?\s*;(?P<content>.*?)(?P<ending>endclass)"
    # print('[parse_class] regexp = {}'.format(re_str))
    re_class = re.compile(re_str, flags=re.MULTILINE)
    m = re_class.search(flines)
    if m is None:
        return None
    txt = clean_comment(m.group('content'))
    # print('[parse_class] Matched class in :\n'+txt)
    ci = {'type':'class', 'name': m.group('name'), 'extend': None if 'extend' not in m.groupdict() else m.group('extend'), 'function' : []}
    # Reconstruct a one-line declaration string from the matched pieces
    ci['decl'] = 'class {name} {param}{extend}'.format(\
        name=ci['name'],\
        param='' if not m.group('param') else '#({0}) '.format(m.group('param')),\
        extend='' if not ci['extend'] else 'extends {0}'.format(ci['extend']) )
    # print('[parse_class] Init ci:\n'+str(ci))
    ci['param'] = extract_params(m)
    # print('[parse_class] ci after params extract\n'+str(ci))
    # Extract all functions
    ci['function'] = get_all_function(txt)
    # print('[parse_class] ci after function extract\n'+str(ci))
    # Extract members
    ci['member'] = get_all_type_info(txt,no_inst=True)
    # print('[parse_class] Final ci:\n'+str(ci))
    return ci
def get_all_function(txt,funcname=r'\w+'):
    """Collect every function/task declaration matching funcname in txt.

    Three declaration styles are handled in order: extern prototypes,
    DPI imports, and full in-body definitions (each style's matches are
    removed from txt before the next pass, and a name seen once is not
    reported again).  Returns a list of dicts with keys 'name', 'type',
    'port', 'return', 'decl', 'definition' and optionally 'access'.
    """
    fil = [] # Function Info list
    names = []
    # extern prototypes: "extern [access] [virtual|static] function/task ...;"
    re_str = r'(?s)(extern)\s+(?:\b(protected|local)\s+)?(\b(?:virtual|static)\s+)?\b(function|task)\s+((?:\w+\s+)?(?:\w+\s+|\[[\d:]+\]\s+)?)\b('+funcname+r')\b\s*(\((.*?)\s*\))?\s*;()'
    fl = re.findall(re_str,txt,flags=re.MULTILINE)
    txt = re.sub(re_str,'',txt,flags=re.MULTILINE)
    # DPI imports: import "DPI-C" function ...;
    re_str = r'(?s)^[ \t]*(import)\s+".*?"\s*()()(function)\s+((?:\w+\s+)?(?:\w+\s+|\[[\d:]+\]\s+)?)\b('+funcname+r')\b\s*(\((.*?)\s*\))?\s*;()'
    fl += re.findall(re_str,txt,flags=re.MULTILINE)
    txt = re.sub(re_str,'',txt,flags=re.MULTILINE)
    # txt = re.sub(r'\n([ \t]*\n)+','\n',txt,flags=re.MULTILINE)
    # print('Content after filter : \n' + txt)
    # full definitions, terminated by endfunction/endtask (backref \4)
    re_str = r'(?s)()(?:\b(protected|local)\s+)?(\bvirtual\s+)?\b(function|task)\s+((?:\w+\s+)?(?:\w+\s+|\[[\d:]+\]\s+)?)\b((?:\w+::)?'+funcname+r')\b\s*(\((.*?)\s*\))?\s*;(.*?)\bend\4\b'
    fl += re.findall(re_str,txt,flags=re.MULTILINE)
    for ( f_def, f_access, f_virtual, f_type, f_return,f_name,f_args_,f_args, f_content) in fl:
        # print('Parsing function {} {}'.format(f_name,f_args_))
        if f_name in names:
            continue
        else :
            names.append(f_name)
        # Arguments in declaration -> parse them
        if f_args:
            # print('Parsing type from arguments {}'.format(f_args))
            pi = get_all_type_info(f_args + ';')
        # Empty list of argument in declaration -> nothing to do
        elif f_args_:
            pi=[]
        # Non-Ansi style declaration -> search for arguments in the function body
        else:
            # print('Parsing type from content {}'.format(f_content))
            ti_all = get_all_type_info(f_content)
            pi = [x for x in ti_all if x['decl'].startswith(('input','output','inout','ref'))]
        f_decl = '{acc} {virt} {type} {ret} {name}'.format(acc=f_access, virt=f_virtual, type=f_type, ret=f_return,name=f_name)
        f_decl = re.sub(r'\s+',' ',f_decl.strip())
        d = {'name': f_name, 'type': f_type, 'port': pi, 'return': f_return, 'decl': f_decl, 'definition': f_def}
        if f_access:
            d['access'] = f_access
        if d['return'].startswith('automatic'):
            d['return'] = ' '.join(d['return'].split()[1:])
        fil.append(d)
    # print([x['name'] for x in fil])
    return fil
# Fill all entry of a case for enum or vector (limited to 8b)
# ti is the type infor return by get_type_info
def fill_case(ti,length=0):
    """Generate a case-statement skeleton covering all values of a signal.

    ti     -- type-info dict as returned by get_type_info.
    length -- for vector types, number of bits to enumerate; 0 means use
              the signal's full declared width (enumeration is limited
              to 8 bits).

    Returns (text, values) where text is the body to insert after the
    'case(...)' line and values the enumerated entries, or (None, None)
    when the type is unsupported or too wide.
    """
    if not ti['type']:
        print('[fill_case] No type for signal ' + str(ti['name']))
        return (None,None)
    # print('[fill_case] ti = {0}'.format(ti))
    t = ti['type'].split()[0]
    s = '\n'
    if t == 'enum':
        # extract enum from the declaration
        m = re.search(r'\{(.*)\}', ti['decl'])
        if m :
            el = re.findall(r"(\w+).*?(,|$)",m.groups()[0])
            # column width for alignment, at least 7 ('default' is 7 chars)
            maxlen = max([len(x[0]) for x in el])
            if maxlen < 7:
                maxlen = 7
            for x in el:
                s += '\t' + x[0].ljust(maxlen) + ' : ;\n'
            s += '\tdefault'.ljust(maxlen+1) + ' : ;\nendcase'
            return (s,[x[0] for x in el])
    elif t in ['logic','bit','reg','wire','input','output']:
        # NOTE(review): assumes a [hi:lo] range with hi >= lo -- confirm
        m = re.search(r'\[\s*(\d+)\s*\:\s*(\d+)',ti['bw'])
        if m :
            # If no length was provided use the complete bitwidth
            if length>0:
                bw = length
            else :
                bw = int(m.groups()[0]) + 1 - int(m.groups()[1])
            if bw <=8 :
                for i in range(0,(1<<bw)):
                    s += '\t' + str(i).ljust(7) + ' : ;\n'
                s += '\tdefault : ;\nendcase'
                return (s,range(0,(1<<bw)))
    print('[fill_case] Type not supported: ' + str(t))
    return (None,None)
# Extract all enum values from the declaration
def get_enum_values(decl):
    """Return the list of enumerator names found in an enum declaration.

    Names are extracted from the brace-delimited body of decl; explicit
    values (e.g. ``RUN = 2``) are ignored.  Returns [] when decl has no
    ``{...}`` body.
    """
    body = re.search(r'\{(.*)\}', decl)
    if body is None:
        return []
    return re.findall(r"(\w+).*?(?:,|$)", body.group(1))
| |
"""These tests are all about the "join rewriting" feature built
to support SQLite's lack of right-nested joins. SQlite as of
version 3.7.16 no longer has this limitation.
"""
from sqlalchemy import Table, Column, Integer, MetaData, ForeignKey, \
select, exists, union
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy import testing
# Fixture schema shared by every test in this module: a chain of related
# tables (a -> b -> c -> d), association tables, and one table whose
# Column .key differs from its SQL name.
m = MetaData()
a = Table('a', m,
    Column('id', Integer, primary_key=True)
)
b = Table('b', m,
    Column('id', Integer, primary_key=True),
    Column('a_id', Integer, ForeignKey('a.id'))
)
b_a = Table('b_a', m,
    Column('id', Integer, primary_key=True),
)
b1 = Table('b1', m,
    Column('id', Integer, primary_key=True),
    Column('a_id', Integer, ForeignKey('a.id'))
)
b2 = Table('b2', m,
    Column('id', Integer, primary_key=True),
    Column('a_id', Integer, ForeignKey('a.id'))
)
# association table between a and b
a_to_b = Table('a_to_b', m,
    Column('a_id', Integer, ForeignKey('a.id')),
    Column('b_id', Integer, ForeignKey('b.id')),
)
c = Table('c', m,
    Column('id', Integer, primary_key=True),
    Column('b_id', Integer, ForeignKey('b.id'))
)
d = Table('d', m,
    Column('id', Integer, primary_key=True),
    Column('c_id', Integer, ForeignKey('c.id'))
)
e = Table('e', m,
    Column('id', Integer, primary_key=True)
)
f = Table('f', m,
    Column('id', Integer, primary_key=True),
    Column('a_id', ForeignKey('a.id'))
)
# 'id' is addressed as .bid in Python (key= differs from the SQL name)
b_key = Table('b_key', m,
    Column('id', Integer, primary_key=True, key='bid'),
)
a_to_b_key = Table('a_to_b_key', m,
    Column('aid', Integer, ForeignKey('a.id')),
    Column('bid', Integer, ForeignKey('b_key.bid')),
)
class _JoinRewriteTestBase(AssertsCompiledSQL):
def _test(self, s, assert_):
self.assert_compile(
s,
assert_
)
compiled = s.compile(dialect=self.__dialect__)
# column name should be in result map, as we never render
# .key in SQL
for key, col in zip([c.name for c in s.c], s.inner_columns):
key = key % compiled.anon_map
assert col in compiled._create_result_map()[key][1]
_a_bkeyselect_bkey = ""
def test_a_bkeyselect_bkey(self):
assoc = a_to_b_key.select().alias()
j1 = assoc.join(b_key)
j2 = a.join(j1)
s = select([a, b_key], use_labels=True).select_from(j2)
self._test(s, self._a_bkeyselect_bkey)
def test_a_bc(self):
j1 = b.join(c)
j2 = a.join(j1)
# TODO: if we remove 'b' or 'c', shouldn't we get just
# the subset of cols from anon_1 ?
# TODO: do this test also with individual cols, things change
# lots based on how you go with this
s = select([a, b, c], use_labels=True).\
select_from(j2).\
where(b.c.id == 2).\
where(c.c.id == 3).order_by(a.c.id, b.c.id, c.c.id)
self._test(s, self._a_bc)
def test_a_bkeyassoc(self):
j1 = b_key.join(a_to_b_key)
j2 = a.join(j1)
s = select([a, b_key.c.bid], use_labels=True).\
select_from(j2)
self._test(s, self._a_bkeyassoc)
def test_a_bkeyassoc_aliased(self):
bkey_alias = b_key.alias()
a_to_b_key_alias = a_to_b_key.alias()
j1 = bkey_alias.join(a_to_b_key_alias)
j2 = a.join(j1)
s = select([a, bkey_alias.c.bid], use_labels=True).\
select_from(j2)
self._test(s, self._a_bkeyassoc_aliased)
def test_a__b_dc(self):
j1 = c.join(d)
j2 = b.join(j1)
j3 = a.join(j2)
s = select([a, b, c, d], use_labels=True).\
select_from(j3).\
where(b.c.id == 2).\
where(c.c.id == 3).\
where(d.c.id == 4).\
order_by(a.c.id, b.c.id, c.c.id, d.c.id)
self._test(
s,
self._a__b_dc
)
def test_a_bc_comma_a1_selbc(self):
# test here we're emulating is
# test.orm.inheritance.test_polymorphic_rel:
# PolymorphicJoinsTest.test_multi_join
j1 = b.join(c)
j2 = b.join(c).select(use_labels=True).alias()
j3 = a.join(j1)
a_a = a.alias()
j4 = a_a.join(j2)
s = select([a, a_a, b, c, j2], use_labels=True).\
select_from(j3).select_from(j4).order_by(j2.c.b_id)
self._test(
s,
self._a_bc_comma_a1_selbc
)
def test_a_atobalias_balias_c_w_exists(self):
a_to_b_alias = a_to_b.alias()
b_alias = b.alias()
j1 = a_to_b_alias.join(b_alias)
j2 = a.outerjoin(j1, a.c.id == a_to_b_alias.c.a_id)
# TODO: if we put straight a_to_b_alias here,
# it fails to alias the columns clause.
s = select([a,
a_to_b_alias.c.a_id,
a_to_b_alias.c.b_id,
b_alias.c.id,
b_alias.c.a_id,
exists().select_from(c).
where(c.c.b_id == b_alias.c.id).label(None)],
use_labels=True).select_from(j2)
self._test(
s,
self._a_atobalias_balias_c_w_exists
)
def test_a_atobalias_balias(self):
a_to_b_alias = a_to_b.alias()
b_alias = b.alias()
j1 = a_to_b_alias.join(b_alias)
j2 = a.outerjoin(j1, a.c.id == a_to_b_alias.c.a_id)
s = select([a, a_to_b_alias, b_alias], use_labels=True).select_from(j2)
self._test(
s,
self._a_atobalias_balias
)
def test_b_ab1_union_b_ab2(self):
j1 = a.join(b1)
j2 = a.join(b2)
b_j1 = b.join(j1)
b_j2 = b.join(j2)
s = union(
select([b_j1], use_labels=True),
select([b_j2], use_labels=True)
).select(use_labels=True)
self._test(
s,
self._b_ab1_union_c_ab2
)
def test_b_a_id_double_overlap_annotated(self):
# test issue #3057
# this involves annotations so try to loop those in.
j1 = b.join(b_a, b.c.id == b_a.c.id)
annot = [
b.c.id._annotate({}),
b.c.a_id._annotate({}),
b_a.c.id._annotate({})
]
s = select(annot).select_from(j1).apply_labels().alias()
s = select(list(s.c)).apply_labels()
self._test(
s,
self._b_a_id_double_overlap_annotated
)
def test_f_b1a_where_in_b2a(self):
    # test issue #3130
    # a right-nested join in the FROM clause combined with an IN
    # subquery in the WHERE clause against a different join.
    b1a = a.join(b1)
    b2a = a.join(b2)
    subq = select([b2.c.id]).select_from(b2a)
    s = select([f]).select_from(f.join(b1a)).where(b1.c.id.in_(subq))
    s = s.apply_labels()
    self._test(
        s,
        self._f_b1a_where_in_b2a
    )
def test_anon_scalar_subqueries(self):
    """Two anonymous scalar subqueries rendered in the columns clause."""
    scalar_one = select([1]).as_scalar()
    scalar_two = select([2]).as_scalar()
    stmt = select([scalar_one, scalar_two]).apply_labels()
    self._test(stmt, self._anon_scalar_subqueries)
class JoinRewriteTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with right-nested rewritten as
    aliased SELECT statements."""

    @util.classproperty
    def __dialect__(cls):
        # A dialect that cannot render right-nested JOINs, forcing the
        # compiler to rewrite each nested join as an anonymous SELECT.
        dialect = default.DefaultDialect()
        dialect.supports_right_nested_joins = False
        return dialect

    # Expected SQL strings consumed by the test_* methods inherited from
    # _JoinRewriteTestBase; the anon_N subqueries are the rewritten joins.
    _a__b_dc = (
        "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id, anon_1.d_id AS d_id, "
        "anon_1.d_c_id AS d_c_id "
        "FROM a JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "anon_2.c_id AS c_id, anon_2.c_b_id AS c_b_id, "
        "anon_2.d_id AS d_id, anon_2.d_c_id AS d_c_id "
        "FROM b JOIN (SELECT c.id AS c_id, c.b_id AS c_b_id, "
        "d.id AS d_id, d.c_id AS d_c_id "
        "FROM c JOIN d ON c.id = d.c_id) AS anon_2 "
        "ON b.id = anon_2.c_b_id) AS anon_1 ON a.id = anon_1.b_a_id "
        "WHERE anon_1.b_id = :id_1 AND anon_1.c_id = :id_2 AND "
        "anon_1.d_id = :id_3 "
        "ORDER BY a.id, anon_1.b_id, anon_1.c_id, anon_1.d_id"
    )

    _a_bc = (
        "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id FROM a JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a.id = anon_1.b_a_id "
        "WHERE anon_1.b_id = :id_1 AND anon_1.c_id = :id_2 "
        "ORDER BY a.id, anon_1.b_id, anon_1.c_id"
    )

    _a_bc_comma_a1_selbc = (
        "SELECT a.id AS a_id, a_1.id AS a_1_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id, anon_2.b_id AS anon_2_b_id, "
        "anon_2.b_a_id AS anon_2_b_a_id, anon_2.c_id AS anon_2_c_id, "
        "anon_2.c_b_id AS anon_2_c_b_id FROM a "
        "JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a.id = anon_1.b_a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_2 "
        "ON a_1.id = anon_2.b_a_id ORDER BY anon_2.b_id"
    )

    _a_bkeyassoc = (
        "SELECT a.id AS a_id, anon_1.b_key_id AS b_key_id "
        "FROM a JOIN "
        "(SELECT b_key.id AS b_key_id, a_to_b_key.aid AS a_to_b_key_aid, "
        "a_to_b_key.bid AS a_to_b_key_bid FROM b_key "
        "JOIN a_to_b_key ON b_key.id = a_to_b_key.bid) AS anon_1 "
        "ON a.id = anon_1.a_to_b_key_aid"
    )

    _a_bkeyassoc_aliased = (
        "SELECT a.id AS a_id, anon_1.b_key_1_id AS b_key_1_id "
        "FROM a JOIN (SELECT b_key_1.id AS b_key_1_id, "
        "a_to_b_key_1.aid AS a_to_b_key_1_aid, "
        "a_to_b_key_1.bid AS a_to_b_key_1_bid FROM b_key AS b_key_1 "
        "JOIN a_to_b_key AS a_to_b_key_1 ON b_key_1.id = a_to_b_key_1.bid) AS "
        "anon_1 ON a.id = anon_1.a_to_b_key_1_aid"
    )

    _a_bkeyselect_bkey = (
        "SELECT a.id AS a_id, anon_2.anon_1_aid AS anon_1_aid, "
        "anon_2.anon_1_bid AS anon_1_bid, anon_2.b_key_id AS b_key_id "
        "FROM a JOIN (SELECT anon_1.aid AS anon_1_aid, "
        "anon_1.bid AS anon_1_bid, "
        "b_key.id AS b_key_id "
        "FROM (SELECT a_to_b_key.aid AS aid, a_to_b_key.bid AS bid "
        "FROM a_to_b_key) AS anon_1 "
        "JOIN b_key ON b_key.id = anon_1.bid) AS anon_2 "
        "ON a.id = anon_2.anon_1_aid")

    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id AS a_id, "
        "anon_1.a_to_b_1_a_id AS a_to_b_1_a_id, "
        "anon_1.a_to_b_1_b_id AS a_to_b_1_b_id, "
        "anon_1.b_1_id AS b_1_id, anon_1.b_1_a_id AS b_1_a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = anon_1.b_1_id) AS anon_2 "
        "FROM a LEFT OUTER JOIN (SELECT a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id "
        "FROM a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) AS anon_1 "
        "ON a.id = anon_1.a_to_b_1_a_id")

    _a_atobalias_balias = (
        "SELECT a.id AS a_id, anon_1.a_to_b_1_a_id AS a_to_b_1_a_id, "
        "anon_1.a_to_b_1_b_id AS a_to_b_1_b_id, anon_1.b_1_id AS b_1_id, "
        "anon_1.b_1_a_id AS b_1_a_id FROM a LEFT OUTER JOIN "
        "(SELECT a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, "
        "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id FROM a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) AS anon_1 "
        "ON a.id = anon_1.a_to_b_1_a_id")

    _b_ab1_union_c_ab2 = (
        "SELECT b_id AS b_id, b_a_id AS b_a_id, a_id AS a_id, b1_id AS b1_id, "
        "b1_a_id AS b1_a_id FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, anon_1.a_id AS a_id, "
        "anon_1.b1_id AS b1_id, anon_1.b1_a_id AS b1_a_id "
        "FROM b JOIN (SELECT a.id AS a_id, b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM a JOIN b1 ON a.id = b1.a_id) AS anon_1 ON anon_1.a_id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, anon_2.a_id AS a_id, "
        "anon_2.b2_id AS b2_id, anon_2.b2_a_id AS b2_a_id "
        "FROM b JOIN (SELECT a.id AS a_id, b2.id AS b2_id, b2.a_id AS b2_a_id "
        "FROM a JOIN b2 ON a.id = b2.a_id) AS anon_2 ON anon_2.a_id = b.a_id)"
    )

    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.id_1 AS anon_1_id_1 "
        "FROM (SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS id_1 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )

    _f_b1a_where_in_b2a = (
        "SELECT f.id AS f_id, f.a_id AS f_a_id "
        "FROM f JOIN (SELECT a.id AS a_id, b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM a JOIN b1 ON a.id = b1.a_id) AS anon_1 ON anon_1.a_id = f.a_id "
        "WHERE anon_1.b1_id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )

    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinPlainTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with normal nesting."""

    @util.classproperty
    def __dialect__(cls):
        # Default dialect: right-nested joins are supported, so the
        # expected SQL keeps the parenthesized JOIN forms.
        dialect = default.DefaultDialect()
        return dialect

    # Expected SQL for each inherited test; no rewriting, joins render
    # as nested parenthesized JOIN clauses.
    _a_bkeyselect_bkey = (
        "SELECT a.id AS a_id, b_key.id AS b_key_id FROM a JOIN "
        "((SELECT a_to_b_key.aid AS aid, a_to_b_key.bid AS bid "
        "FROM a_to_b_key) AS anon_1 JOIN b_key ON b_key.id = anon_1.bid) "
        "ON a.id = anon_1.aid"
    )

    _a__b_dc = (
        "SELECT a.id AS a_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id, d.id AS d_id, "
        "d.c_id AS d_c_id "
        "FROM a JOIN (b JOIN (c JOIN d ON c.id = d.c_id) "
        "ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 AND "
        "d.id = :id_3 "
        "ORDER BY a.id, b.id, c.id, d.id"
    )

    _a_bc = (
        "SELECT a.id AS a_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )

    _a_bc_comma_a1_selbc = (
        "SELECT a.id AS a_id, a_1.id AS a_1_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id, anon_1.b_id AS anon_1_b_id, "
        "anon_1.b_a_id AS anon_1_b_a_id, anon_1.c_id AS anon_1_c_id, "
        "anon_1.c_b_id AS anon_1_c_b_id FROM a "
        "JOIN (b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a_1.id = anon_1.b_a_id ORDER BY anon_1.b_id"
    )

    _a_bkeyassoc = (
        "SELECT a.id AS a_id, b_key.id AS b_key_id "
        "FROM a JOIN "
        "(b_key JOIN a_to_b_key ON b_key.id = a_to_b_key.bid) "
        "ON a.id = a_to_b_key.aid"
    )

    _a_bkeyassoc_aliased = (
        "SELECT a.id AS a_id, b_key_1.id AS b_key_1_id FROM a "
        "JOIN (b_key AS b_key_1 JOIN a_to_b_key AS a_to_b_key_1 "
        "ON b_key_1.id = a_to_b_key_1.bid) ON a.id = a_to_b_key_1.aid"
    )

    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id AS a_id, a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = b_1.id) AS anon_1 "
        "FROM a LEFT OUTER JOIN "
        "(a_to_b AS a_to_b_1 JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) "
        "ON a.id = a_to_b_1.a_id")

    _a_atobalias_balias = (
        "SELECT a.id AS a_id, a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id "
        "FROM a LEFT OUTER JOIN (a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) ON a.id = a_to_b_1.a_id"
    )

    _b_ab1_union_c_ab2 = (
        "SELECT b_id AS b_id, b_a_id AS b_a_id, a_id AS a_id, b1_id AS b1_id, "
        "b1_a_id AS b1_a_id FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, "
        "b1.id AS b1_id, "
        "b1.a_id AS b1_a_id FROM b "
        "JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, b2.id AS b2_id, "
        "b2.a_id AS b2_a_id FROM b "
        "JOIN (a JOIN b2 ON a.id = b2.a_id) ON a.id = b.a_id)")

    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.id_1 AS anon_1_id_1 FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS id_1 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )

    _f_b1a_where_in_b2a = (
        "SELECT f.id AS f_id, f.a_id AS f_a_id "
        "FROM f JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = f.a_id "
        "WHERE b1.id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )

    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinNoUseLabelsTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with use_labels turned off; the
    rewriting dialect is in effect but outer columns keep plain names."""

    @util.classproperty
    def __dialect__(cls):
        dialect = default.DefaultDialect()
        dialect.supports_right_nested_joins = False
        return dialect

    def _test(self, s, assert_):
        # Override the base _test to strip labels before compiling.
        s.use_labels = False
        self.assert_compile(
            s,
            assert_
        )

    # Expected SQL: inner rewritten subqueries still label their columns
    # (needed for disambiguation), the outermost SELECT does not.
    _a_bkeyselect_bkey = (
        "SELECT a.id, b_key.id FROM a JOIN ((SELECT a_to_b_key.aid AS aid, "
        "a_to_b_key.bid AS bid FROM a_to_b_key) AS anon_1 "
        "JOIN b_key ON b_key.id = anon_1.bid) ON a.id = anon_1.aid"
    )

    _a__b_dc = (
        "SELECT a.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id, d.id, "
        "d.c_id "
        "FROM a JOIN (b JOIN (c JOIN d ON c.id = d.c_id) "
        "ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 AND "
        "d.id = :id_3 "
        "ORDER BY a.id, b.id, c.id, d.id"
    )

    _a_bc = (
        "SELECT a.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )

    _a_bc_comma_a1_selbc = (
        "SELECT a.id, a_1.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id, anon_1.b_id, "
        "anon_1.b_a_id, anon_1.c_id, "
        "anon_1.c_b_id FROM a "
        "JOIN (b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a_1.id = anon_1.b_a_id ORDER BY anon_1.b_id"
    )

    _a_bkeyassoc = (
        "SELECT a.id, b_key.id FROM a JOIN (b_key JOIN a_to_b_key "
        "ON b_key.id = a_to_b_key.bid) ON a.id = a_to_b_key.aid"
    )

    _a_bkeyassoc_aliased = (
        "SELECT a.id, b_key_1.id FROM a JOIN (b_key AS b_key_1 "
        "JOIN a_to_b_key AS a_to_b_key_1 ON b_key_1.id = a_to_b_key_1.bid) "
        "ON a.id = a_to_b_key_1.aid"
    )

    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id, a_to_b_1.a_id, a_to_b_1.b_id, b_1.id, b_1.a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = b_1.id) AS anon_1 "
        "FROM a LEFT OUTER JOIN "
        "(a_to_b AS a_to_b_1 JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) "
        "ON a.id = a_to_b_1.a_id"
    )

    _a_atobalias_balias = (
        "SELECT a.id, a_to_b_1.a_id, a_to_b_1.b_id, b_1.id, b_1.a_id "
        "FROM a LEFT OUTER JOIN (a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) ON a.id = a_to_b_1.a_id"
    )

    _b_ab1_union_c_ab2 = (
        "SELECT b_id, b_a_id, a_id, b1_id, b1_a_id "
        "FROM (SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, "
        "b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM b JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, b2.id AS b2_id, "
        "b2.a_id AS b2_a_id "
        "FROM b JOIN (a JOIN b2 ON a.id = b2.a_id) ON a.id = b.a_id)"
    )

    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id, anon_1.b_a_id, anon_1.id_1 FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS id_1 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )

    _f_b1a_where_in_b2a = (
        "SELECT f.id, f.a_id "
        "FROM f JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = f.a_id "
        "WHERE b1.id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )

    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinExecTest(_JoinRewriteTestBase, fixtures.TestBase):
    """invoke the SQL on the current backend to ensure compatibility"""

    __backend__ = True

    # Expected-SQL strings are irrelevant here — each test only executes
    # the statement — so every assertion attribute is nulled out.
    _a_bc = _a_bc_comma_a1_selbc = _a__b_dc = _a_bkeyassoc = \
        _a_bkeyassoc_aliased = _a_atobalias_balias_c_w_exists = \
        _a_atobalias_balias = _b_ab1_union_c_ab2 = \
        _b_a_id_double_overlap_annotated = _f_b1a_where_in_b2a = \
        _anon_scalar_subqueries = None

    @classmethod
    def setup_class(cls):
        m.create_all(testing.db)

    @classmethod
    def teardown_class(cls):
        m.drop_all(testing.db)

    def _test(self, selectable, assert_):
        # Execute the statement, then verify that every inner column is
        # addressable in the result's keymap.
        result = testing.db.execute(selectable)
        result.close()
        for col in selectable.inner_columns:
            assert col in result._metadata._keymap

    @testing.skip_if("oracle", "oracle's cranky")
    @testing.skip_if("mssql", "can't query EXISTS in the columns "
                     "clause w/o subquery")
    def test_a_atobalias_balias_c_w_exists(self):
        super(JoinExecTest, self).test_a_atobalias_balias_c_w_exists()

    @testing.only_on(
        "sqlite",
        "non-standard aliasing rules used at the moment, "
        "possibly fix this or add another test that uses "
        "cross-compatible aliasing")
    def test_b_ab1_union_b_ab2(self):
        super(JoinExecTest, self).test_b_ab1_union_b_ab2()
class DialectFlagTest(fixtures.TestBase, AssertsCompiledSQL):
    """supports_right_nested_joins alone decides whether the nested join
    compiles natively or is rewritten as an aliased SELECT."""

    def test_dialect_flag(self):
        d1 = default.DefaultDialect(supports_right_nested_joins=True)
        d2 = default.DefaultDialect(supports_right_nested_joins=False)
        j1 = b.join(c)
        j2 = a.join(j1)
        s = select([a, b, c], use_labels=True).\
            select_from(j2)
        # Native right-nested JOIN when the dialect supports it...
        self.assert_compile(
            s,
            "SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, "
            "c.id AS c_id, "
            "c.b_id AS c_b_id FROM a JOIN (b JOIN c ON b.id = c.b_id) "
            "ON a.id = b.a_id",
            dialect=d1)
        # ...rewritten as an anonymous SELECT subquery when it does not.
        self.assert_compile(
            s, "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
            "anon_1.b_a_id AS b_a_id, "
            "anon_1.c_id AS c_id, anon_1.c_b_id AS c_b_id "
            "FROM a JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, "
            "c.id AS c_id, "
            "c.b_id AS c_b_id FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
            "ON a.id = anon_1.b_a_id", dialect=d2)
| |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import unittest
from mock import patch, Mock
from vnc_api.vnc_api import Domain, Project, NetworkIpam, VirtualNetwork, VnSubnetsType
from kube_manager.vnc import vnc_kubernetes
from kube_manager.tests.vnc.db_mock import DBBaseKM, DBMock
from kube_manager.tests.vnc.vnc_api_mock import VncApiMock
from kube_manager.vnc.vnc_kubernetes_config import VncKubernetesConfig as \
vnc_kubernetes_config
class VncKubernetesTest(unittest.TestCase):
    """Tests for vnc_kubernetes.VncKubernetes against a mocked VNC API
    and a mocked Cassandra-style database (DBMock)."""

    def setUp(self):
        # Fresh mock state and a clean VncKubernetes singleton per test.
        VncApiMock.init()
        DBMock.init()
        vnc_kubernetes.VncKubernetes.reset()
        # Minimal argument object VncKubernetes expects; values are
        # arbitrary except the subnets, which the tests assert on.
        self.args = Mock()
        self.args.admin_user = "admin"
        self.args.admin_password = "qwerty"
        self.args.admin_tenant = "default"
        self.args.vnc_endpoint_ip = '127.0.0.1'
        self.args.vnc_endpoint_port = "8082"
        self.args.auth_token_url = "token"
        self.args.cluster_project = None
        self.args.cluster_network = None
        self.args.cluster_pod_network = None
        self.args.cluster_service_network = None
        self.args.cluster_name = "cluster"
        self.args.pod_subnets = ['10.10.0.0/16']
        self.args.ip_fabric_subnets = ['20.20.0.0/16']
        self.args.service_subnets = ['192.168.0.0/24']
        self.args.kubernetes_api_secure_port = "8443"
        self.args.auth_user = "admin"
        self.args.auth_password = "qwerty"
        self.args.auth_tenant = "default"
        self.args.cassandra_server_list = ()
        self.args.aps_name="test-aps"
        self.args.rabbit_port = None
        self.args.collectors = ""
        # Pre-populate the mocked API with the default domain/project and
        # the ip-fabric network that VncKubernetes expects to find.
        api = VncApiMock(
            self.args.auth_user,
            self.args.auth_password,
            self.args.auth_tenant,
            self.args.vnc_endpoint_ip,
            self.args.vnc_endpoint_port,
            self.args.auth_token_url
        )
        domain_uuid = api.domain_create(Domain("default-domain"))
        domain = api.domain_read(id=domain_uuid)
        proj_uuid = api.project_create(Project("default-project", parent_obj=domain))
        proj = api.project_read(id=proj_uuid)
        net = VirtualNetwork("ip-fabric", proj)
        api.virtual_network_create(net)

    def tearDown(self):
        vnc_kubernetes.VncKubernetes.reset()
        pass

    def verify_if_created(self, res_type, name, parent_fq_name):
        # Assert the object exists both in the mocked API and in DBMock,
        # then return the API object for further checks.
        obj = VncApiMock.read(res_type, fq_name=parent_fq_name+[name])
        self.assertEquals(name, obj.name)
        uuid = obj.uuid
        ok, obj_list = DBMock.read(res_type.replace('-', '_'), [uuid])
        self.assertEquals(True, ok)
        self.assertEquals(parent_fq_name+[name], obj_list[0]['fq_name'])
        return obj

    def verify_if_synchronized(self, cls, obj):
        # Assert the KM cache class holds an entry mirroring *obj*.
        km_obj = cls.get(obj.uuid)
        self.assertEquals(obj.name, km_obj.name)
        self.assertEquals(obj.uuid, km_obj.uuid)
        return km_obj

    @patch("kube_manager.vnc.db.KubeNetworkManagerDB", new=DBMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncApi", new=VncApiMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncAmqpHandle")
    def test_sync_km(self, mock_vnc_amqp_handle_init):
        # Put some objects to database
        DBMock.create('domain', '123', {
            'uuid': '123',
            'fq_name': ['test-domain']
        })
        DBMock.create('project', '234', {
            'uuid': '234',
            'fq_name': ['test-domain', 'test-proj-1'],
            'parent_uuid': '123'
        })
        DBMock.create('project', '345', {
            'uuid': '345',
            'fq_name': ['test-domain', 'test-proj-2'],
            'parent_uuid': '123'
        })
        mock_vnc_amqp_handle = Mock()
        mock_vnc_amqp_handle_init.return_value = mock_vnc_amqp_handle
        vnc_kubernetes.VncKubernetes(self.args, Mock())
        mock_vnc_amqp_handle.establish.assert_called_once_with()
        # check if KM dictionaries are synchronized with database
        # (counts include the objects created in setUp plus the cluster
        # projects created by VncKubernetes itself)
        self.assertEquals(2, len(vnc_kubernetes.DomainKM.list_obj()))
        self.assertEquals(5, len(vnc_kubernetes.ProjectKM.list_obj()))
        obj = vnc_kubernetes.DomainKM.get('123')
        self.assertIsNotNone(obj)
        self.assertEquals(['test-domain'], obj.fq_name)
        self.assertEquals('123', obj.uuid)
        obj = vnc_kubernetes.ProjectKM.get('234')
        self.assertIsNotNone(obj)
        self.assertEquals('test-proj-1', obj.name)
        self.assertEquals(['test-domain', 'test-proj-1'], obj.fq_name)
        self.assertEquals('234', obj.uuid)
        obj = vnc_kubernetes.ProjectKM.get('345')
        self.assertIsNotNone(obj)
        self.assertEquals('test-proj-2', obj.name)
        self.assertEquals(['test-domain', 'test-proj-2'], obj.fq_name)
        self.assertEquals('345', obj.uuid)

    @patch("kube_manager.vnc.db.KubeNetworkManagerDB", new=DBMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncApi", new=VncApiMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncAmqpHandle", new=Mock())
    def test_nested_mode(self):
        # nested_mode flag must flip the global DBBaseKM nested state;
        # restore the previous value so other tests are unaffected.
        old_nested_mode = DBBaseKM.is_nested()
        self.args.nested_mode = "1"
        vnc_kubernetes.VncKubernetes(self.args, Mock())
        self.assertTrue(DBBaseKM.is_nested())
        DBBaseKM.set_nested(old_nested_mode)

    @patch("kube_manager.vnc.db.KubeNetworkManagerDB", new=DBMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncApi", new=VncApiMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncAmqpHandle", new=Mock())
    def test_create_resources(self):
        vnc_kubernetes.VncKubernetes(self.args, Mock())
        default_proj_name = vnc_kubernetes_config.cluster_project_name('default')
        kube_system_proj_name = vnc_kubernetes_config.cluster_project_name('kube-system')
        # Verify projects
        system_proj = self.verify_if_created('project', kube_system_proj_name,
                                             ['default-domain'])
        default_proj = self.verify_if_created('project', default_proj_name,
                                              ['default-domain'])
        self.verify_if_synchronized(vnc_kubernetes.ProjectKM, system_proj)
        self.verify_if_synchronized(vnc_kubernetes.ProjectKM, default_proj)
        # Verify cluster pod network
        net = self.verify_if_created('virtual-network', 'cluster-default-pod-network',
                                     ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
        ipam_refs = net.get_network_ipam_refs()
        self.assertEquals(1, len(ipam_refs))
        self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)
        # Verify pod ipam
        pod_ipam = self.verify_if_created('network-ipam', self.args.cluster_name + '-pod-ipam',
                                          ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, pod_ipam)
        self.assertEquals('flat-subnet', pod_ipam.get_ipam_subnet_method())
        self.assertEquals(16, pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
        self.assertEquals('10.10.0.0', pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())
        # Verify cluster service network
        net = self.verify_if_created(
            'virtual-network', 'cluster-default-service-network',
            ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
        ipam_refs = net.get_network_ipam_refs()
        self.assertEquals(1, len(ipam_refs))
        self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)
        # Verify service ipam
        service_ipam = self.verify_if_created('network-ipam', self.args.cluster_name +'-service-ipam',
                                              ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, service_ipam)
        self.assertEquals('flat-subnet', pod_ipam.get_ipam_subnet_method())
        self.assertEquals(24, service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
        self.assertEquals('192.168.0.0', service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())

    @patch("kube_manager.vnc.db.KubeNetworkManagerDB", new=DBMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncApi", new=VncApiMock)
    @patch("kube_manager.vnc.vnc_kubernetes.VncAmqpHandle", new=Mock())
    def test_resources_exists(self):
        # Pre-create the cluster networks/ipams, then check that starting
        # VncKubernetes with them already present does not raise.
        api = VncApiMock(
            self.args.auth_user,
            self.args.auth_password,
            self.args.auth_tenant,
            self.args.vnc_endpoint_ip,
            self.args.vnc_endpoint_port,
            self.args.auth_token_url
        )
        domain_fq_name = ['default-domain']
        domain = api.domain_read(fq_name=domain_fq_name)
        proj_uuid = api.project_create(Project("default", parent_obj=domain))
        proj = api.project_read(id=proj_uuid)
        # Create cluster-default-pod-network
        ipam_uuid = api.network_ipam_create(NetworkIpam("pod-ipam", proj))
        ipam = api.network_ipam_read(id=ipam_uuid)
        net = VirtualNetwork("cluster-default-pod-network", proj)
        # No subnets are associated with IPAM at this point.
        # Subnets will be updated in the IPAM, when cluster is created.
        net.add_network_ipam(ipam, VnSubnetsType([]))
        api.virtual_network_create(net)
        # Create cluster-default-service-network
        ipam_uuid = api.network_ipam_create(NetworkIpam("service-ipam", proj))
        ipam = api.network_ipam_read(id=ipam_uuid)
        net = VirtualNetwork("cluster-default-service-network", proj)
        # No subnets are associated with IPAM at this point.
        # Subnets will be updated in the IPAM, when cluster is created.
        net.add_network_ipam(ipam, VnSubnetsType([]))
        api.virtual_network_create(net)
        vnc_kubernetes.VncKubernetes(self.args, Mock())
| |
import os
from os import path
import sqlite3
from .link import Link
class Links(object):
def __init__(self):
self._links = {}
def add(self, link):
self._links[link.url] = link
def find_by_tag(self, tag):
return [link for link in self._links.values() if tag in link.tags]
def get_all(self):
return self._links.values()
def remove(self, link):
del self._links[link.url]
def find_by_url(self, url):
return self._links[url]
class SqliteLinks(object):
    """Link store backed by two SQLite table gateways ('links' and 'tags')."""

    def __init__(self, table_gateways):
        self._links_table = table_gateways['links']
        self._tags_table = table_gateways['tags']

    def _load(self, url):
        # Assemble a Link from its row in 'links' plus all its tag rows.
        return Link(url, self._tags_table.get_tags(url),
                    self._links_table.get_date(url))

    def add(self, link):
        """Persist *link*: its url/date row first, then its tags."""
        # Order matters: the tags table has a foreign key on links.url.
        self._links_table.save(link.url, link.date)
        self._tags_table.reset_tags(link.url, link.tags)

    def find_by_tag(self, tag):
        """Return a list of Links carrying *tag*."""
        urls = self._tags_table.get_urls_of_links_with_tag(tag)
        return [self._load(url) for url in urls]

    def get_all(self):
        """Return a list of every persisted Link."""
        return [Link(url, self._tags_table.get_tags(url), date)
                for url, date in self._links_table.get_all()]

    def remove(self, link):
        """Delete *link* and its tags."""
        # Tags first: the foreign key is declared "on delete restrict".
        self._tags_table.remove_tags(link.url)
        self._links_table.remove_url_and_date(link.url)

    def find_by_url(self, url):
        """Return the persisted Link stored under *url*."""
        return self._load(url)
class SqliteTable(object):
    """Base class for SQLite table gateways.

    Subclasses must define SQL_COMMAND_FOR_TABLE_CREATION; the table is
    created (if missing) as soon as the gateway is constructed.
    """

    def __init__(self, sqlite_connection):
        self._connection = sqlite_connection
        self._create_table_if_missing()

    def _create_table_if_missing(self):
        # Using the connection as a context manager commits on success.
        with self._connection as connection:
            connection.execute(self.SQL_COMMAND_FOR_TABLE_CREATION)
class LinksTable(SqliteTable):
    """Gateway for the 'links' table: one (url, date_saved) row per link."""

    SQL_COMMAND_FOR_TABLE_CREATION = '''
        create table if not exists links(
            url
                primary key
                not null,
            date_saved
                not null
        )
    '''

    def get_all(self):
        """Return every (url, date_saved) row."""
        query = 'select url, date_saved from links'
        with self._connection as connection:
            return connection.execute(query).fetchall()

    def save(self, url, date):
        """Insert (url, date); a duplicate url is silently ignored."""
        with self._connection as connection:
            connection.execute(
                'insert or ignore into links(url, date_saved) values(?, ?)',
                (url, date)
            )

    def get_date(self, url):
        """Return the date_saved stored for *url*."""
        with self._connection as connection:
            row = connection.execute(
                'select date_saved from links where url = ?',
                (url,)
            ).fetchone()
        (date,) = row
        return date

    def remove_url_and_date(self, url):
        """Delete the row for *url*, if any."""
        with self._connection as connection:
            connection.execute('delete from links where url = ?', (url,))
class TagsTable(SqliteTable):
    """Gateway for the 'tags' table: (url, name) pairs, many tags per link."""

    SQL_COMMAND_FOR_TABLE_CREATION = '''
        create table if not exists tags(
            url
                not null,
            name
                not null,
            foreign key(url) references links(url)
                on delete restrict
                on update restrict
        )
    '''

    def get_urls_of_links_with_tag(self, tag):
        """Return a tuple of the urls of every link carrying *tag*."""
        with self._connection as connection:
            rows = connection.execute(
                'select url from tags where name = ?',
                (tag,)
            ).fetchall()
        return tuple(row[0] for row in rows)

    def get_tags(self, url):
        """Return a tuple of every tag name attached to *url*."""
        with self._connection as connection:
            rows = connection.execute(
                'select name from tags where url = ?',
                (url,)
            ).fetchall()
        return tuple(row[0] for row in rows)

    def reset_tags(self, url, tags):
        """Replace all tags of *url* with *tags*."""
        self.remove_tags(url)
        self.add_tags(url, tags)

    def remove_tags(self, url):
        """Delete every tag row for *url*."""
        with self._connection as connection:
            connection.execute('delete from tags where url = ?', (url,))

    def add_tags(self, url, tags):
        """Attach each tag in *tags* to *url*."""
        with self._connection as connection:
            connection.executemany(
                'insert into tags(url, name) values(?, ?)',
                [(url, tag) for tag in tags]
            )
class SqliteConnectionFactory(object):
    """Builds SQLite connections with foreign-key enforcement turned on."""

    @staticmethod
    def create_autoclosing_on_disk():
        """Return a context manager yielding fresh on-disk connections."""
        return AutoclosingSqliteConnection()

    @classmethod
    def create_in_memory(cls):
        """Return a connection to a private in-memory database."""
        connection = sqlite3.connect(':memory:')
        cls._enable_enforcement_of_foreign_key_constraints(connection)
        return connection

    @classmethod
    def create_on_disk(cls, data_directory):
        """Return a connection to the database file inside *data_directory*."""
        connection = sqlite3.connect(data_directory.path_to_database_file)
        cls._enable_enforcement_of_foreign_key_constraints(connection)
        return connection

    @staticmethod
    def _enable_enforcement_of_foreign_key_constraints(sqlite_connection):
        # SQLite ships with foreign keys OFF per connection; enable them.
        sqlite_connection.execute('pragma foreign_keys = on')
class AutoclosingSqliteConnection(object):
    """Context manager that opens a fresh connection per with-block,
    delegates commit/rollback to it, and closes it on exit."""

    def __init__(self, provider_of_sqlite_connection=None):
        if provider_of_sqlite_connection is None:
            provider_of_sqlite_connection = ProviderOfConnectionToOnDiskSqliteDatabase()
        self._provider_of_sqlite_connection = provider_of_sqlite_connection

    def __enter__(self):
        connection = self._provider_of_sqlite_connection.get()
        connection.__enter__()
        self._current_connection = connection
        return connection

    def __exit__(self, type_, value, traceback):
        # Let the inner connection commit or roll back, then close it.
        self._current_connection.__exit__(type_, value, traceback)
        self._current_connection.close()
        return False
class ProviderOfConnectionToOnDiskSqliteDatabase(object):
    """Supplies connections to the on-disk database in the app data directory."""

    def __init__(self):
        self._directory = ApplicationDataDirectory()

    def get(self):
        """Open and return a new connection; the caller owns closing it."""
        return SqliteConnectionFactory.create_on_disk(self._directory)
class ApplicationDataDirectory(object):
    """Locates (and lazily creates) the ~/.linkstore/ data directory."""

    @property
    def path(self):
        """Absolute path of the data directory, with trailing separator."""
        return path.expanduser('~/.linkstore/')

    @property
    def name_of_database_file(self):
        """Bare filename of the SQLite database."""
        return 'linkstore.sqlite'

    @property
    def path_to_database_file(self):
        """Full path to the database file; creates the directory if needed."""
        self._ensure_data_directory_exists()
        return path.join(self.path, self.name_of_database_file)

    def _ensure_data_directory_exists(self):
        if not path.exists(self.path):
            os.mkdir(self.path)
| |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2015 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import ctypes
from ctypes.util import find_library
from collections import deque
from abc import abstractmethod
from . import exception as exception
from .exception import THEMIS_CODES, ThemisError
# Load the native Themis shared library; every secure-session call below
# goes through this handle.
themis = ctypes.cdll.LoadLibrary(find_library('themis'))

# ctypes prototypes for the transport callbacks that the native
# secure_session passes back into Python (see TransportStruct).
ON_GET_PUBLIC_KEY = ctypes.CFUNCTYPE(
    ctypes.c_int, ctypes.POINTER(ctypes.c_byte),
    ctypes.c_size_t, ctypes.POINTER(ctypes.c_byte),
    ctypes.c_size_t, ctypes.POINTER(ctypes.py_object))
ON_SEND_DATA = ctypes.CFUNCTYPE(
    ctypes.c_ssize_t, ctypes.POINTER(ctypes.c_byte),
    ctypes.c_size_t, ctypes.POINTER(ctypes.py_object))
ON_RECEIVE_DATA = ctypes.CFUNCTYPE(
    ctypes.c_ssize_t, ctypes.POINTER(ctypes.c_byte),
    ctypes.c_size_t, ctypes.POINTER(ctypes.py_object))
ON_STATE_CHANGE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_void_p)
class TransportStruct(ctypes.Structure):
    """ctypes mirror of the native secure_session transport callback table.

    Field order must match the C struct layout exactly.
    """
    # set of callbacks
    _fields_ = [
        # use for "sending" data.
        # if set to None - secure session methods send/receive not usable
        ("send_data", ON_SEND_DATA),
        # use for "receiving" data.
        # if set to None - secure session methods send/receive not usable
        ("receive_data", ON_RECEIVE_DATA),
        # not used for in current version
        ("state_changed", ON_STATE_CHANGE),
        # [necessary] use for getting peer public key by it ID
        # (see ssession.__init__ method).
        ("get_public_key_for_id", ON_GET_PUBLIC_KEY),
        # some user_data, that will be passed to any of callbacks
        ("user_data", ctypes.POINTER(ctypes.py_object))]
# Prototype for secure_session_create(id, id_len, key, key_len, transport);
# returns an opaque session pointer (NULL on failure).
ssession_create = themis.secure_session_create
ssession_create.restype = ctypes.POINTER(ctypes.c_int)
ssession_create.argtypes = [ctypes.c_void_p, ctypes.c_size_t,
                            ctypes.c_void_p, ctypes.c_size_t,
                            ctypes.POINTER(TransportStruct)]
def on_send(data, data_length, user_data):
    """Secure Session transport callback: push *data_length* bytes from the
    ctypes buffer *data* to the user transport object (user_data[0]).

    Returns the number of bytes handed to the transport, or
    THEMIS_CODES.NETWORK_ERROR if the transport's send() raises.
    """
    try:
        user_data[0].send(ctypes.string_at(data, data_length))
    except Exception:
        # Bug fix: this previously read THEMIS_CODE.NETWORK_ERROR, an
        # undefined name (the module imports THEMIS_CODES), so any send
        # failure raised NameError inside the ctypes callback.
        return THEMIS_CODES.NETWORK_ERROR
    return data_length
def on_receive(data, data_length, user_data):
    """Secure Session transport callback: pull up to *data_length* bytes
    from the user transport (user_data[0]) into the ctypes buffer *data*.

    Returns the number of bytes received, or THEMIS_CODES.NETWORK_ERROR
    if the transport's receive() raises.
    """
    try:
        received_data = user_data[0].receive(data_length)
    except Exception:
        # Bug fix: this previously read THEMIS_CODE.NETWORK_ERROR, an
        # undefined name (the module imports THEMIS_CODES), so any receive
        # failure raised NameError; the unused "as e" binding is dropped.
        return THEMIS_CODES.NETWORK_ERROR
    # NOTE(review): no check that len(received_data) <= the native buffer
    # size — presumably callers size the buffer for data_length; confirm.
    ctypes.memmove(data, received_data, len(received_data))
    return len(received_data)
def on_get_pub_key(user_id, id_length, key_buffer, key_buffer_length,
                   user_data):
    """Secure Session callback: resolve the peer's public key by its id
    and copy it into *key_buffer*. Returns 0 on success."""
    peer_id = ctypes.string_at(user_id, id_length)
    public_key = user_data[0].get_pub_key_by_id(peer_id)
    # NOTE(review): no check that len(public_key) <= key_buffer_length —
    # confirm the native caller always supplies a large enough buffer.
    ctypes.memmove(key_buffer, public_key, len(public_key))
    return 0
def on_change_status(buffer_length, user_data):
    """Secure Session state-change callback; state changes are ignored."""
    return 0
# Module-level ctypes callback instances. Kept alive at module scope so
# they are not garbage-collected while the native library holds pointers
# to them.
on_send_ = ON_SEND_DATA(on_send)
on_receive_ = ON_RECEIVE_DATA(on_receive)
on_change_status_ = ON_STATE_CHANGE(on_change_status)
on_get_pub_key_ = ON_GET_PUBLIC_KEY(on_get_pub_key)
# Pointer type used to smuggle the Python transport object through the
# native layer as user_data.
lp_conn_type = ctypes.POINTER(ctypes.py_object)
class SString(bytes):
    # Bytes subclass carrying a mutable "control message" flag.
    def __new__(cls, value):
        obj = bytes.__new__(cls, value)
        # NOTE(review): this instance attribute shadows the is_control()
        # method defined below, so calling inst.is_control() raises
        # TypeError ('bool' object is not callable). Callers presumably
        # read the attribute directly — confirm before removing either.
        obj.is_control = False
        return obj
    def set_control(self):
        # Mark this message as a control (protocol) message.
        self.is_control = True
    def unset_control(self):
        # Mark this message as ordinary payload data.
        self.is_control = False
    def is_control(self):
        # Effectively dead code: shadowed by the instance attribute that
        # __new__ assigns (see NOTE above).
        return self.is_control
    def __str__(self):
        return bytes.__str__(self)
class SSession(object):
    """ctypes wrapper around a themis Secure Session context.

    :param user_id: identifier of the session owner (e.g. b"server")
    :param sign_key: private signing key of the session owner
    :param transport: object providing send/receive/get_pub_key_by_id, or
        None to drive the session manually via connect_request/wrap/unwrap
    """

    def __init__(self, user_id, sign_key, transport):
        # user_id - user identification ("server" for example)
        # sign_key - private key of session owner
        # transport - reference for transport_t object.
        self.session_ctx = ctypes.POINTER(ctypes.c_int)
        if transport is None:
            # No transport: caller moves data manually (wrap/unwrap mode).
            self.session_ctx = ssession_create(
                ctypes.byref(ctypes.create_string_buffer(user_id)),
                len(user_id),
                ctypes.byref(ctypes.create_string_buffer(sign_key)),
                len(sign_key),
                0)
        else:
            # Keep references to the callback struct and the wrapped
            # transport pointer so they are not garbage collected while the
            # native session still points at them.
            self.lp_conn = lp_conn_type(ctypes.py_object(transport))
            self.transport_ = TransportStruct(
                on_send_, on_receive_, on_change_status_, on_get_pub_key_,
                self.lp_conn)
            self.session_ctx = ssession_create(
                ctypes.byref(ctypes.create_string_buffer(user_id)),
                len(user_id),
                ctypes.byref(ctypes.create_string_buffer(sign_key)),
                len(sign_key),
                ctypes.byref(self.transport_))
        if not self.session_ctx:
            raise exception.ThemisError(THEMIS_CODES.FAIL,
                                        "Secure Session failed creating")

    def __del__(self):
        # Release the native session context.
        themis.secure_session_destroy(self.session_ctx)

    def connect(self):
        """Initiate the key-agreement handshake over the transport."""
        res = themis.secure_session_connect(self.session_ctx)
        if res != THEMIS_CODES.SUCCESS:
            raise exception.ThemisError(res, "Secure Session failed connecting")

    def send(self, message):
        """Encrypt *message* and push it through the transport callbacks."""
        send_message = ctypes.create_string_buffer(message)
        res = themis.secure_session_send(
            self.session_ctx, ctypes.byref(send_message), len(message))
        if res == THEMIS_CODES.NETWORK_ERROR:
            raise exception.ThemisError(res, "Secure Session failed sending")
        return res

    def receive(self):
        """Read and decrypt one message from the transport callbacks."""
        # NOTE(review): the receive buffer is fixed at 1024 bytes, and
        # message_length is passed by value rather than via byref -- whether
        # that matches the declared argtypes is not visible here; confirm
        # against the ctypes prototype for secure_session_receive.
        message = ctypes.create_string_buffer(1024)
        message_length = ctypes.c_size_t(1024)
        res = themis.secure_session_receive(self.session_ctx, message,
                                            message_length)
        if res == THEMIS_CODES.NETWORK_ERROR:
            raise exception.ThemisError(res, "Secure Session failed receiving")
        elif res < 0:
            # Negative non-network codes are treated as "nothing for the
            # caller" rather than an error.
            return ""
        return ctypes.string_at(message, res)

    def is_established(self):
        """Return True once the handshake has completed."""
        return themis.secure_session_is_established(self.session_ctx) == 1

    def connect_request(self):
        """Generate the initial handshake message (manual/wrap mode)."""
        req_size = ctypes.c_int(0)
        # Size probe: a NULL output buffer is expected to fail with
        # BUFFER_TOO_SMALL and report the required size in req_size.
        res = themis.secure_session_generate_connect_request(
            self.session_ctx, None, ctypes.byref(req_size))
        if res != THEMIS_CODES.BUFFER_TOO_SMALL:
            raise exception.ThemisError(
                res, "Secure Session failed generating connect request")
        req_buffer = ctypes.create_string_buffer(req_size.value)
        res = themis.secure_session_generate_connect_request(
            self.session_ctx, ctypes.byref(req_buffer), ctypes.byref(req_size))
        if res != THEMIS_CODES.SUCCESS:
            raise exception.ThemisError(
                res, "Secure Session failed generating connect request")
        # NOTE(review): passes the c_int object, not req_size.value -- verify
        # ctypes.string_at accepts that on the targeted Python version.
        return ctypes.string_at(req_buffer, req_size)

    def wrap(self, message):
        """Encrypt *message* and return the wrapped bytes (manual mode)."""
        send_message = ctypes.create_string_buffer(message)
        wrapped_message_length = ctypes.c_int(0)
        # Size probe: expected to return BUFFER_TOO_SMALL with the size set.
        res = themis.secure_session_wrap(
            self.session_ctx, ctypes.byref(send_message), len(message), 0,
            ctypes.byref(wrapped_message_length))
        if res != THEMIS_CODES.BUFFER_TOO_SMALL:
            raise exception.ThemisError(res, "Secure Session failed encrypting")
        wrapped_message = ctypes.create_string_buffer(
            wrapped_message_length.value)
        res = themis.secure_session_wrap(
            self.session_ctx, ctypes.byref(send_message), len(message),
            ctypes.byref(wrapped_message), ctypes.byref(wrapped_message_length))
        if res != THEMIS_CODES.SUCCESS:
            raise exception.ThemisError(res, "Secure Session failed encrypting")
        return ctypes.string_at(wrapped_message, wrapped_message_length)

    def unwrap(self, message):
        """Decrypt *message*; the result's control flag marks handshake data."""
        wrapped_message = ctypes.create_string_buffer(message)
        unwrapped_message_length = ctypes.c_int(0)
        res = themis.secure_session_unwrap(
            self.session_ctx, wrapped_message, len(message), 0,
            ctypes.byref(unwrapped_message_length))
        if res == THEMIS_CODES.SUCCESS:
            # Message fully consumed by the protocol; nothing for the caller.
            return SString(b"")
        if res != THEMIS_CODES.BUFFER_TOO_SMALL:
            raise exception.ThemisError(res, "Secure Session failed decrypting")
        unwrapped_message = ctypes.create_string_buffer(
            unwrapped_message_length.value)
        res = themis.secure_session_unwrap(
            self.session_ctx, wrapped_message, len(message),
            ctypes.byref(unwrapped_message),
            ctypes.byref(unwrapped_message_length))
        rez = SString(ctypes.string_at(unwrapped_message,
                                       unwrapped_message_length))
        if res == THEMIS_CODES.SEND_AS_IS:
            # Handshake/control packet: must be forwarded to the peer as-is.
            rez.set_control()
        elif res != THEMIS_CODES.SUCCESS:
            raise exception.ThemisError(res, "Secure Session failed decrypting")
        return rez
class MemoryTransport(object):
    """In-memory FIFO transport for exchanging Secure Session packets.

    Subclasses must implement :meth:`get_pub_key_by_id`.
    """

    def __init__(self):
        # Pending messages, oldest first.
        self.message_list = deque()

    def send(self, message):
        """Queue *message* for later delivery to the peer."""
        self.message_list.append(message)

    def receive(self, buffer_length):
        """Pop and return the oldest queued message.

        *buffer_length* is accepted for interface compatibility and ignored.
        """
        queue = self.message_list
        return queue.popleft()

    @abstractmethod
    def get_pub_key_by_id(self, user_id):
        """Resolve the public key for *user_id* (subclass responsibility)."""
        raise NotImplementedError
class SimpleMemoryTransport(MemoryTransport):
    """Memory transport that knows the public key of one expected peer.

    :param expected_client_id: the only peer id this transport accepts
    :param public_key: that peer's public key
    """

    def __init__(self, expected_client_id, public_key):
        self._expected_client_id = expected_client_id
        self._public_key = public_key
        super(SimpleMemoryTransport, self).__init__()

    def get_pub_key_by_id(self, user_id):
        """Return the configured public key, or fail for unknown peers."""
        if user_id == self._expected_client_id:
            return self._public_key
        else:
            # Fixed: was a bare ThemisError, which is undefined in this
            # module; the rest of the file raises exception.ThemisError.
            raise exception.ThemisError(THEMIS_CODES.FAIL, "Incorrect user_id")
class sstring(SString):
    """Deprecated alias kept for backward compatibility; use SString."""

    def __init__(self, *args, **kwargs):
        warnings.warn("sstring is deprecated in favor of SString.")
        # bytes construction happens in SString.__new__; __init__ is a no-op.
        super(sstring, self).__init__()
class ssession(SSession):
    """Deprecated alias kept for backward compatibility; use SSession."""

    def __init__(self, *args, **kwargs):
        warnings.warn("ssession is deprecated in favor of SSession.")
        super(ssession, self).__init__(*args, **kwargs)
class mem_transport(MemoryTransport):
    """Deprecated alias kept for backward compatibility; use MemoryTransport."""

    def __init__(self):
        warnings.warn(
            "mem_transport is deprecated in favor of MemoryTransport.")
        super(mem_transport, self).__init__()
| |
# -*- coding: utf-8 -*-
"""Database related tools."""
import logging
from datetime import datetime
import dateutil.parser
from sqlalchemy import (
Column,
MetaData,
Table,
create_engine,
inspect,
select,
type_coerce,
)
from sqlalchemy.exc import (
DatabaseError,
NoSuchTableError,
)
from sqlalchemy.types import (
BIGINT,
BLOB,
BOOLEAN,
DATE,
DATETIME,
INTEGER,
NUMERIC,
SMALLINT,
TEXT,
TIMESTAMP,
TypeDecorator,
)
from esis.util import datetime_to_timestamp
logger = logging.getLogger(__name__)
class Database(object):
    """Generic database object.

    :param db_filename: Path to the sqlite database file
    :type db_filename: str
    """

    def __init__(self, db_filename):
        """Connect to database and create session object."""
        self.db_filename = db_filename
        self.engine = create_engine(
            'sqlite:///{}'.format(db_filename),
        )
        # Connection is created lazily in connect()/__enter__.
        self.connection = None
        self.metadata = MetaData(bind=self.engine)

    def connect(self):
        """Create connection."""
        logger.debug('Connecting to SQLite database: %r', self.db_filename)
        self.connection = self.engine.connect()

    def disconnect(self):
        """Close connection."""
        assert not self.connection.closed
        logger.debug(
            'Disconnecting from SQLite database: %r', self.db_filename)
        self.connection.close()

    def __enter__(self):
        """Connect on entering context."""
        self.connect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Disconnect on exiting context."""
        self.disconnect()

    def __getitem__(self, table_name):
        """Get table object in database.

        :param table_name: Name of the table
        :type table_name: str
        :return: Table object that can be used in queries
        :rtype: sqlalchemy.schema.Table
        """
        # NOTE: basestring is a Python 2 builtin; this module targets py2.
        if not isinstance(table_name, basestring):
            raise TypeError('Unexpected table name: {}'.format(table_name))
        table = self.metadata.tables.get(table_name)
        if table is None:
            # Cache miss: reflect the table metadata on demand.
            self.reflect([table_name])
            table = Table(table_name, self.metadata, autoload=True)
        return table

    def reflect(self, table_names):
        """Get table metadata through reflection.

        sqlalchemy already provides a reflect method, but it will stop at the
        first failure, while this method will try to get as much as possible.

        :param table_names: Table names to inspect
        :type table_names: list(str)
        """
        inspector = inspect(self.engine)
        for table_name in table_names:
            columns = []
            for column_data in inspector.get_columns(table_name):
                # Rename 'type' to 'type_' to create column object
                column_type = column_data.pop('type', None)
                column_data['type_'] = column_type
                columns.append(Column(**column_data))
            if not columns:
                raise NoSuchTableError(table_name)
            Table(table_name, self.metadata, *columns)

    def run_quick_check(self):
        """Check database integrity.

        Some files, especially those files created after carving, might not
        contain completely valid data.

        :return: True if the sqlite quick_check pragma reports 'ok'
        :rtype: bool
        """
        try:
            result = self.connection.execute('PRAGMA quick_check;')
        except DatabaseError:
            # Completely corrupt files may fail before the pragma runs.
            return False
        passed = result.fetchone()[0] == 'ok'
        if not passed:
            logger.warning('Integrity check failure: %r', self.db_filename)
        return passed
class DBReader(object):
    """Iterate through all db tables and rows easily.

    :param database: Database to traverse
    :type database: esis.db.Database
    """

    # Name suffixes for the shadow tables that support full text search
    FTS_SUFFIXES = (
        'content',
        'segdir',
        'segments',
        'stat',
        'docsize',
    )

    def __init__(self, database):
        """Connect to database and get table metadata."""
        self.database = database
        master_table = database['sqlite_master']
        query = (
            select([master_table.c.name])
            .where(master_table.c.type == 'table')
        )
        result = database.connection.execute(query)
        all_table_names = set(row[0] for row in result.fetchall())
        # sqlite internal bookkeeping tables are never traversed.
        ignored_table_names = ['sqlite_master']
        sequence_table_name = 'sqlite_sequence'
        if sequence_table_name in all_table_names:
            ignored_table_names.append(sequence_table_name)
        # Full-text-search virtual tables and their shadow tables are
        # implementation detail and skipped as well.
        fts_table_names = self._get_fts_table_names(all_table_names)
        ignored_table_names.extend(fts_table_names)
        logger.debug(
            '%d tables ignored: %r',
            len(ignored_table_names),
            ', '.join(sorted(ignored_table_names)))
        ignored_table_names = set(ignored_table_names)
        table_names = all_table_names - ignored_table_names
        self.db_tables = [
            database[table_name]
            for table_name in table_names
        ]
        logger.info('%d tables found', len(self.db_tables))

    def _get_fts_table_names(self, all_table_names):
        """Get a list of FTS-related table names.

        :param all_table_names: The names for all tables in the database
        :type all_table_names: set(str)
        :return: FTS virtual tables plus their existing shadow tables
        :rtype: list(str)
        """
        master_table = self.database['sqlite_master']
        query = (
            select([master_table.c.name])
            .where(master_table.c.sql.like('%USING fts%'))
        )
        result = self.database.connection.execute(query)
        fts_table_names = [row[0] for row in result.fetchall()]
        shadow_table_names = []
        for fts_table_name in fts_table_names:
            for suffix in self.FTS_SUFFIXES:
                shadow_table_name = '{}_{}'.format(fts_table_name, suffix)
                if shadow_table_name in all_table_names:
                    shadow_table_names.append(shadow_table_name)
        return fts_table_names + shadow_table_names

    def tables(self):
        """Generator that traverses all tables in a database.

        :return: Table name
        :rtype: str
        """
        for index, table in enumerate(self.db_tables):
            logger.info(
                '(%d/%d) Traversing %r...',
                index + 1, len(self.db_tables), table.name)
            yield table.name
class IntegerDecorator(TypeDecorator):
    """An integer class that translates 'null' values to None.

    This is needed because some tables use 'null' instead of NULL and elastic
    search fails to index documents with strings where integers should be
    found.
    """

    impl = INTEGER

    def process_result_value(self, value, _dialect):
        """Translate 'null' to None if needed."""
        if value == 'null' or value is None:
            return None
        # NOTE: basestring and long are Python 2 builtins; this module
        # targets py2.
        if isinstance(value, basestring):
            # Convert strings with only digits to integers
            if value.isdigit():
                return int(value)
            try:
                # Try to parse date as return timestamp
                value_dt = dateutil.parser.parse(value)
            except (TypeError, ValueError, OverflowError):
                # Ignore parsing errors and log warning below
                pass
            else:
                value = int(datetime_to_timestamp(value_dt))
        # Return None by default if value cannot be parsed as integer
        if not isinstance(value, (int, long)):
            logger.warning('Invalid integer value: %s', value)
            return None
        return value
class DatetimeDecorator(TypeDecorator):
    """A datetime class that translates data to ISO strings.

    The reason ISO strings are used instead of datetime objects or integer
    timestamps is because is what elasticsearch handles as a datetime value.
    Internally it seems to store it as an integer timestamp, but that's
    transparent to the user.
    """

    impl = TEXT

    def process_result_value(self, value, _dialect):
        """Translate datetime/timestamp to ISO string."""
        # Keep a copy of the original value just in case it's needed to log a
        # warning later
        original_value = value
        if isinstance(value, datetime):
            value = value.isoformat()
        elif isinstance(value, (int, long)) and not isinstance(value, bool):
            # Try to parse timestamp in seconds, millisecons and microseconds
            # NOTE(review): the ms/us candidates rely on Python 2 integer
            # division semantics (`/` truncates for ints here).
            for timestamp in (value, value / 1000, value / 1000000):
                try:
                    value = datetime.utcfromtimestamp(timestamp).isoformat()
                except ValueError:
                    # Out-of-range candidate; try the next granularity.
                    pass
                else:
                    break
        elif isinstance(value, basestring):
            # Parse datetime string and re-format it as an ISO string
            try:
                value = dateutil.parser.parse(value).isoformat()
            except (TypeError, ValueError):
                # Ignore parsing errors and log warning below
                value = None
        # Return None by default if no ISO string could be generated
        if not isinstance(value, str):
            logger.warning('Invalid datetime value: %s', original_value)
            return None
        return value
class TypeCoercionMixin(object):
    """A mixin to transform database values.

    This is useful to get safe values from sqlalchemy when data types are not
    very well defined in SQLite.
    """

    # Column type coercions to avoid errors when getting values of a different
    # type due to sqlite's flexibility in that regard
    COERCIONS = {
        # This is because NUMERIC type affinity in SQLite can use any
        # storage class, so the safer option is to cast it to a string
        NUMERIC: TEXT,
        # Translate the 'null' string to None to avoid indexing failures
        BOOLEAN: IntegerDecorator,
        INTEGER: IntegerDecorator,
        BIGINT: IntegerDecorator,
        SMALLINT: IntegerDecorator,
        # Translate integer timestamps to ISO formatted strings
        DATE: DatetimeDecorator,
        DATETIME: DatetimeDecorator,
        TIMESTAMP: DatetimeDecorator,
    }

    def _coerce_column_type(self, column):
        """Coerce some column type.

        :param column: Column to examine
        :type column: sqlalchemy.sql.schema.Column
        :return: Coerced column (if needed)
        :rtype: sqlalchemy.sql.elements.Label | sqlalchemy.sql.schema.Column
        """
        # NOTE: dict.iteritems is Python 2 only; this module targets py2.
        for from_type, to_type in self.COERCIONS.iteritems():
            if isinstance(column.type, from_type):
                # Preserve column name despite of the type coercion
                return type_coerce(column, to_type).label(column.name)
        # Don't coerce values if not really needed
        return column

    def _coerce(self, columns):
        """Coerce multiple columns types.

        This is useful to use in select queries to make sure all columns will
        return safe values.

        :param columns: Columns to examine
        :type column: list(sqlalchemy.sql.schema.Column)
        :return: Coerced columns (if needed)
        :rtype: list(
            sqlalchemy.sql.elements.Label | sqlalchemy.sql.schema.Column)
        """
        return [self._coerce_column_type(column) for column in columns]
class TableReader(TypeCoercionMixin):
    """Iterate over all rows easily.

    :param database: Database being explored
    :type database: esis.db.Database
    :param table: Database table
    :type table: sqlalchemy.sql.schema.Table
    """

    def __init__(self, database, table_name):
        """Initialize reader object."""
        self.database = database
        self.table = database[table_name]
        # Filter out columns that are not going to be indexed
        # - BLOB: more investigation needed on how that works with
        # elasticsearch
        ignored_column_names = [
            column.name
            for column in self.table.columns
            if isinstance(column.type, BLOB)
        ]
        # Ignore '_id' column unless it has unique values
        # This is because '_id' is used by elasticsearch and using the same
        # one in multiple documents will result in those being overwritten
        if '_id' in (column.name for column in self.table.columns):
            # NOTE(review): select([...]).count() is the pre-1.4 sqlalchemy
            # API; this needs updating if sqlalchemy is ever upgraded.
            query = select([self.table.c['_id']]).count()
            row_count = self.database.connection.execute(query).scalar()
            distinct_query = (
                select([self.table.c['_id']]).distinct().count())
            distinct_row_count = (
                self.database.connection.execute(distinct_query).scalar())
            if row_count != distinct_row_count:
                ignored_column_names.append('_id')
        if len(ignored_column_names) > 0:
            logger.debug(
                '%d columns ignored: %s',
                len(ignored_column_names),
                ', '.join(sorted(ignored_column_names)))
        ignored_column_names = set(ignored_column_names)
        self.columns = [
            column
            for column in self.table.columns
            if column.name not in ignored_column_names
        ]
        logger.debug(
            '%d columns found: %s',
            len(self.columns),
            ', '.join(column.name for column in self.columns))

    def get_schema(self):
        """Return table schema.

        :return: Column names and their type
        :rtype: dict(str, sqlalchemy.types.*)
        """
        return {column.name: column.type
                for column in self.columns}

    def rows(self):
        """Generator that traverses all rows in a table.

        :return: All rows in the table
        :rtype: generator(sqlalchemy.engine.result.RowProxy)
        """
        # If every column was ignored there is nothing to select.
        if self.columns:
            query = select(self._coerce(self.columns))
            result = self.database.connection.execute(query)
            rows = result.fetchall()
            logger.debug('%d rows found', len(rows))
            for row in rows:
                yield row
| |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
We export WSGIApplication, which is very similar to web.Application, except
no asynchronous methods are supported (since WSGI does not support
non-blocking requests properly). If you call self.flush() or other
asynchronous methods in your request handlers running in a WSGIApplication,
we throw an exception.
Example usage:
import tornado.web
import tornado.wsgi
import wsgiref.simple_server
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.wsgi.WSGIApplication([
(r"/", MainHandler),
])
server = wsgiref.simple_server.make_server('', 8888, application)
server.serve_forever()
See the 'appengine' demo for an example of using this module to run
a Tornado app on Google AppEngine.
Since no asynchronous methods are available for WSGI applications, the
httpclient and auth modules are both not available for WSGI applications.
We also export WSGIContainer, which lets you run other WSGI-compatible
frameworks on the Tornado HTTP server and I/O loop. See WSGIContainer for
details and documentation.
"""
import cgi
import cStringIO
import escape
import httplib
import httputil
import logging
import sys
import time
import urllib
import web
class WSGIApplication(web.Application):
    """A WSGI-equivalent of web.Application.

    We support the same interface, but handlers running in a WSGIApplication
    do not support flush() or asynchronous methods.
    """

    def __init__(self, handlers=None, default_host="", **settings):
        # wsgi=True makes handlers buffer their output instead of flushing
        # to a socket; transforms are disabled since WSGI owns the wire.
        web.Application.__init__(self, handlers, default_host, transforms=[],
                                 wsgi=True, **settings)

    def __call__(self, environ, start_response):
        """WSGI entry point: run the matching handler synchronously."""
        handler = web.Application.__call__(self, HTTPRequest(environ))
        # Under WSGI the handler must have finished synchronously.
        assert handler._finished
        status = str(handler._status_code) + " " + \
            httplib.responses[handler._status_code]
        headers = handler._headers.items()
        for cookie_dict in getattr(handler, "_new_cookies", []):
            for cookie in cookie_dict.values():
                headers.append(("Set-Cookie", cookie.OutputString(None)))
        start_response(status, headers)
        return handler._write_buffer
class HTTPRequest(object):
    """Mimics httpserver.HTTPRequest for WSGI applications."""

    def __init__(self, environ):
        """Parses the given WSGI environ to construct the request."""
        self.method = environ["REQUEST_METHOD"]
        self.path = urllib.quote(environ.get("SCRIPT_NAME", ""))
        self.path += urllib.quote(environ.get("PATH_INFO", ""))
        self.uri = self.path
        self.arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            arguments = cgi.parse_qs(self.query)
            for name, values in arguments.iteritems():
                # Drop empty values, matching tornado's native behavior.
                values = [v for v in values if v]
                if values: self.arguments[name] = values
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            self.headers["Content-Length"] = int(environ["CONTENT_LENGTH"])
        # All other request headers arrive as HTTP_* environ keys.
        for key in environ:
            if key.startswith("HTTP_"):
                self.headers[key[5:].replace("_", "-")] = environ[key]
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read()
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body
        self.files = {}
        content_type = self.headers.get("Content-Type", "")
        if content_type.startswith("application/x-www-form-urlencoded"):
            for name, values in cgi.parse_qs(self.body).iteritems():
                self.arguments.setdefault(name, []).extend(values)
        elif content_type.startswith("multipart/form-data"):
            if 'boundary=' in content_type:
                boundary = content_type.split('boundary=',1)[1]
                if boundary: self._parse_mime_body(boundary)
            else:
                logging.warning("Invalid multipart/form-data")

        self._start_time = time.time()
        self._finish_time = None

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time

    def _parse_mime_body(self, boundary):
        # Parse a multipart/form-data body into self.arguments/self.files.
        if boundary.startswith('"') and boundary.endswith('"'):
            boundary = boundary[1:-1]
        # The closing delimiter is "--boundary--" plus an optional CRLF.
        if self.body.endswith("\r\n"):
            footer_length = len(boundary) + 6
        else:
            footer_length = len(boundary) + 4
        parts = self.body[:-footer_length].split("--" + boundary + "\r\n")
        for part in parts:
            if not part: continue
            eoh = part.find("\r\n\r\n")
            if eoh == -1:
                logging.warning("multipart/form-data missing headers")
                continue
            headers = httputil.HTTPHeaders.parse(part[:eoh])
            name_header = headers.get("Content-Disposition", "")
            if not name_header.startswith("form-data;") or \
               not part.endswith("\r\n"):
                logging.warning("Invalid multipart/form-data")
                continue
            # Strip the header block and the trailing CRLF from the value.
            value = part[eoh + 4:-2]
            name_values = {}
            for name_part in name_header[10:].split(";"):
                name, name_value = name_part.strip().split("=", 1)
                name_values[name] = name_value.strip('"').decode("utf-8")
            if not name_values.get("name"):
                logging.warning("multipart/form-data value missing name")
                continue
            name = name_values["name"]
            if name_values.get("filename"):
                # File upload: record the content type alongside the data.
                ctype = headers.get("Content-Type", "application/unknown")
                self.files.setdefault(name, []).append(dict(
                    filename=name_values["filename"], body=value,
                    content_type=ctype))
            else:
                self.arguments.setdefault(name, []).append(value)
class WSGIContainer(object):
    """Makes a WSGI-compatible function runnable on Tornado's HTTP server.

    Wrap a WSGI function in a WSGIContainer and pass it to HTTPServer to
    run it. For example:

        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]

        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.instance().start()

    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop. It has not yet been
    thoroughly tested in production.
    """
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application

    def __call__(self, request):
        """Run the wrapped WSGI app for *request* and write the response."""
        data = {}
        response = []
        def start_response(status, response_headers, exc_info=None):
            # WSGI start_response: record status/headers; the returned
            # write() callable appends straight into the response buffer.
            data["status"] = status
            data["headers"] = response_headers
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        # Iterate the app iterable before checking `data`: generator apps
        # may not call start_response until first iteration.
        response.extend(app_response)
        body = "".join(response)
        if hasattr(app_response, "close"):
            app_response.close()
        if not data: raise Exception("WSGI app did not call start_response")
        status_code = int(data["status"].split()[0])
        headers = data["headers"]
        header_set = set(k.lower() for (k,v) in headers)
        body = escape.utf8(body)
        # Supply mandatory defaults the app did not set itself.
        if "content-length" not in header_set:
            headers.append(("Content-Length", str(len(body))))
        if "content-type" not in header_set:
            headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/0.1"))
        parts = ["HTTP/1.1 " + data["status"] + "\r\n"]
        for key, value in headers:
            parts.append(escape.utf8(key) + ": " + escape.utf8(value) + "\r\n")
        parts.append("\r\n")
        parts.append(body)
        request.write("".join(parts))
        request.finish()
        self._log(status_code, request)

    @staticmethod
    def environ(request):
        """Build a WSGI environ dict (PEP 333) from a Tornado request."""
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": request.path,
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            # Fixed: PEP 333 requires SERVER_PORT to be a string, not an int.
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": cStringIO.StringIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers["Content-Type"]
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers["Content-Length"]
        for key, value in request.headers.iteritems():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ

    def _log(self, status_code, request):
        # Access-log line; severity tracks the status class (2xx/3xx info,
        # 4xx warning, 5xx error).
        if status_code < 400:
            log_method = logging.info
        elif status_code < 500:
            log_method = logging.warning
        else:
            log_method = logging.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
| |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Series, Timestamp
import pandas._testing as tm
from pandas.core.internals.blocks import IntBlock
class TestSeriesInternals:
    """Tests for Series._convert coercion behavior and block internals."""

    # GH 10265
    def test_convert(self):
        # Tests: All to nans, coerce, true
        # Test coercion returns correct type
        s = Series(["a", "b", "c"])
        results = s._convert(datetime=True, coerce=True)
        expected = Series([NaT] * 3)
        tm.assert_series_equal(results, expected)

        results = s._convert(numeric=True, coerce=True)
        expected = Series([np.nan] * 3)
        tm.assert_series_equal(results, expected)

        expected = Series([NaT] * 3, dtype=np.dtype("m8[ns]"))
        results = s._convert(timedelta=True, coerce=True)
        tm.assert_series_equal(results, expected)

        dt = datetime(2001, 1, 1, 0, 0)
        td = dt - datetime(2000, 1, 1, 0, 0)

        # Test coercion with mixed types
        s = Series(["a", "3.1415", dt, td])
        results = s._convert(datetime=True, coerce=True)
        expected = Series([NaT, NaT, dt, NaT])
        tm.assert_series_equal(results, expected)

        results = s._convert(numeric=True, coerce=True)
        expected = Series([np.nan, 3.1415, np.nan, np.nan])
        tm.assert_series_equal(results, expected)

        results = s._convert(timedelta=True, coerce=True)
        expected = Series([NaT, NaT, NaT, td], dtype=np.dtype("m8[ns]"))
        tm.assert_series_equal(results, expected)

        # Test standard conversion returns original
        results = s._convert(datetime=True)
        tm.assert_series_equal(results, s)
        results = s._convert(numeric=True)
        expected = Series([np.nan, 3.1415, np.nan, np.nan])
        tm.assert_series_equal(results, expected)
        results = s._convert(timedelta=True)
        tm.assert_series_equal(results, s)

        # test pass-through and non-conversion when other types selected
        s = Series(["1.0", "2.0", "3.0"])
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([1.0, 2.0, 3.0])
        tm.assert_series_equal(results, expected)
        results = s._convert(True, False, True)
        tm.assert_series_equal(results, s)

        s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)], dtype="O")
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)])
        tm.assert_series_equal(results, expected)
        results = s._convert(datetime=False, numeric=True, timedelta=True)
        tm.assert_series_equal(results, s)

        td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
        s = Series([td, td], dtype="O")
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([td, td])
        tm.assert_series_equal(results, expected)
        results = s._convert(True, True, False)
        tm.assert_series_equal(results, s)

        s = Series([1.0, 2, 3], index=["a", "b", "c"])
        result = s._convert(numeric=True)
        tm.assert_series_equal(result, s)

        # force numeric conversion
        r = s.copy().astype("O")
        r["a"] = "1"
        result = r._convert(numeric=True)
        tm.assert_series_equal(result, s)

        r = s.copy().astype("O")
        r["a"] = "1."
        result = r._convert(numeric=True)
        tm.assert_series_equal(result, s)

        r = s.copy().astype("O")
        r["a"] = "garbled"
        result = r._convert(numeric=True)
        expected = s.copy()
        expected["a"] = np.nan
        tm.assert_series_equal(result, expected)

        # GH 4119, not converting a mixed type (e.g.floats and object)
        s = Series([1, "na", 3, 4])
        result = s._convert(datetime=True, numeric=True)
        expected = Series([1, np.nan, 3, 4])
        tm.assert_series_equal(result, expected)

        s = Series([1, "", 3, 4])
        result = s._convert(datetime=True, numeric=True)
        tm.assert_series_equal(result, expected)

        # dates
        s = Series(
            [
                datetime(2001, 1, 1, 0, 0),
                datetime(2001, 1, 2, 0, 0),
                datetime(2001, 1, 3, 0, 0),
            ]
        )
        s2 = Series(
            [
                datetime(2001, 1, 1, 0, 0),
                datetime(2001, 1, 2, 0, 0),
                datetime(2001, 1, 3, 0, 0),
                "foo",
                1.0,
                1,
                Timestamp("20010104"),
                "20010105",
            ],
            dtype="O",
        )

        result = s._convert(datetime=True)
        expected = Series(
            [Timestamp("20010101"), Timestamp("20010102"), Timestamp("20010103")],
            dtype="M8[ns]",
        )
        tm.assert_series_equal(result, expected)

        result = s._convert(datetime=True, coerce=True)
        tm.assert_series_equal(result, expected)

        expected = Series(
            [
                Timestamp("20010101"),
                Timestamp("20010102"),
                Timestamp("20010103"),
                NaT,
                NaT,
                NaT,
                Timestamp("20010104"),
                Timestamp("20010105"),
            ],
            dtype="M8[ns]",
        )
        result = s2._convert(datetime=True, numeric=False, timedelta=False, coerce=True)
        tm.assert_series_equal(result, expected)
        result = s2._convert(datetime=True, coerce=True)
        tm.assert_series_equal(result, expected)

        s = Series(["foo", "bar", 1, 1.0], dtype="O")
        result = s._convert(datetime=True, coerce=True)
        expected = Series([NaT] * 2 + [Timestamp(1)] * 2)
        tm.assert_series_equal(result, expected)

        # preserver if non-object
        s = Series([1], dtype="float32")
        result = s._convert(datetime=True, coerce=True)
        tm.assert_series_equal(result, s)

        # FIXME: dont leave commented-out
        # r = s.copy()
        # r[0] = np.nan
        # result = r._convert(convert_dates=True,convert_numeric=False)
        # assert result.dtype == 'M8[ns]'

        # dateutil parses some single letters into today's value as a date
        expected = Series([NaT])
        for x in "abcdefghijklmnopqrstuvwxyz":
            s = Series([x])
            result = s._convert(datetime=True, coerce=True)
            tm.assert_series_equal(result, expected)
            s = Series([x.upper()])
            result = s._convert(datetime=True, coerce=True)
            tm.assert_series_equal(result, expected)

    def test_convert_no_arg_error(self):
        # _convert with every conversion flag off must raise.
        s = Series(["1.0", "2"])
        msg = r"At least one of datetime, numeric or timedelta must be True\."
        with pytest.raises(ValueError, match=msg):
            s._convert()

    def test_convert_preserve_bool(self):
        s = Series([1, True, 3, 5], dtype=object)
        r = s._convert(datetime=True, numeric=True)
        e = Series([1, 1, 3, 5], dtype="i8")
        tm.assert_series_equal(r, e)

    def test_convert_preserve_all_bool(self):
        s = Series([False, True, False, False], dtype=object)
        r = s._convert(datetime=True, numeric=True)
        e = Series([False, True, False, False], dtype=bool)
        tm.assert_series_equal(r, e)

    def test_constructor_no_pandas_array(self):
        ser = pd.Series([1, 2, 3])
        result = pd.Series(ser.array)
        tm.assert_series_equal(ser, result)
        assert isinstance(result._mgr.blocks[0], IntBlock)

    def test_astype_no_pandas_dtype(self):
        # https://github.com/pandas-dev/pandas/pull/24866
        ser = pd.Series([1, 2], dtype="int64")
        # Don't have PandasDtype in the public API, so we use `.array.dtype`,
        # which is a PandasDtype.
        result = ser.astype(ser.array.dtype)
        tm.assert_series_equal(result, ser)

    def test_from_array(self):
        result = pd.Series(pd.array(["1H", "2H"], dtype="timedelta64[ns]"))
        assert result._mgr.blocks[0].is_extension is False

        result = pd.Series(pd.array(["2015"], dtype="datetime64[ns]"))
        assert result._mgr.blocks[0].is_extension is False

    def test_from_list_dtype(self):
        result = pd.Series(["1H", "2H"], dtype="timedelta64[ns]")
        assert result._mgr.blocks[0].is_extension is False

        result = pd.Series(["2015"], dtype="datetime64[ns]")
        assert result._mgr.blocks[0].is_extension is False
def test_hasnans_uncached_for_series():
    # GH#19700: Index caches .hasnans, but a Series derived from it must not
    # share that cache entry (mutating the Series would make it stale).
    idx = pd.Index([0, 1])
    assert idx.hasnans is False
    assert "hasnans" in idx._cache
    series_from_idx = idx.to_series()
    assert series_from_idx.hasnans is False
    assert not hasattr(series_from_idx, "_cache")
    series_from_idx.iloc[-1] = np.nan
    assert series_from_idx.hasnans is True
    # the Series property reuses the Index docstring
    assert Series.hasnans.__doc__ == pd.Index.hasnans.__doc__
| |
import numpy as np
import pyqtgraph as pg
from ..fitting.fitting_functions import basic_fit, advanced_fit
from .get import Get
from . import FittingTabSelected
from src.iBeatles.fitting.display import Display as FittingDisplay
class SelectedBinsHandler(object):
    """Manage the selected/locked bin overlays in the fitting image view and
    keep the Bragg-edge plot in sync with the current table selection."""

    def __init__(self, parent=None, grand_parent=None):
        # parent: fitting window (owns image_view, bragg_edge_plot, ui, ...)
        # grand_parent: main application (owns table dictionaries and data)
        self.parent = parent
        self.grand_parent = grand_parent

    def clear_all_selected_bins(self):
        """Remove every 'selected' bin item currently shown in the image view."""
        for _bin_ui in self.parent.list_bins_selected_item:
            self.parent.image_view.removeItem(_bin_ui)

    def clear_all_locked_bins(self):
        """Remove every 'locked' bin item currently shown in the image view."""
        for _bin_ui in self.parent.list_bins_locked_item:
            self.parent.image_view.removeItem(_bin_ui)

    def update_bins_selected(self):
        """Redraw the selected-bin overlay for the active fitting tab."""
        self.clear_all_selected_bins()
        o_get = Get(parent=self.parent)
        fitting_tab_selected = o_get.main_tab_selected()
        if fitting_tab_selected == FittingTabSelected.march_dollase:
            # March-Dollase: show the box of every active table row.
            table_dictionary = self.grand_parent.march_table_dictionary
            list_bins_selected_item = []
            for _index in table_dictionary.keys():
                box = table_dictionary[_index]['selected_item']
                if table_dictionary[_index]['active']:
                    self.parent.image_view.addItem(box)
                    list_bins_selected_item.append(box)
            self.parent.list_bins_selected_item = list_bins_selected_item
        else:
            # Kropff: only display the bin(s) of the selected row(s).
            o_get = Get(parent=self.parent)
            row_selected = o_get.kropff_row_selected()
            table_dictionary = self.grand_parent.kropff_table_dictionary
            list_bins_selected_item = []
            for _index in row_selected:
                str_index = str(_index)
                box = table_dictionary[str_index]['selected_item']
                self.parent.image_view.addItem(box)
                list_bins_selected_item.append(box)
            self.parent.list_bins_selected_item = list_bins_selected_item
            self.parent.update_kropff_fitting_plot()

    def update_bins_locked(self, all_flag=True):
        """Redraw the locked-bin overlay (March-Dollase tab only).

        NOTE: all_flag is kept for interface compatibility; it is unused.
        """
        self.clear_all_locked_bins()
        o_get = Get(parent=self.parent)
        fitting_tab_selected = o_get.main_tab_selected()
        if fitting_tab_selected == FittingTabSelected.march_dollase:
            table_dictionary = self.grand_parent.march_table_dictionary
            list_bins_locked_item = []
            for _index in table_dictionary.keys():
                box = table_dictionary[_index]['locked_item']
                if table_dictionary[_index]['lock']:
                    self.parent.image_view.addItem(box)
                    list_bins_locked_item.append(box)
            self.parent.list_bins_locked_item = list_bins_locked_item
        else:
            # Kropff tab has no locked-bin concept.
            return

    def retrieve_list_bin_selected(self, flag_name='active'):
        """Return the sorted indexes of March-Dollase rows whose *flag_name*
        ('active' or 'lock') entry is truthy."""
        list_bin_selected = []
        table_dictionary = self.grand_parent.march_table_dictionary
        for _index in table_dictionary:
            if table_dictionary[_index][flag_name]:
                list_bin_selected.append(_index)
        list_bin_selected.sort()
        return list_bin_selected

    def update_bragg_edge_plot(self):
        """Clear and rebuild the Bragg-edge plot for the active tab."""
        self.parent.bragg_edge_plot.clear()
        o_get = Get(parent=self.parent)
        fitting_tab_selected = o_get.main_tab_selected()
        if fitting_tab_selected == FittingTabSelected.march_dollase:
            self.update_march_dollase_bragg_edge_plot()
        else:
            self.update_kropff_bragg_edge_plot()

    def update_kropff_bragg_edge_plot(self):
        """Rebuild the Bragg-edge plot for the Kropff tab from the rows
        currently selected in the Kropff table."""
        table_dictionary = self.grand_parent.kropff_table_dictionary
        x_axis = self.grand_parent.normalized_lambda_bragg_edge_x_axis
        self.parent.bragg_edge_data['x_axis'] = x_axis
        # retrieve the normalized image stack (lambda, x, y)
        data_2d = self.grand_parent.data_metadata['normalized']['data']
        o_get = Get(parent=self.parent)
        list_bin_selected = o_get.kropff_row_selected()
        bragg_edge_data = []
        for _bin_selected in list_bin_selected:
            _entry = table_dictionary[str(_bin_selected)]['bin_coordinates']
            x0 = _entry['x0']
            x1 = _entry['x1']
            y0 = _entry['y0']
            y1 = _entry['y1']
            # average counts over the bin, one value per wavelength slice
            _data = data_2d[:, x0:x1, y0:y1]
            inter1 = np.nanmean(_data, axis=1)
            final = np.nanmean(inter1, axis=1)
            bragg_edge_data.append(final)
        bragg_edge_data = np.nanmean(bragg_edge_data, axis=0)
        x_axis = self.grand_parent.normalized_lambda_bragg_edge_x_axis
        try:
            self.parent.bragg_edge_plot.plot(x_axis, bragg_edge_data)
        except Exception:
            # nothing selected (or mismatched axes): leave the plot empty
            return
        self.parent.bragg_edge_plot.setLabel("bottom", u'\u03BB (\u212B)')
        self.parent.bragg_edge_plot.setLabel("left", "Average Counts")
        if self.grand_parent.fitting_bragg_edge_linear_selection == []:
            # first time: select the whole x range
            linear_region_left_index = 0
            linear_region_right_index = len(x_axis) - 1
            self.grand_parent.fitting_bragg_edge_linear_selection = [linear_region_left_index,
                                                                     linear_region_right_index]
        else:
            [linear_region_left_index, linear_region_right_index] = \
                self.grand_parent.fitting_bragg_edge_linear_selection
        lr_left = x_axis[linear_region_left_index]
        lr_right = x_axis[linear_region_right_index]
        linear_region_range = [lr_left, lr_right]
        if self.parent.fitting_lr is None:
            # create the linear selection region once and keep reusing it
            lr = pg.LinearRegionItem(values=linear_region_range,
                                     orientation='vertical',
                                     brush=None,
                                     movable=True,
                                     bounds=None)
            lr.setZValue(-10)
            lr.sigRegionChangeFinished.connect(self.parent.bragg_edge_linear_region_changed)
            lr.sigRegionChanged.connect(self.parent.bragg_edge_linear_region_changing)
            self.parent.bragg_edge_plot.addItem(lr)
            self.parent.fitting_lr = lr
        else:
            lr = self.parent.fitting_lr
            lr.setRegion(linear_region_range)
            self.parent.bragg_edge_plot.addItem(lr)
        o_display = FittingDisplay(parent=self.parent,
                                   grand_parent=self.grand_parent)
        o_display.display_lambda_0()

    def update_march_dollase_bragg_edge_plot(self):
        """Rebuild the Bragg-edge plot for the March-Dollase tab and overlay
        the fit computed from the averaged active-row parameters."""
        if self.grand_parent.display_active_row_flag:
            flag_name = 'active'
        else:
            flag_name = 'lock'
        list_bin_selected = self.retrieve_list_bin_selected(flag_name=flag_name)
        if list_bin_selected == []:
            return
        table_dictionary = self.grand_parent.march_table_dictionary
        # retrieve the normalized image stack (lambda, x, y)
        data_2d = self.grand_parent.data_metadata['normalized']['data']
        bragg_edge_data = []
        for _bin_selected in list_bin_selected:
            _entry = table_dictionary[str(_bin_selected)]['bin_coordinates']
            x0 = _entry['x0']
            x1 = _entry['x1']
            y0 = _entry['y0']
            y1 = _entry['y1']
            # average counts over the bin, one value per wavelength slice
            _data = data_2d[:, x0:x1, y0:y1]
            inter1 = np.nanmean(_data, axis=1)
            final = np.nanmean(inter1, axis=1)
            bragg_edge_data.append(final)
        bragg_edge_data = np.nanmean(bragg_edge_data, axis=0)
        x_axis = self.grand_parent.normalized_lambda_bragg_edge_x_axis
        # keep x and y around: used to initialize a1, a2, a5 and a6
        self.parent.bragg_edge_data['x_axis'] = x_axis
        self.parent.bragg_edge_data['y_axis'] = bragg_edge_data
        self.parent.bragg_edge_plot.plot(x_axis, bragg_edge_data)
        self.parent.bragg_edge_plot.setLabel("bottom", u'\u03BB (\u212B)')
        self.parent.bragg_edge_plot.setLabel("left", "Average Counts")
        o_get = Get(parent=self.parent,
                    grand_parent=self.grand_parent)
        [linear_region_left_index, linear_region_right_index] = o_get.fitting_bragg_edge_linear_selection()
        lr_left = x_axis[linear_region_left_index]
        lr_right = x_axis[linear_region_right_index]
        linear_region_range = [lr_left, lr_right]
        if self.parent.fitting_lr is None:
            # create the linear selection region once and keep reusing it
            lr = pg.LinearRegionItem(values=linear_region_range,
                                     orientation='vertical',
                                     brush=None,
                                     movable=True,
                                     bounds=None)
            lr.setZValue(-10)
            lr.sigRegionChangeFinished.connect(self.parent.bragg_edge_linear_region_changed)
            lr.sigRegionChanged.connect(self.parent.bragg_edge_linear_region_changing)
            self.parent.bragg_edge_plot.addItem(lr)
            self.parent.fitting_lr = lr
        else:
            lr = self.parent.fitting_lr
            lr.setRegion(linear_region_range)
            self.parent.bragg_edge_plot.addItem(lr)
        display_fitting = True
        if display_fitting:
            parameters = self.get_average_parameters_activated()
            _advanced_fitting_mode = self.parent.ui.advanced_table_checkBox.isChecked()
            d_spacing = parameters['d_spacing']
            alpha = parameters['alpha']
            sigma = parameters['sigma']
            a1 = parameters['a1']
            a2 = parameters['a2']
            if _advanced_fitting_mode:
                a5 = parameters['a5']
                a6 = parameters['a6']
            if np.isnan(d_spacing) or np.isnan(alpha) or np.isnan(sigma) or np.isnan(a1) or np.isnan(a2):
                # no active row yet: nothing to fit
                return
            fit_x_axis = np.linspace(lr_left, lr_right, num=100)
            if _advanced_fitting_mode:
                fit_y_axis = [advanced_fit(x, d_spacing, alpha, sigma, a1, a2,
                                           a5,
                                           a6) for x in fit_x_axis]
            else:
                fit_y_axis = [basic_fit(x, d_spacing, alpha, sigma, a1, a2) for x in fit_x_axis]
            # fit_y_axis *= nbr_index_selected  # FIXME
            self.parent.bragg_edge_plot.plot(fit_x_axis, fit_y_axis, pen='r')

    def get_average_parameters_activated(self):
        """Average each March-Dollase fit parameter over all active rows.

        Returns a dict keyed by d_spacing, alpha, sigma, a1, a2, a5, a6;
        each value is the mean over active rows, or NaN when none is active.
        """
        table_dictionary = self.grand_parent.march_table_dictionary
        names = ['d_spacing', 'alpha', 'sigma', 'a1', 'a2', 'a5', 'a6']
        values = {name: [] for name in names}
        for _entry in table_dictionary.values():
            if _entry['active']:
                for name in names:
                    values[name].append(_entry[name]['val'])
        return {name: self.get_mean_value(values[name]) for name in names}

    def get_mean_value(self, array=None):
        """Return the mean of *array*, or NaN for an empty/None input.

        The previous signature used a mutable default (``array=[]``) and the
        removed ``np.NaN`` alias; behavior is unchanged for all callers.
        """
        if array is None or len(array) == 0:
            return np.nan
        return np.mean(array)
| |
import os
import urllib.parse
import datetime
import threading
import http.client
import socket
import errno
import sys
import select
import time
import json
import xlog
logging = xlog.getLogger("simple_http_server")
class HttpServerHandler():
    """Minimal per-connection HTTP request handler.

    Reads requests off an accepted socket, parses the request line and
    headers, then dispatches to a do_<METHOD> hook. Subclasses override
    the do_* methods; the base implementations only log a warning.
    """
    # version assumed when the request line carries none
    default_request_version = "HTTP/1.1"
    rbufsize = -1  # read side: fully buffered
    wbufsize = 0   # write side: unbuffered
    command = ""   # HTTP method of the current request (e.g. "GET")
    path = ""      # request target of the current request
    def __init__(self, sock, client, args, https=False):
        # sock/client come straight from socket.accept(); args is opaque
        # extra state forwarded by the server.
        self.connection = sock
        self.rfile = sock.makefile("rb", self.rbufsize)
        self.wfile = sock.makefile("wb", self.wbufsize)
        self.client_address = client
        self.args = args
        self.https = https
        self.setup()
    def setup(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
    def handle(self):
        """Serve requests on this connection until it should close."""
        #logging.info('Connected from %r', self.client_address)
        while True:
            try:
                # default to closing; handle_one_request() re-enables
                # keep-alive after a successfully handled request
                self.close_connection = 1
                self.handle_one_request()
            except Exception as e:
                #logging.warn("handle err:%r close", e)
                self.close_connection = 1
            if self.close_connection:
                break
        self.connection.close()
        #logging.debug("closed from %s:%d", self.client_address[0], self.client_address[1])
    def address_string(self):
        """Return the peer as 'host:port' for log messages."""
        return '%s:%s' % self.client_address[:2]
    def parse_request(self):
        """Parse self.raw_requestline and the following headers.

        Sets command, path, request_version, headers and close_connection.
        Returns True on success; on failure an error response is sent and
        False is returned.
        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        requestline = self.raw_requestline
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1):
                # HTTP/1.1 defaults to persistent connections
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                                "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            # HTTP/0.9 simple request: only GET, always close
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            # blank line: nothing to parse
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version
        # Examine the headers and look for a Connection directive
        self.headers = http.client.parse_headers(self.rfile)
        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif conntype.lower() == 'keep-alive':
            self.close_connection = 0
        return True
    def handle_one_request(self):
        """Read, parse and dispatch a single request."""
        try:
            try:
                line = self.rfile.readline(65535)
                self.raw_requestline = line.decode('iso-8859-1')
            except Exception as e:
                #logging.exception("simple server handle except %r", e)
                return
            if len(self.raw_requestline) > 65536:
                # NOTE(review): dead check -- readline(65535) caps the line
                # at 65535 bytes, so this condition can never be true.
                #logging.warn("recv command line too large")
                return
            if not self.raw_requestline:
                # peer closed the connection
                #logging.warn("closed")
                return
            self.parse_request()
            if self.command == "GET":
                self.do_GET()
            elif self.command == "POST":
                self.do_POST()
            elif self.command == "CONNECT":
                self.do_CONNECT()
            elif self.command == "HEAD":
                self.do_HEAD()
            elif self.command == "DELETE":
                self.do_DELETE()
            elif self.command == "OPTIONS":
                self.do_OPTIONS()
            elif self.command == "PUT":
                self.do_PUT()
            else:
                logging.warn("unhandler cmd:%s path:%s from:%s", self.command, self.path, self.address_string())
                return
            self.wfile.flush() #actually send the response if not already done.
            # NOTE(review): this re-enables keep-alive unconditionally,
            # overriding the Connection/HTTP-version decision taken in
            # parse_request() -- confirm this is intentional.
            self.close_connection = 0
        except socket.error as e:
            #logging.warn("socket error:%r", e)
            pass
        except IOError as e:
            if e.errno == errno.EPIPE:
                logging.warn("PIPE error:%r", e)
                pass
            else:
                logging.warn("IOError:%r", e)
                pass
        #except OpenSSL.SSL.SysCallError as e:
        #    logging.warn("socket error:%r", e)
        except Exception as e:
            logging.exception("handler:%r from:%s", e, self.address_string())
            pass
    # Default do_* implementations: log the unhandled request and drop it.
    # Subclasses override the verbs they support.
    def do_GET(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_POST(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_PUT(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_DELETE(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_OPTIONS(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_HEAD(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def do_CONNECT(self):
        logging.warn("unhandler cmd:%s from:%s", self.command, self.address_string())
    def send_not_found(self):
        """Write a complete 404 response and mark the connection for close."""
        self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
    def send_error(self, code, message=None):
        """Write a bare error response with optional plain-text body."""
        self.wfile.write(('HTTP/1.1 %d\r\n' % code).encode())
        self.wfile.write(b'Connection: close\r\n\r\n')
        if message:
            self.wfile.write(message.encode())
    def send_response(self, mimetype="", content="", headers="", status=200):
        """Write a full response; small bodies are coalesced with the head.

        headers may be a dict of header fields or a pre-formatted string.
        """
        data = []
        data.append('HTTP/1.1 %d\r\n' % status)
        if len(mimetype):
            data.append('Content-Type: %s\r\n' % mimetype)
        data.append('Content-Length: %s\r\n' % len(content.encode('utf-8')))
        if len(headers):
            if isinstance(headers, dict):
                for key in headers:
                    data.append("%s: %s\r\n" % (key, headers[key]))
            elif isinstance(headers, str):
                data.append(headers)
        data.append("\r\n")
        if len(content) < 1024:
            # small payload: single write
            data.append(content)
            data_str = "".join(data)
            self.wfile.write(data_str.encode())
        else:
            # large payload: head first, then the body
            data_str = "".join(data)
            self.wfile.write(data_str.encode())
            if len(content):
                self.wfile.write(content.encode())
    def send_file(self, filename, mimetype):
        """Stream *filename* with long-lived cache headers; 404 when missing."""
        try:
            if not os.path.isfile(filename):
                self.send_not_found()
                return
            file_size = os.path.getsize(filename)
            # NOTE(review): "today + 330 minutes" rendered as GMT looks like a
            # timezone-offset hack for the Expires header -- confirm.
            tme = (datetime.datetime.today()+datetime.timedelta(minutes=330)).strftime('%a, %d %b %Y %H:%M:%S GMT')
            head = 'HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nCache-Control:public, max-age=31536000\r\n'
            head += 'Expires: %s\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (tme, mimetype, file_size)
            self.wfile.write(head.encode())
            with open(filename, 'rb') as fp:
                while True:
                    data = fp.read(65535)
                    if not data:
                        break
                    self.wfile.write(data)
        except:
            # best-effort download; peer may disconnect mid-transfer
            pass
            #logging.warn("download broken")
    def response_json(self, res_arr):
        """Serialize *res_arr* as JSON and send it as application/json."""
        data = json.dumps(res_arr, indent=0, sort_keys=True)
        self.send_response('application/json', data)
class HTTPServer():
    """Small select()-based HTTP(S) server.

    Listens on one or several (host, port) addresses and hands every
    accepted connection to *handler* on a dedicated thread.
    """
    def __init__(self, address, handler, args=(), use_https=False, cert=""):
        """address: one (host, port) tuple or a list of them.
        handler: factory called as handler(sock, client, args, https).
        cert: path of a PEM file (private key + certificate) for HTTPS."""
        self.sockets = []
        self.running = True
        if isinstance(address, tuple):
            self.server_address = [address]
        else:
            # the server can listen on multiple ports
            self.server_address = address
        self.handler = handler
        self.args = args
        self.use_https = use_https
        self.cert = cert
        self.init_socket()
    def init_socket(self):
        """(Re)create a listening socket for every configured address."""
        for addr in self.server_address:
            self.add_listen(addr)
    def add_listen(self, addr):
        """Bind and listen on *addr*, wrapping with TLS when configured."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind(addr)
        except Exception as e:
            logging.error("bind to %s:%d fail", addr[0], addr[1])
            raise e
        if self.use_https:
            import OpenSSL
            # pick the newest TLS method this pyOpenSSL build offers
            # NOTE(review): if none of the three attributes exists,
            # ssl_version stays unbound and Context() raises NameError.
            if hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
                ssl_version = OpenSSL.SSL.TLSv1_2_METHOD
            elif hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
                ssl_version = OpenSSL.SSL.TLSv1_1_METHOD
            elif hasattr(OpenSSL.SSL, "TLSv1_METHOD"):
                ssl_version = OpenSSL.SSL.TLSv1_METHOD
            ctx = OpenSSL.SSL.Context(ssl_version)
            # the PEM file holds both the private key and the certificate
            fpem = self.cert
            ctx.use_privatekey_file(fpem)
            ctx.use_certificate_file(fpem)
            sock = OpenSSL.SSL.Connection(ctx, sock)
        sock.listen(200)
        self.sockets.append(sock)
        logging.info("server %s:%d started.", addr[0], addr[1])
    def serve_forever(self):
        """Accept loop; runs until shutdown() clears self.running."""
        while self.running:
            r, w, e = select.select(self.sockets, [], [], 3)
            for rsock in r:
                try:
                    (sock, address) = rsock.accept()
                except IOError as e:
                    logging.warn("socket accept fail(errno: %s).", e.args[0])
                    if e.args[0] == 10022:
                        # WSAEINVAL: listening socket is broken; rebuild it.
                        logging.info("restart socket server.")
                        self.server_close()
                        self.init_socket()
                        break
                    # BUGFIX: the original fell through here and called
                    # process_connect() with an unbound (or stale) socket.
                    continue
                self.process_connect(sock, address)
    def process_connect(self, sock, address):
        """Run one handler instance for the accepted connection."""
        #logging.debug("connect from %s:%d", address[0], address[1])
        client_obj = self.handler(sock, address, self.args, self.use_https)
        client_thread = threading.Thread(target=client_obj.handle)
        client_thread.start()
    def shutdown(self):
        """Ask serve_forever() to exit after its current select() tick."""
        self.running = False
    def server_close(self):
        """Close every listening socket."""
        for sock in self.sockets:
            sock.close()
        self.sockets = []
class TestHttpServer(HttpServerHandler):
    """Demo handler: '/' answers OK, '/null' streams generated data, any
    other path is served as a file relative to data_path."""
    def __init__(self, sock, client, args):
        # args carries the directory files are served from
        self.data_path = args
        HttpServerHandler.__init__(self, sock, client, args)
    def generate_random_lowercase(self, n):
        """Return a bytearray of *n* random lowercase ASCII letters."""
        min_lc = ord(b'a')
        len_lc = 26
        ba = bytearray(os.urandom(n))
        for i, b in enumerate(ba):
            ba[i] = min_lc + b % len_lc  # map 0..255 onto 97..122 ('a'..'z')
        return ba
    def do_GET(self):
        url_path = urllib.parse.urlparse(self.path).path
        req = urllib.parse.urlparse(self.path).query
        reqs = urllib.parse.parse_qs(req, keep_blank_values=True)
        logging.debug("GET %s from %s:%d", self.path, self.client_address[0], self.client_address[1])
        if url_path == '/':
            data = "OK\r\n"
            # BUGFIX: wfile is a binary stream; the original wrote str and
            # raised TypeError on Python 3. Encode before writing.
            head = 'HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\n\r\n%s' % (len(data.encode('utf-8')), data)
            self.wfile.write(head.encode('utf-8'))
        elif url_path == '/null':
            # stream `size` bytes (default 1 GB) of generated data
            mimetype = "application/x-binary"
            if "size" in reqs:
                file_size = int(reqs['size'][0])
            else:
                file_size = 1024 * 1024 * 1024
            head = 'HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, file_size)
            self.wfile.write(head.encode('utf-8'))  # BUGFIX: was str on a binary stream
            start = 0
            data = self.generate_random_lowercase(65535)
            while start < file_size:
                left = file_size - start
                send_batch = min(left, 65535)
                self.wfile.write(data[:send_batch])
                start += send_batch
        else:
            # NOTE(review): url_path is attacker-controlled and abspath does
            # not confine the result to data_path (path traversal via ..).
            target = os.path.abspath(os.path.join(self.data_path, url_path[1:]))
            if os.path.isfile(target):
                self.send_file(target, "application/x-binary")
            else:
                # BUGFIX: was a str write on a binary stream
                self.wfile.write(b'HTTP/1.1 404\r\nContent-Length: 0\r\n\r\n')
def main(data_path="."):
    """Start the test HTTP server on port 8880 and block forever."""
    logging.info("listen http on 8880")
    httpd = HTTPServer(('', 8880), TestHttpServer, data_path)
    http_thread = threading.Thread(target=httpd.serve_forever)
    # Thread.setDaemon() is deprecated; assign the attribute directly.
    http_thread.daemon = True
    http_thread.start()
    while True:
        time.sleep(10)
if __name__ == "__main__":
    # BUGFIX: the original tested `len(sys.argv) > 2`, which ignored a
    # single command-line argument (argv has length 2 in that case).
    if len(sys.argv) > 1:
        data_path = sys.argv[1]
    else:
        data_path = "."
    try:
        main(data_path=data_path)
    except Exception:
        import traceback
        traceback.print_exc(file=sys.stdout)
    except KeyboardInterrupt:
        sys.exit()
| |
import base64
import hashlib
import hmac
import re
import time
import warnings
from typing import Optional, Union, List, Iterator
from algoliasearch.exceptions import ValidUntilNotFoundException
from algoliasearch.helpers import endpoint, is_async_available, build_raw_response_batch
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.serializer import QueryParametersSerializer
from algoliasearch.http.verb import Verb
from algoliasearch.responses import (
IndexingResponse,
AddApiKeyResponse,
UpdateApiKeyResponse,
DeleteApiKeyResponse,
RestoreApiKeyResponse,
MultipleIndexBatchIndexingResponse,
DictionaryResponse,
)
from algoliasearch.search_index import SearchIndex
from algoliasearch.configs import SearchConfig
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.requester import Requester
from algoliasearch.http.hosts import CallType
class SearchClient(object):
@property
def app_id(self):
# type: () -> str
return self._config.app_id
def __init__(self, transporter, search_config):
# type: (Transporter, SearchConfig) -> None
self._transporter = transporter
self._config = search_config
def init_index(self, name):
# type: (str) -> SearchIndex
return SearchIndex(self._transporter, self._config, name)
@staticmethod
def create(app_id=None, api_key=None):
# type: (Optional[str], Optional[str]) -> SearchClient
config = SearchConfig(app_id, api_key)
return SearchClient.create_with_config(config)
@staticmethod
def create_with_config(config):
# type: (SearchConfig) -> SearchClient
requester = Requester()
transporter = Transporter(requester, config)
client = SearchClient(transporter, config)
if is_async_available():
from algoliasearch.search_client_async import SearchClientAsync
from algoliasearch.http.transporter_async import TransporterAsync
from algoliasearch.http.requester_async import RequesterAsync
return SearchClientAsync(
client, TransporterAsync(RequesterAsync(), config), config
)
return client
def move_index(self, src_index_name, dst_index_name, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/indexes/{}/operation", src_index_name),
{"operation": "move", "destination": dst_index_name},
request_options,
)
return IndexingResponse(self.init_index(src_index_name), [raw_response])
def copy_index(self, src_index_name, dst_index_name, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/indexes/{}/operation", src_index_name),
{"operation": "copy", "destination": dst_index_name},
request_options,
)
return IndexingResponse(self.init_index(src_index_name), [raw_response])
def copy_settings(self, src_index_name, dst_index_name, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
if request_options is None:
request_options = {}
request_options["scope"] = ["settings"]
return self.copy_index(src_index_name, dst_index_name, request_options)
def copy_synonyms(self, src_index_name, dst_index_name, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
if request_options is None:
request_options = {}
request_options["scope"] = ["synonyms"]
return self.copy_index(src_index_name, dst_index_name, request_options)
def copy_rules(self, src_index_name, dst_index_name, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> IndexingResponse # noqa: E501
if request_options is None:
request_options = {}
request_options["scope"] = ["rules"]
return self.copy_index(src_index_name, dst_index_name, request_options)
def assign_user_id(self, user_id, cluster, request_options=None):
# type: (str, str,Optional[Union[dict, RequestOptions]]) -> dict
if request_options is None:
request_options = RequestOptions.create(self._config)
request_options["X-Algolia-User-ID"] = user_id
return self._transporter.write(
Verb.POST, "1/clusters/mapping", {"cluster": cluster}, request_options
)
def assign_user_ids(self, user_ids, cluster, request_options=None):
# type: (Union[List[dict], Iterator[dict]], str, Optional[Union[dict, RequestOptions]]) -> dict # noqa: E501
return self._transporter.write(
Verb.POST,
"1/clusters/mapping/batch",
{"cluster": cluster, "users": user_ids},
request_options,
)
def remove_user_id(self, user_id, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> dict
if request_options is None:
request_options = RequestOptions.create(self._config)
request_options["X-Algolia-User-ID"] = user_id
return self._transporter.write(
Verb.DELETE, "1/clusters/mapping", None, request_options
)
def list_clusters(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(Verb.GET, "1/clusters", {}, request_options)
def get_user_id(self, user_id, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET, endpoint("1/clusters/mapping/{}", user_id), None, request_options
)
def list_user_ids(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET, "1/clusters/mapping", None, request_options
)
def get_top_user_ids(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET, "1/clusters/mapping/top", None, request_options
)
def search_user_ids(self, query, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.POST, "1/clusters/mapping/search", {"query": query}, request_options
)
    def has_pending_mappings(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Ask whether userID→cluster mappings are still propagating.

        A truthy ``retrieveMappings`` entry in a dict *request_options* is
        popped and forwarded as the ``getClusters`` query parameter.
        """
        retrieve_mappings = None
        if isinstance(request_options, dict):
            # pop() so the raw flag is not forwarded to the API as-is
            retrieve_mappings = request_options.pop(
                "retrieveMappings", retrieve_mappings
            )
        if retrieve_mappings:
            # promote plain dicts (or None) to a RequestOptions instance so
            # query_parameters can be set on it
            if request_options is None or isinstance(request_options, dict):
                request_options = RequestOptions.create(self._config, request_options)
            request_options.query_parameters["getClusters"] = retrieve_mappings
        return self._transporter.read(
            Verb.GET, "1/clusters/mapping/pending", None, request_options
        )
def list_api_keys(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(Verb.GET, "1/keys", None, request_options)
def get_api_key(self, key, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET, endpoint("1/keys/{}", key), None, request_options
)
def delete_api_key(self, key, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> DeleteApiKeyResponse # noqa: E501
raw_response = self._transporter.write(
Verb.DELETE, endpoint("1/keys/{}", key), None, request_options
)
return DeleteApiKeyResponse(self, raw_response, key)
def add_api_key(self, acl, request_options=None):
# type: (list, Optional[Union[dict, RequestOptions]]) -> AddApiKeyResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST, "1/keys", {"acl": acl}, request_options
)
return AddApiKeyResponse(self, raw_response)
def update_api_key(self, key, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> UpdateApiKeyResponse # noqa: E501
if not isinstance(request_options, RequestOptions):
request_options = RequestOptions.create(self._config, request_options)
raw_response = self._transporter.write(
Verb.PUT, endpoint("1/keys/{}", key), {}, request_options
)
return UpdateApiKeyResponse(self, raw_response, request_options)
def restore_api_key(self, key, request_options=None):
# type: (str, Optional[Union[dict, RequestOptions]]) -> RestoreApiKeyResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST, endpoint("1/keys/{}/restore", key), None, request_options
)
return RestoreApiKeyResponse(self, raw_response, key)
@staticmethod
def generate_secured_api_key(parent_api_key, restrictions):
# type: (str, dict) -> str
query_parameters = QueryParametersSerializer.serialize(restrictions)
secured_key = hmac.new(
parent_api_key.encode("utf-8"),
query_parameters.encode("utf-8"),
hashlib.sha256,
).hexdigest()
base64encoded = base64.b64encode(
("{}{}".format(secured_key, query_parameters)).encode("utf-8")
)
return str(base64encoded.decode("utf-8"))
@staticmethod
def get_secured_api_key_remaining_validity(api_key):
# type: (str) -> int
decoded_string = base64.b64decode(api_key)
match = re.search(r"validUntil=(\d+)", str(decoded_string))
if match is None:
raise ValidUntilNotFoundException("ValidUntil not found in api key.")
return int(match.group(1)) - int(round(time.time()))
def list_indices(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(Verb.GET, "1/indexes", None, request_options)
def get_logs(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(Verb.GET, "1/logs", None, request_options)
def multiple_queries(self, queries, request_options=None):
# type: (List[dict], Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.POST, "1/indexes/*/queries", {"requests": queries}, request_options
)
def multiple_get_objects(self, requests, request_options=None):
# type: (List[dict], Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.POST, "1/indexes/*/objects", {"requests": requests}, request_options
)
def multiple_batch(self, operations, request_options=None):
# type: (List[dict], Optional[Union[dict, RequestOptions]]) -> MultipleIndexBatchIndexingResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST, "1/indexes/*/batch", {"requests": operations}, request_options
)
return MultipleIndexBatchIndexingResponse(self, raw_response)
def wait_task(self, index_name, task_id, request_options=None):
# type: (str, int, Optional[Union[dict, RequestOptions]]) -> None
self.init_index(index_name).wait_task(task_id, request_options)
def set_personalization_strategy(self, strategy, request_options=None):
# type: (dict, Optional[Union[dict, RequestOptions]]) -> dict
warnings.warn(
"`%s.%s` is deprecated, use `%s.%s` instead."
% (
"SearchClient",
"set_personalization_strategy",
"PersonalizationClient",
"set_personalization_strategy",
),
DeprecationWarning,
)
return self._transporter.write(
Verb.POST,
"1/recommendation/personalization/strategy",
strategy,
request_options,
)
    def get_personalization_strategy(self, request_options=None):
        # type: (Optional[Union[dict, RequestOptions]]) -> dict
        """Deprecated: use PersonalizationClient.get_personalization_strategy.

        Reads and returns the current personalization strategy.
        """
        warnings.warn(
            "`%s.%s` is deprecated, use `%s.%s` instead."
            % (
                "SearchClient",
                "get_personalization_strategy",
                "PersonalizationClient",
                "get_personalization_strategy",
            ),
            DeprecationWarning,
        )
        return self._transporter.read(
            Verb.GET, "1/recommendation/personalization/strategy", None, request_options
        )
def save_dictionary_entries(
self, dictionary, dictionary_entries, request_options=None
):
# type: (str, List[dict], Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/dictionaries/{}/batch", dictionary),
{
"clearExistingDictionaryEntries": False,
"requests": build_raw_response_batch("addEntry", dictionary_entries),
},
request_options,
)
return DictionaryResponse(self, raw_response)
def replace_dictionary_entries(
self, dictionary, dictionary_entries, request_options=None
):
# type: (str, List[dict], Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/dictionaries/{}/batch", dictionary),
{
"clearExistingDictionaryEntries": True,
"requests": build_raw_response_batch("addEntry", dictionary_entries),
},
request_options,
)
return DictionaryResponse(self, raw_response)
def delete_dictionary_entries(self, dictionary, object_ids, request_options=None):
# type: (str, Iterator[str], Optional[Union[dict, RequestOptions]])-> DictionaryResponse # noqa: E501
request = [{"objectID": object_id} for object_id in object_ids]
raw_response = self._transporter.write(
Verb.POST,
endpoint("1/dictionaries/{}/batch", dictionary),
{
"clearExistingDictionaryEntries": False,
"requests": build_raw_response_batch("deleteEntry", request),
},
request_options,
)
return DictionaryResponse(self, raw_response)
    def clear_dictionary_entries(self, dictionary, request_options=None):
        # type: (str, Optional[Union[dict, RequestOptions]]) -> DictionaryResponse # noqa: E501
        """Remove every entry from *dictionary*.

        Implemented as a replace with an empty entry list.
        """
        return self.replace_dictionary_entries(dictionary, [], request_options)
def search_dictionary_entries(self, dictionary, query, request_options=None):
# type: (str, str, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.POST,
endpoint("1/dictionaries/{}/search", dictionary),
{"query": query},
request_options,
)
def set_dictionary_settings(self, dictionary_settings, request_options=None):
# type: (dict, Optional[Union[dict, RequestOptions]])-> DictionaryResponse # noqa: E501
raw_response = self._transporter.write(
Verb.PUT, "1/dictionaries/*/settings", dictionary_settings, request_options
)
return DictionaryResponse(self, raw_response)
def get_dictionary_settings(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET, "1/dictionaries/*/settings", {}, request_options
)
    def close(self):
        # type: () -> None
        """Close the underlying transporter and release its resources."""
        return self._transporter.close()  # type: ignore
    def _sync(self):
        # type: () -> SearchClient
        """Return self; the synchronous client is already synchronous."""
        return self
def custom_request(self, data, uri, method, call_type, request_options=None):
# type: (dict, str, str, int, Optional[Union[dict, RequestOptions]]) -> dict
if call_type == CallType.WRITE:
return self._transporter.write(method, uri, data, request_options)
else:
return self._transporter.read(method, uri, data, request_options)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from py.combinator import Combinator, RuleType
import py.combinator
from xml.etree.ElementTree import Element, SubElement, ElementTree, tostring
def is_normal_form(rule_type, left, right):
    """Return True iff combining *left* and *right* with *rule_type* keeps
    the derivation in normal form (constraints on composition chains and
    type-raised categories).

    *left* and *right* are parse nodes exposing ``rule_type`` and ``cat``.
    """
    # Fix: the bare names FC, GFC, FA, ... are never imported in this module
    # (only Combinator and RuleType are), so every call raised NameError.
    rt = RuleType
    # The output of a forward composition must not become the functor of a
    # further forward application/composition.
    if (left.rule_type == rt.FC or
            left.rule_type == rt.GFC) and \
            (rule_type == rt.FA or
             rule_type == rt.FC or
             rule_type == rt.GFC):
        return False
    # Mirror constraint for backward composition.  Fix: the original third
    # disjunct read ``left.rule_type == GBC``; by symmetry with the forward
    # case it must test the rule being applied, not the left child.
    if (right.rule_type == rt.BC or
            right.rule_type == rt.GBC) and \
            (rule_type == rt.BA or
             rule_type == rt.BC or
             rule_type == rt.GBC):
        return False
    # A forward type-raised unary result must not feed forward application.
    if left.rule_type == rt.UNARY and \
            rule_type == rt.FA and \
            left.cat.is_forward_type_raised:
        return False
    # Backward mirror of the type-raising constraint.
    if right.rule_type == rt.UNARY and \
            rule_type == rt.BA and \
            right.cat.is_backward_type_raised:
        return False
    # These two checks are subsumed by the first two above; kept to preserve
    # the original control flow exactly.
    if (left.rule_type == rt.FC or left.rule_type == rt.GFC) \
            and (rule_type == rt.FA or rule_type == rt.FC):
        return False
    if (right.rule_type == rt.BC or right.rule_type == rt.GBC) \
            and (rule_type == rt.BA or rule_type == rt.BC):
        return False
    return True
def count_normal_form(trees):
    """Print how many internal Tree nodes of *trees* are in normal form.

    The original nested helper rebound the counters without ``nonlocal``
    (UnboundLocalError on the first Tree) and recursed via ``res(child)``
    instead of ``rec(child)``.  An explicit stack avoids both problems and
    keeps the function Python-2 compatible.
    """
    res = 0
    total = 0
    stack = list(trees)
    while stack:
        node = stack.pop()
        if isinstance(node, Tree):
            total += 1
            res += int(node.is_normal)
            stack.extend(node.children)
    print("normal form:", res, "total:", total)
class Node(object):
    """Base class for derivation-tree nodes: a category plus the rule type
    that produced it."""

    def __init__(self, cat, rule_type):
        # Category of this node and the rule that derived it.
        self.cat, self.rule_type = cat, rule_type
class Leaf(Node):
    """Terminal node: a word together with its lexical category."""
    # (<L N/N NNP NNP Pierre N_73/N_73>)
    def __init__(self, word, cat, position, tag=None):
        # A leaf comes from the lexicon, not from a combinator.
        super(Leaf, self).__init__(cat, RuleType.LEXICON)
        self.word = word
        self.pos = position  # position of the word in the sentence
        self.tag = tag
    def __str__(self):
        """Render in C&C AUTO leaf format: (<L cat pos pos word cat>)."""
        pos = "POS" # dummy
        if self.word in ["{", "("]:
            word = "-LRB-"
        elif self.word in ["}", ")"]:
            word = "-RRB-"
        else:
            # NOTE(review): under Python 3 encode() yields bytes, which
            # format() renders as "b'...'" -- presumably this module targets
            # Python 2 (cf. unicode_literals import); confirm before porting.
            word = self.word.encode("utf-8")
        return "(<L {0} {1} {1} {2} {0}>)".format(
            self.cat, pos, word)
    def _to_xml(self, parent):
        """Append this leaf to *parent* as an <lf> element (C&C XML style)."""
        word = self.word #.encode("utf-8")
        SubElement(parent, "lf",
                {"word": word
                ,"cat": str(self.cat.without_feat)
                ,"start": str(self.pos)
                ,"span": "1"
                ,"lemma": word
                ,"pos": "POS"
                ,"chunk": "CHUNK"
                ,"entity": "O"})
    @property
    def is_normal(self):
        # Leaves never count as normal-form internal nodes.
        return False
    @property
    def headid(self):
        # The head of a single word is the word itself.
        return self.pos
    @property
    def deplen(self):
        # A leaf spans no dependencies.
        return 0
class Tree(Node):
    """Internal derivation node with one or two children.

    Serialization follows the C&C AUTO format, e.g.
    (<T N 1 2> (<L N/N JJ JJ nonexecutive N_43/N_43>) (<L N NN NN director N>) )
    """

    def __init__(self, cat, left_is_head, children, rule=None):
        # Fall back to RuleType.NONE unless an actual Combinator was given.
        rule_type = RuleType.NONE if not isinstance(rule, Combinator) \
                else rule.rule_type
        super(Tree, self).__init__(cat, rule_type)
        self.children = children
        self.left_is_head = left_is_head
        self.op = rule

    def __str__(self):
        # The AUTO format stores 0 when the head is the left child.
        head_index = 0 if self.left_is_head else 1
        parts = [str(c) for c in self.children]
        return "(<T {0} {1} {2}> {3} )".format(
            self.cat, head_index, len(parts), " ".join(parts))

    def _to_xml(self, parent=None):
        """Append this subtree to *parent* as <rule> elements; creates the
        <ccg> root when called without a parent."""
        if parent is None:
            parent = Element("ccg")
        rule = SubElement(parent, "rule",
                {"type": str(self.op)
                ,"cat": str(self.cat.without_feat)})
        for child in self.children:
            child._to_xml(rule)
        return parent

    @property
    def is_normal(self):
        """True iff this node satisfies the normal-form constraints."""
        children = self.children
        if len(children) == 1:
            return True
        # Fix: is_normal_form expects the child *nodes* (it reads their
        # .rule_type and .cat); the original passed the categories, which
        # raised AttributeError on the first binary node.
        return is_normal_form(self.rule_type, children[0], children[1])

    def resolve_op(self, ops):
        """Recover which combinator in *ops* produced this node.

        Raises RuntimeError when no combinator applies or when the resolved
        head direction disagrees with the stored one.
        """
        if len(self.children) == 1:
            self.rule_type = RuleType.UNARY
            self.op = py.combinator.UnaryRule()
        else:
            left, right = self.children
            for op in ops:
                if op.can_apply(left.cat, right.cat) and \
                        op.apply(left.cat, right.cat).strip_feat() == self.cat.strip_feat():
                    self.rule_type = op.rule_type
                    new_head = op.head_is_left(left.cat, right.cat)
                    if self.left_is_head != new_head:
                        # Fix: the original read the nonexistent attribute
                        # ``self.head_is_left`` here, so the diagnostic
                        # itself crashed with AttributeError.
                        print("head error!!!: old: {}, new: {}".format(
                            self.left_is_head, new_head))
                        print(self.show_derivation())
                        raise RuntimeError()
                    self.left_is_head = new_head
                    self.op = op
                    return
            print(left.cat, right.cat, "-->", self.cat, "\n")
            print(self.show_derivation())
            raise RuntimeError()
            # self.rule_type = NONE
            # self.op = Combinator()

    @property
    def headid(self):
        """Sentence position of this subtree's head word."""
        children = self.children
        if len(children) == 1:
            return children[0].headid
        elif len(children) == 2:
            return children[0].headid if self.left_is_head else children[1].headid
        else:
            raise RuntimeError("Number of children of Tree must be 1 or 2.")

    @property
    def deplen(self):
        """Total dependency length accumulated over this subtree."""
        children = self.children
        if len(children) == 1:
            return children[0].deplen
        elif len(children) == 2:
            return (children[1].headid - children[0].headid) + \
                    children[0].deplen + children[1].deplen
        else:
            raise RuntimeError("Number of children of Tree must be 1 or 2.")

    def show_derivation(self):
        """Pretty-print the derivation to stdout (categories over words,
        with rule underlines).  Returns None."""
        catstr = ""
        wordstr = ""
        for leaf in get_leaves(self):
            str_cat = str(leaf.cat)
            str_word = leaf.word
            nextlen = 2 + max(len(str_word), len(str_cat))
            lcatlen = (nextlen - len(str_cat)) // 2
            rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
            catstr += " " * lcatlen + str_cat + " " * rcatlen
            lwordlen = (nextlen - len(str_word)) // 2
            rwordlen = lwordlen + (nextlen - len(str_word)) % 2
            wordstr += " " * lwordlen + str_word + " " * rwordlen

        def rec(lwidth, node):
            rwidth = lwidth
            if isinstance(node, Leaf):
                return max(rwidth, 2 + lwidth + len(str(node.cat)),
                           2 + lwidth + len(node.word))
            if isinstance(node, Tree):
                for child in node.children:
                    rwidth = max(rwidth, rec(rwidth, child))
                op = "" if node.op is None else str(node.op)
                print(lwidth * " " + (rwidth - lwidth) * "-" + str(op))
                str_res = str(node.cat)
                if (len(node.children) > 1):
                    # Arrow marks which child is the head.
                    str_res += " ->" if node.left_is_head else " <-"
                respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
                print(respadlen * " " + str_res)
                return rwidth

        print(catstr.rstrip())
        print(wordstr.rstrip())
        rec(0, self)
def to_xml(trees, out):
    """Write *trees* to the file path *out* as C&C-style XML.

    Entries that are not Tree instances (e.g. bare leaves or parse
    failures) are silently skipped.
    """
    candc = Element("candc")
    for tree in trees:
        if isinstance(tree, Tree):
            candc.append(tree._to_xml())
    # NOTE(review): with the default encoding, ElementTree.write() emits
    # bytes on Python 3, so mode "w" presumably only works on Python 2 --
    # confirm target interpreter before changing to "wb".
    with open(out, "w") as f:
        ElementTree(candc).write(f)
def resolve_op(tree, ops):
    """Resolve combinators for *tree* and all Tree descendants.

    Returns True when the whole subtree resolves, False otherwise.

    Fixes over the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt) is narrowed to the errors Tree.resolve_op actually
    produces, and the boolean results of the recursive calls are now
    propagated -- previously a failure anywhere below the root was
    reported as success.
    """
    try:
        tree.resolve_op(ops)
    except (RuntimeError, AttributeError):
        return False
    for child in tree.children:
        if isinstance(child, Tree) and not resolve_op(child, ops):
            return False
    return True
def get_leaves(tree):
    """Return the Leaf nodes of *tree* in left-to-right order.

    A non-Tree argument is returned as a singleton list.  Raises
    RuntimeError if a child is neither Tree nor Leaf.
    """
    if not isinstance(tree, Tree):
        return [tree]
    collected = []

    def walk(node):
        for child in node.children:
            if isinstance(child, Tree):
                walk(child)
            elif isinstance(child, Leaf):
                collected.append(child)
            else:
                raise RuntimeError()

    walk(tree)
    return collected
| |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from collections import deque
from contextlib import contextmanager
import logging
import os
import re
import sys
from scss import config
from scss.calculator import Calculator
from scss.compiler import _prop_split_re
from scss.compiler import Compiler
from scss.errors import SassEvaluationError
from scss.legacy import Scss
from scss.legacy import _default_scss_vars
from scss.namespace import Namespace
from scss.rule import SassRule
from scss.rule import UnparsedBlock
from scss.scss_meta import BUILD_INFO
from scss.source import SourceFile
from scss.util import profiling
# Python 3 removed raw_input; fall back to input() so the interactive
# shell works on both interpreter lines.
try:
    raw_input
except NameError:
    raw_input = input
log = logging.getLogger(__name__)
# Keep pyScss's internal logging at INFO so the CLI output stays readable.
logging.getLogger('scss').setLevel(logging.INFO)
def main():
    """CLI entry point: parse options, apply global scss config, and
    dispatch to the selected execution mode (test / version / REPL /
    watch / one-shot build)."""
    logging.basicConfig(format="%(levelname)s: %(message)s")
    from optparse import OptionGroup, OptionParser, SUPPRESS_HELP
    # config.LOAD_PATHS may be a comma-separated string or a sequence;
    # normalize to a list either way.
    if hasattr(config.LOAD_PATHS, 'split'):
        initial_load_paths = [p.strip() for p in config.LOAD_PATHS.split(',')]
    else:
        initial_load_paths = list(config.LOAD_PATHS)
    def append_load_path(option, opt_str, value, parser):
        # optparse callback: -I accepts comma/semicolon/os.pathsep-separated
        # lists and de-duplicates while preserving order.
        dest = getattr(parser.values, option.dest)
        paths = value.replace(os.pathsep, ',').replace(';', ',').split(',')
        for path in paths:
            path = path.strip()
            if path and path not in dest:
                dest.append(path)
    parser = OptionParser(usage="Usage: %prog [options] [file]",
                          description="Converts Scss files to CSS.",
                          add_help_option=False)
    parser.add_option("-i", "--interactive", action="store_true",
                      help="Run an interactive Scss shell")
    parser.add_option("-w", "--watch", metavar="DIR",
                      help="Watch the files in DIR, and recompile when they change")
    parser.add_option("-r", "--recursive", action="store_true", default=False,
                      help="Also watch directories inside of the watch directory")
    parser.add_option("-o", "--output", metavar="PATH",
                      help="Write output to PATH (a directory if using watch, a file otherwise)")
    parser.add_option("-s", "--suffix", metavar="STRING",
                      help="If using watch, a suffix added to the output filename (i.e. filename.STRING.css)")
    parser.add_option("--time", action="store_true",
                      help="Ignored, will be removed in 2.0")
    parser.add_option("--debug-info", action="store_true",
                      help="Turns on scss's debugging information")
    parser.add_option("--no-debug-info", action="store_false",
                      dest="debug_info", default=False,
                      help="Turns off scss's debugging information")
    parser.add_option("-T", "--test", action="store_true", help=SUPPRESS_HELP)
    parser.add_option("-t", "--style", metavar="NAME",
                      dest="style", default='nested',
                      help="Output style. Can be nested (default), compact, compressed, or expanded.")
    # NOTE(review): -C shares dest "style" with -t and store_false sets it to
    # False, clobbering the style name -- presumably legacy behavior; confirm
    # before changing.
    parser.add_option("-C", "--no-compress", action="store_false", dest="style", default=True,
                      help="Don't minify outputted CSS")
    parser.add_option("-?", action="help", help=SUPPRESS_HELP)
    parser.add_option("-h", "--help", action="help",
                      help="Show this message and exit")
    parser.add_option("-v", "--version", action="store_true",
                      help="Print version and exit")
    paths_group = OptionGroup(parser, "Resource Paths")
    paths_group.add_option("-I", "--load-path", metavar="PATH", type="string",
                           action="callback", callback=append_load_path, dest="load_paths",
                           default=initial_load_paths,
                           help="Add a scss import path, may be given multiple times")
    paths_group.add_option("-S", "--static-root", metavar="PATH", dest="static_root",
                           help="Static root path (Where images and static resources are located)")
    paths_group.add_option("-A", "--assets-root", metavar="PATH", dest="assets_root",
                           help="Assets root path (Sprite images will be created here)")
    paths_group.add_option("-a", "--assets-url", metavar="URL", dest="assets_url",
                           help="URL to reach the files in your assets_root")
    paths_group.add_option("-F", "--fonts-root", metavar="PATH", dest="fonts_root",
                           help="Fonts root path (Where fonts are located)")
    paths_group.add_option("-f", "--fonts-url", metavar="PATH", dest="fonts_url",
                           help="URL to reach the fonts in your fonts_root")
    paths_group.add_option("--images-root", metavar="PATH", dest="images_root",
                           help="Images root path (Where images are located)")
    paths_group.add_option("--images-url", metavar="PATH", dest="images_url",
                           help="URL to reach the images in your images_root")
    paths_group.add_option("--cache-root", metavar="PATH", dest="cache_root",
                           help="Cache root path (Cache files will be created here)")
    parser.add_option_group(paths_group)
    parser.add_option("--sass", action="store_true",
                      dest="is_sass", default=None,
                      help="Sass mode")
    options, args = parser.parse_args()

    # General runtime configuration
    # (mutates module-level scss.config globals read by the compiler)
    if options.static_root is not None:
        config.STATIC_ROOT = options.static_root
    if options.assets_root is not None:
        config.ASSETS_ROOT = options.assets_root
    if options.fonts_root is not None:
        config.FONTS_ROOT = options.fonts_root
    if options.fonts_url is not None:
        config.FONTS_URL = options.fonts_url
    if options.images_root is not None:
        config.IMAGES_ROOT = options.images_root
    if options.images_url is not None:
        config.IMAGES_URL = options.images_url
    if options.cache_root is not None:
        config.CACHE_ROOT = options.cache_root
    if options.assets_url is not None:
        config.ASSETS_URL = options.assets_url

    # Execution modes
    if options.test:
        run_tests()
    elif options.version:
        print_version()
    elif options.interactive:
        run_repl(options)
    elif options.watch:
        watch_sources(options)
    else:
        do_build(options, args)
def print_version():
    """Print the pyScss build/version banner."""
    print(BUILD_INFO)
def run_tests():
    """Run the pyScss test suite via pytest.

    Raises ImportError with a helpful message when pytest is missing.
    """
    try:
        import pytest
    except ImportError:
        raise ImportError("You need py.test installed to run the test suite.")
    # Fix: pytest.main() takes a list of arguments; the string form was
    # deprecated and removed in pytest 4.  An empty list still prevents
    # pytest from re-consuming our own CLI arguments.
    pytest.main([])
def do_build(options, args):
    """Compile the given Scss/Sass files (or stdin when no args / "-") and
    write the resulting CSS to options.output or stdout.

    Exits with status 3 when the inputs disagree on encoding.
    """
    if options.output is not None:
        out = open(options.output, 'wb')
        opened_file = True
    else:
        out = sys.stdout
        opened_file = False
    # Get the unencoded stream on Python 3
    out = getattr(out, 'buffer', out)
    try:
        css = Scss(scss_opts={
            'style': options.style,
            'debug_info': options.debug_info,
        },
            search_paths=options.load_paths,
        )
        if not args:
            args = ['-']
        source_files = []
        for path in args:
            if path == '-':
                source = SourceFile.from_file(sys.stdin, relpath="<stdin>", is_sass=options.is_sass)
            else:
                source = SourceFile.from_filename(path, is_sass=options.is_sass)
            source_files.append(source)

        encodings = set(source.encoding for source in source_files)
        if len(encodings) > 1:
            sys.stderr.write(
                "Can't combine these files! "
                "They have different encodings: {0}\n"
                .format(', '.join(encodings))
            )
            sys.exit(3)

        output = css.compile(source_files=source_files)
        out.write(output.encode(source_files[0].encoding))
    finally:
        # Fix: the output file was previously never closed, so the last
        # buffered chunk could be lost on interpreter teardown.
        if opened_file:
            out.close()

    for f, t in profiling.items():
        # NOTE(review): no trailing newline and an odd %03f width -- kept
        # as-is to avoid changing user-visible output.
        sys.stderr.write("%s took %03fs" % (f, t))
def watch_sources(options):
    """Watch options.watch for .scss/.sass changes and recompile on the fly.

    Requires the third-party `watchdog` package; exits with status 1 when it
    is missing and status 2 when the output directory is invalid.  Runs until
    interrupted with Ctrl-C.
    """
    import time
    try:
        from watchdog.observers import Observer
        from watchdog.events import PatternMatchingEventHandler
    except ImportError:
        sys.stderr.write("Using watch functionality requires the `watchdog` library: http://pypi.python.org/pypi/watchdog/")
        sys.exit(1)
    if options.output and not os.path.isdir(options.output):
        sys.stderr.write("watch file output directory is invalid: '%s'" % (options.output))
        sys.exit(2)

    class ScssEventHandler(PatternMatchingEventHandler):
        def __init__(self, *args, **kwargs):
            super(ScssEventHandler, self).__init__(*args, **kwargs)
            self.css = Scss(scss_opts={
                'style': options.style,
                'debug_info': options.debug_info,
            },
                search_paths=options.load_paths,
            )
            self.output = options.output
            self.suffix = options.suffix

        def is_valid(self, path):
            # Only top-level stylesheets are compiled; partials (leading "_")
            # are pulled in via @import and skipped here.
            return os.path.isfile(path) and (path.endswith('.scss') or path.endswith('.sass')) and not os.path.basename(path).startswith('_')

        def process(self, path):
            if os.path.isdir(path):
                for f in os.listdir(path):
                    full = os.path.join(path, f)
                    if self.is_valid(full):
                        self.compile(full)
            elif self.is_valid(path):
                self.compile(path)

        def compile(self, src_path):
            fname = os.path.basename(src_path)
            if fname.endswith('.scss') or fname.endswith('.sass'):
                fname = fname[:-5]
                if self.suffix:
                    fname += '.' + self.suffix
                fname += '.css'
            else:
                # you didn't give me a file of the correct type!
                return False

            if self.output:
                dest_path = os.path.join(self.output, fname)
            else:
                dest_path = os.path.join(os.path.dirname(src_path), fname)

            print("Compiling %s => %s" % (src_path, dest_path))
            # Fix: the destination file was previously opened and never
            # closed, leaking a handle on every filesystem event.
            with open(dest_path, 'wb') as dest_file:
                dest_file.write(self.css.compile(scss_file=src_path).encode('utf-8'))

        def on_moved(self, event):
            super(ScssEventHandler, self).on_moved(event)
            self.process(event.dest_path)

        def on_created(self, event):
            super(ScssEventHandler, self).on_created(event)
            self.process(event.src_path)

        def on_modified(self, event):
            super(ScssEventHandler, self).on_modified(event)
            self.process(event.src_path)

    event_handler = ScssEventHandler(patterns=['*.scss', '*.sass'])
    observer = Observer()
    observer.schedule(event_handler, path=options.watch, recursive=options.recursive)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
@contextmanager
def readline_history(fn):
    """Restore readline history from *fn* on entry and persist it on exit.

    Degrades to a no-op when the readline module is unavailable; missing or
    unwritable history files are silently ignored.
    """
    try:
        import readline
    except ImportError:
        readline = None
    if readline is None:
        yield
        return

    def attempt(call):
        # History files may be absent or unwritable; both are fine.
        try:
            call(fn)
        except IOError:
            pass

    attempt(readline.read_history_file)
    try:
        yield
    finally:
        attempt(readline.write_history_file)
def run_repl(is_sass=False):
    """Run the interactive pyScss shell until EOF or Ctrl-C.

    Fix: main() actually calls ``run_repl(options)``, i.e. the whole options
    namespace arrives as *is_sass*, and the original ignored the parameter
    entirely so Sass-syntax mode never reached SassRepl.  Accept either a
    bare boolean or an options object exposing ``is_sass``.
    """
    sass_mode = bool(getattr(is_sass, 'is_sass', is_sass) or False)
    repl = SassRepl(is_sass=sass_mode)
    with readline_history(os.path.expanduser('~/.scss-history')):
        print("Welcome to %s interactive shell" % (BUILD_INFO,))
        while True:
            try:
                in_ = raw_input('>>> ').strip()
                for output in repl(in_):
                    print(output)
            except (EOFError, KeyboardInterrupt):
                print("Bye!")
                return
class SassRepl(object):
    """Interactive evaluator backing the pyScss shell.

    Holds a persistent Namespace/Compiler pair so variables, options, and
    @import-ed definitions survive across入力 lines.  Calling the instance
    with a line of input yields zero or more output strings.
    """
    def __init__(self, is_sass=False):
        # TODO it would be lovely to get these out of here, somehow
        self.namespace = Namespace(variables=_default_scss_vars)
        self.compiler = Compiler(namespace=self.namespace)
        self.compilation = self.compiler.make_compilation()
        self.legacy_compiler_options = {}
        self.source_file = SourceFile.from_string('', '<shell>', is_sass=is_sass)
        self.calculator = Calculator(self.namespace)
    def __call__(self, s):
        """Evaluate one input line; yields rendered output strings.

        Handles, in order: exit/quit, @-directives (@option/@import/@include),
        the ls/show introspection commands, $variable assignments, and
        finally bare expressions evaluated with the calculator.
        """
        # TODO this is kind of invasive; surely it's possible to do this
        # without calling only private methods
        from pprint import pformat
        if s in ('exit', 'quit'):
            raise KeyboardInterrupt
        # Multiple statements may share one line, separated by ';'.
        for s in s.split(';'):
            s = self.source_file.prepare_source(s.strip())
            if not s:
                continue
            elif s.startswith('@'):
                scope = None
                properties = []
                children = deque()
                rule = SassRule(self.source_file, namespace=self.namespace, legacy_compiler_options=self.legacy_compiler_options, properties=properties)
                block = UnparsedBlock(rule, 1, s, None)
                code, name = (s.split(None, 1) + [''])[:2]
                if code == '@option':
                    self.compilation._at_options(self.calculator, rule, scope, block)
                    continue
                elif code == '@import':
                    # TODO this doesn't really work either since there's no path
                    self.compilation._at_import(self.calculator, rule, scope, block)
                    continue
                elif code == '@include':
                    final_cont = ''
                    self.compilation._at_include(self.calculator, rule, scope, block)
                    code = self.compilation._print_properties(properties).rstrip('\n')
                    if code:
                        final_cont += code
                    if children:
                        # TODO this almost certainly doesn't work, and is kind of goofy anyway since @mixin isn't supported
                        self.compilation.children.extendleft(children)
                        self.compilation.parse_children()
                        code = self.compilation._create_css(self.compilation.rules).rstrip('\n')
                        if code:
                            final_cont += code
                    yield final_cont
                    continue
            elif s == 'ls' or s.startswith('show(') or s.startswith('show ') or s.startswith('ls(') or s.startswith('ls '):
                # Introspection: "ls" alone lists categories; "ls vars foo"
                # filters variables whose name contains "foo", etc.
                m = re.match(r'(?:show|ls)(\()?\s*([^,/\\) ]*)(?:[,/\\ ]([^,/\\ )]+))*(?(1)\))', s, re.IGNORECASE)
                if m:
                    name = m.group(2)
                    code = m.group(3)
                    name = name and name.strip().rstrip('s')  # remove last 's' as in functions
                    code = code and code.strip()
                    ns = self.namespace
                    if not name:
                        yield pformat(list(sorted(['vars', 'options', 'mixins', 'functions'])))
                    elif name in ('v', 'var', 'variable'):
                        variables = dict(ns._variables)
                        if code == '*':
                            pass
                        elif code:
                            variables = dict((k, v) for k, v in variables.items() if code in k)
                        else:
                            # Hide internal "$--" variables by default.
                            variables = dict((k, v) for k, v in variables.items() if not k.startswith('$--'))
                        yield pformat(variables)
                    elif name in ('o', 'opt', 'option'):
                        opts = self.legacy_compiler_options
                        if code == '*':
                            pass
                        elif code:
                            opts = dict((k, v) for k, v in opts.items() if code in k)
                        else:
                            opts = dict((k, v) for k, v in opts.items())
                        yield pformat(opts)
                    elif name in ('m', 'mix', 'mixin', 'f', 'func', 'funct', 'function'):
                        if name.startswith('m'):
                            funcs = dict(ns._mixins)
                        elif name.startswith('f'):
                            funcs = dict(ns._functions)
                        if code == '*':
                            pass
                        elif code:
                            funcs = dict((k, v) for k, v in funcs.items() if code in k[0])
                        else:
                            pass
                        # TODO print source when possible
                        yield pformat(funcs)
                    continue
            elif s.startswith('$') and (':' in s or '=' in s):
                # Variable assignment: "$x: expr" or "$x = expr".
                prop, value = [a.strip() for a in _prop_split_re.split(s, 1)]
                prop = self.calculator.do_glob_math(prop)
                value = self.calculator.calculate(value)
                self.namespace.set_variable(prop, value)
                continue
            # TODO respect compress?
            try:
                yield(self.calculator.calculate(s).render())
            except (SyntaxError, SassEvaluationError) as e:
                print("%s" % e, file=sys.stderr)
if __name__ == "__main__":
main()
| |
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import (eigs, eigsh, arpack,
ArpackNoConvergence)
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
# precision for tests
# Decimal digits eigenvalues are rounded to before sorting/comparison,
# keyed by dtype char (single vs double precision).
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N, N))
if complex_:
M = M + 1j * np.random.random((N, N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i, j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.nonzero(i == j)
j[ind] = (j[ind] + 1) % N
M[i, j] = 0
M[j, i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i, j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
    """Random symmetric NxN test matrix from the global numpy RNG.

    ``sparse`` converts to CSR; ``pos_definite`` shifts the diagonal by N*I,
    making the matrix strictly diagonally dominant.
    """
    A = np.random.random((N, N))
    A = 0.5 * (A + A.T)  # symmetrize
    if sparse:
        A = csr_matrix(A)
    if pos_definite:
        A += N * np.eye(N)
    return A
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
    """Assert *actual* is close to *desired* or to its complex conjugate.

    Eigenpairs of real matrices may legitimately come back conjugated.
    """
    try:
        assert_allclose(actual, desired, **kw)
    except AssertionError:
        # retry against the conjugate before giving up
        assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
                  sigma=None, OPpart=None, mode=None):
    """Return sorted indices of the k eigenvalues selected by *which*,
    mirroring how eigs/eigsh order them (after the shift-invert
    transformation when *sigma* is given)."""
    # Apply the spectral transformation the ARPACK mode would see.
    if sigma is None:
        transformed = eigenvalues
    elif mode is None or mode == 'normal':
        if OPpart is None:
            transformed = 1. / (eigenvalues - sigma)
        elif OPpart == 'r':
            transformed = 0.5 * (1. / (eigenvalues - sigma)
                                 + 1. / (eigenvalues - np.conj(sigma)))
        elif OPpart == 'i':
            transformed = -0.5j * (1. / (eigenvalues - sigma)
                                   - 1. / (eigenvalues - np.conj(sigma)))
    elif mode == 'cayley':
        transformed = (eigenvalues + sigma) / (eigenvalues - sigma)
    elif mode == 'buckling':
        transformed = eigenvalues / (eigenvalues - sigma)
    else:
        raise ValueError("mode='%s' not recognized" % mode)
    # Round so that numerically-tied values sort stably.
    reval = np.round(transformed, decimals=_ndigits[typ])

    if which in ('LM', 'SM'):
        order = np.argsort(abs(reval))
    elif which in ('LR', 'SR', 'LA', 'SA', 'BE'):
        order = np.argsort(np.real(reval))
    elif which in ('LI', 'SI'):
        # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
        key = abs(np.imag(reval)) if typ.islower() else np.imag(reval)
        order = np.argsort(key)
    else:
        raise ValueError("which='%s' is unrecognized" % which)

    if which in ('LM', 'LA', 'LR', 'LI'):
        return order[-k:]
    if which in ('SM', 'SA', 'SR', 'SI'):
        return order[:k]
    if which == 'BE':
        # half from each end of the spectrum
        return np.concatenate((order[:k // 2], order[k // 2 - k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
              mattype=np.asarray, OPpart=None, mode='normal'):
    """Run eigs/eigsh on test-case dict *d* and compare against d['eval'].

    *d* holds 'mat' (and 'bmat' for generalized problems) plus the exact
    eigenvalues in 'eval'; *mattype* wraps the matrix (ndarray, CSR, or
    LinearOperator).  Raises AssertionError on mismatch.
    """
    # A 'bmat' key marks a generalized eigenproblem A x = lambda B x.
    general = ('bmat' in d)
    if symmetric:
        eigs_func = eigsh
    else:
        eigs_func = eigs
    # Build a descriptive message so a failure identifies the exact combo.
    if general:
        err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
                                                   typ, which, sigma,
                                                   mattype.__name__,
                                                   OPpart, mode))
    else:
        err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
                                                   typ, which, sigma,
                                                   mattype.__name__,
                                                   OPpart, mode))
    a = d['mat'].astype(typ)
    ac = mattype(a)

    if general:
        b = d['bmat'].astype(typ)
        bc = mattype(b)

    # get exact eigenvalues
    exact_eval = d['eval'].astype(typ.upper())
    ind = argsort_which(exact_eval, typ, k, which,
                        sigma, OPpart, mode)
    exact_eval = exact_eval[ind]

    # compute arpack eigenvalues
    kwargs = dict(which=which, v0=v0, sigma=sigma)
    if eigs_func is eigsh:
        kwargs['mode'] = mode
    else:
        kwargs['OPpart'] = OPpart

    # compute suitable tolerances
    kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
    # on rare occasions, ARPACK routines return results that are proper
    # eigenvalues and -vectors, but not necessarily the ones requested in
    # the parameter which. This is inherent to the Krylov methods, and
    # should not be treated as a failure. If such a rare situation
    # occurs, the calculation is tried again (but at most a few times).
    ntries = 0
    while ntries < 5:
        # solve
        if general:
            try:
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
            except ArpackNoConvergence:
                # retry with a much larger iteration budget
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
        else:
            try:
                eigenvalues, evec = eigs_func(ac, k, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, **kwargs)

        ind = argsort_which(eigenvalues, typ, k, which,
                            sigma, OPpart, mode)
        eigenvalues = eigenvalues[ind]
        evec = evec[:, ind]

        # check eigenvectors
        LHS = np.dot(a, evec)
        if general:
            RHS = eigenvalues * np.dot(b, evec)
        else:
            RHS = eigenvalues * evec

        assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)

        try:
            # check eigenvalues
            assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
                               err_msg=err)
            break
        except AssertionError:
            ntries += 1

    # check eigenvalues
    assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
    """A dict whose repr is a short test-case label instead of its items."""

    def __init__(self, name):
        # Deliberately skips dict.__init__ arguments; starts empty.
        self.name = name

    def __repr__(self):
        return "<%s>" % self.name
class SymmetricParams:
    """Fixture bundle for the eigsh (symmetric/Hermitian) tests.

    Exposes the solver, the valid `which` values, matrix wrappers, the
    sigma->modes map, and lists of standard/generalized test-case dicts
    with exact eigenvalues precomputed by scipy.linalg.eigh.
    """
    def __init__(self):
        self.eigs = eigsh
        self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_modes = {None: ['normal'],
                             0.5: ['normal', 'buckling', 'cayley']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N, hermitian=True,
                             pos_definite=True).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        Mc = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard symmetric problem
        SS = DictWithRepr("std-symmetric")
        SS['mat'] = Ar
        SS['v0'] = v0
        SS['eval'] = eigh(SS['mat'], eigvals_only=True)

        # general symmetric problem
        GS = DictWithRepr("gen-symmetric")
        GS['mat'] = Ar
        GS['bmat'] = M
        GS['v0'] = v0
        GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)

        # standard hermitian problem
        SH = DictWithRepr("std-hermitian")
        SH['mat'] = Ac
        SH['v0'] = v0
        SH['eval'] = eigh(SH['mat'], eigvals_only=True)

        # general hermitian problem
        GH = DictWithRepr("gen-hermitian")
        GH['mat'] = Ac
        GH['bmat'] = M
        GH['v0'] = v0
        GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)

        # general hermitian problem with hermitian M
        GHc = DictWithRepr("gen-hermitian-Mc")
        GHc['mat'] = Ac
        GHc['bmat'] = Mc
        GHc['v0'] = v0
        GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)

        self.real_test_cases = [SS, GS]
        self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
    """Fixture bundle for the eigs (nonsymmetric) tests.

    Mirrors SymmetricParams but with general matrices, a sigma->OPparts map
    for shift-invert, and exact eigenvalues from scipy.linalg.eig.
    """
    def __init__(self):
        self.eigs = eigs
        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_OPparts = {None: [None],
                               0.1: ['r'],
                               0.1 + 0.1j: ['r', 'i']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard real nonsymmetric problem
        SNR = DictWithRepr("std-real-nonsym")
        SNR['mat'] = Ar
        SNR['v0'] = v0
        SNR['eval'] = eig(SNR['mat'], left=False, right=False)

        # general real nonsymmetric problem
        GNR = DictWithRepr("gen-real-nonsym")
        GNR['mat'] = Ar
        GNR['bmat'] = M
        GNR['v0'] = v0
        GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)

        # standard complex nonsymmetric problem
        SNC = DictWithRepr("std-cmplx-nonsym")
        SNC['mat'] = Ac
        SNC['v0'] = v0
        SNC['eval'] = eig(SNC['mat'], left=False, right=False)

        # general complex nonsymmetric problem
        GNC = DictWithRepr("gen-cmplx-nonsym")
        GNC['mat'] = Ac
        GNC['bmat'] = M
        GNC['v0'] = v0
        GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)

        self.real_test_cases = [SNR, GNR]
        self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
    """Exercise eigsh over every real case / dtype / which / mode combo."""
    params = SymmetricParams()
    num_eigs = 2
    for case in params.real_test_cases:
        for dtype_char in 'fd':
            for which in params.which:
                for mattype in params.mattypes:
                    for sigma, modes in params.sigmas_modes.items():
                        for mode in modes:
                            eval_evec(True, case, dtype_char, num_eigs,
                                      which, None, sigma, mattype,
                                      None, mode)
def test_hermitian_modes():
    """Exercise eigsh on the complex (Hermitian) test cases."""
    params = SymmetricParams()
    num_eigs = 2
    for case in params.complex_test_cases:
        for dtype_char in 'FD':
            for which in params.which:
                if which == 'BE':
                    continue  # BE invalid for complex
                for mattype in params.mattypes:
                    for sigma in params.sigmas_modes:
                        eval_evec(True, case, dtype_char, num_eigs,
                                  which, None, sigma, mattype)
def test_symmetric_starting_vector():
    """eigsh must accept a user-supplied starting vector for several k."""
    params = SymmetricParams()
    for num_eigs in (1, 2, 3, 4, 5):
        for case in params.real_test_cases:
            for dtype_char in 'fd':
                start_vec = random.rand(len(case['v0'])).astype(dtype_char)
                eval_evec(True, case, dtype_char, num_eigs, 'LM', start_vec)
def test_symmetric_no_convergence():
    """With too few iterations eigsh must raise ArpackNoConvergence whose
    partial eigenpairs still satisfy A v = w v."""
    np.random.seed(1234)
    mat = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        eigsh(mat, 4, which='LM', v0=mat[:, 0], maxiter=5, tol=tol, ncv=9)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        assert_allclose(dot(mat, v), w * v, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
def test_real_nonsymmetric_modes():
    """Exercise eigs over every real case / dtype / which / OPpart combo."""
    params = NonSymmetricParams()
    num_eigs = 2
    for case in params.real_test_cases:
        for dtype_char in 'fd':
            for which in params.which:
                for mattype in params.mattypes:
                    for sigma, op_parts in params.sigmas_OPparts.items():
                        for op_part in op_parts:
                            eval_evec(False, case, dtype_char, num_eigs,
                                      which, None, sigma, mattype, op_part)
def test_complex_nonsymmetric_modes():
    """Exercise eigs on the complex non-symmetric test cases."""
    params = NonSymmetricParams()
    num_eigs = 2
    for case in params.complex_test_cases:
        for dtype_char in 'DF':
            for which in params.which:
                for mattype in params.mattypes:
                    for sigma in params.sigmas_OPparts:
                        eval_evec(False, case, dtype_char, num_eigs,
                                  which, None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
    """eigs must accept a user-supplied starting vector (no shift)."""
    params = NonSymmetricParams()
    for num_eigs in (1, 2, 3, 4):
        for case in params.complex_test_cases:
            for dtype_char in 'FD':
                n = case['mat'].shape[0]
                start_vec = random.rand(n).astype(dtype_char)
                eval_evec(False, case, dtype_char, num_eigs, "LM",
                          start_vec, None)
def test_general_nonsymmetric_starting_vector():
    """Same as the standard variant: starting vector with the general
    (complex) cases.  NOTE: body is identical to
    test_standard_nonsymmetric_starting_vector in the original."""
    params = NonSymmetricParams()
    for num_eigs in (1, 2, 3, 4):
        for case in params.complex_test_cases:
            for dtype_char in 'FD':
                n = case['mat'].shape[0]
                start_vec = random.rand(n).astype(dtype_char)
                eval_evec(False, case, dtype_char, num_eigs, "LM",
                          start_vec, None)
def test_standard_nonsymmetric_no_convergence():
    """eigs with too few iterations must raise ArpackNoConvergence whose
    partial eigenpairs still satisfy A v = w v."""
    np.random.seed(1234)
    mat = generate_matrix(30, complex_=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        eigs(mat, 4, which='LM', v0=mat[:, 0], maxiter=5, tol=tol)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        for w_i, v_i in zip(err.eigenvalues, err.eigenvectors.T):
            assert_allclose(dot(mat, v_i), w_i * v_i, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
def test_eigen_bad_shapes():
    """eigs must reject a non-square matrix with ValueError."""
    rect = csc_matrix(np.zeros((2, 3)))
    assert_raises(ValueError, eigs, rect)
def test_eigen_bad_kwargs():
    """An unknown `which` selection must raise ValueError."""
    mat = csc_matrix(np.zeros((8, 8)))
    assert_raises(ValueError, eigs, mat, which='XX')
def test_ticket_1459_arpack_crash():
    """Regression test: this particular starting vector used to crash ARPACK."""
    for dtype in (np.float32, np.float64):
        # This test does not seem to catch the issue for float32,
        # but we made the same fix there, just to be sure
        size = 6
        num_eigs = 2
        np.random.seed(2301)
        mat = np.random.random((size, size)).astype(dtype)
        v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
                       -0.34365925382227402451, 0.46122533684552280420,
                       -0.58001341115969040629, -0.78844877570084292984e-01],
                      dtype=dtype)

        # Should not crash:
        eigs(mat, num_eigs, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
    # Check that the linear operators used by the Arpack wrappers are
    # deallocatable by reference counting -- they are big objects, so
    # Python's cyclic GC may not collect them fast enough before
    # running out of memory if eigs/eigsh are called in a tight loop.
    M_d = np.eye(10)
    M_s = csc_matrix(M_d)
    M_o = aslinearoperator(M_d)

    factories = (
        lambda: arpack.SpLuInv(M_s),
        lambda: arpack.LuInv(M_d),
        lambda: arpack.IterInv(M_s),
        lambda: arpack.IterOpInv(M_o, None, 0.3),
        lambda: arpack.IterOpInv(M_o, M_o, 0.3),
    )
    for make_operator in factories:
        with assert_deallocated(make_operator):
            pass
def test_parallel_threads():
    """eigs/eigsh must give identical results when run from many threads."""
    results = []
    v0 = np.random.rand(50)

    def solve():
        mat = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        for solver in (eigs, eigsh):
            w, _ = solver(mat, k=3, v0=v0)
            results.append(w)

    threads = [threading.Thread(target=solve) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Reference run on the main thread; every result must agree with it.
    solve()
    for r in results:
        assert_allclose(r, results[-1])
def test_reentering():
    """A matvec that itself calls eigs must fail gracefully, not crash."""
    def recursive_matvec(x):
        inner = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(inner, k=1)
        return v / w[0]

    A = LinearOperator(matvec=recursive_matvec, dtype=float, shape=(50, 50))

    # The Fortran code is not reentrant, so this fails (gracefully, not crashing)
    assert_raises(RuntimeError, eigs, A, k=1)
    assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
    # Check that issue arpack-ng/#1315 is not present.
    # Adapted from arpack-ng/TESTS/bug_1315_single.c
    # If this fails, then the installed ARPACK library is faulty.
    for dtype in (np.float32, np.float64):
        np.random.seed(1234)
        true_eigs = np.arange(1, 1000 + 1).astype(dtype)
        A = diags([true_eigs], [0], shape=(1000, 1000))
        v0 = np.random.rand(1000).astype(dtype)

        w, _ = eigs(A, k=9, ncv=2 * 9 + 1, which="LM", v0=v0)
        assert_allclose(np.sort(w), np.sort(true_eigs[-9:]),
                        rtol=1e-4)
def test_eigs_for_k_greater():
    """For k at/over the ARPACK limit, eigs falls back to dense eig()."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_expected = eig(A, b=M_dense)
    sparse_expected = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        for k in (3, 4, 5):
            assert_equal(eigs(A, M=M_dense, k=k), dense_expected)
        assert_equal(eigs(A, M=M_sparse, k=5), sparse_expected)

        # M as LinearOperator is rejected by the dense fallback
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # ... and so are LinearOperator or sparse A
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
    """For k at/over the ARPACK limit, eigsh falls back to dense eigh()."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_expected = eigh(A, b=M_dense)
    sparse_expected = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        for k in (4, 5):
            assert_equal(eigsh(A, M=M_dense, k=k), dense_expected)
        assert_equal(eigsh(A, M=M_sparse, k=5), sparse_expected)

        # M as LinearOperator is rejected by the dense fallback
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # ... and so are LinearOperator or sparse A
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
    """For each (which, sigma, dtype): the k eigenvalues returned by eigs
    must be a subset of the k+1 eigenvalues, residuals must be small, and
    'LM' results must come sorted by increasing magnitude."""
    np.random.seed(1)

    n = 10
    # Sparse random matrix with entries rescaled from [0, 1) to (-1, 1).
    A = rand(n, n, density=0.5)
    A.data *= 2
    A.data -= 1

    v0 = np.ones(n)

    whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
    dtypes = [np.float32, np.float64]

    for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
        prev_w = np.array([], dtype=dtype)
        eps = np.finfo(dtype).eps
        for k in range(1, 9):
            w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
                        v0=v0.astype(dtype), tol=0)
            # Residual check: ||A z - z w|| should vanish to dtype precision.
            assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))

            # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
            dist = abs(prev_w[:, None] - w).min(axis=1)
            assert_allclose(dist, 0, atol=np.sqrt(eps))

            prev_w = w

            # Check sort order
            # With a shift, ARPACK orders by the transformed values 1/(w-sigma).
            if sigma is None:
                d = w
            else:
                d = 1 / (w - sigma)

            if which == 'LM':
                # ARPACK is systematic for 'LM', but sort order
                # appears not well defined for other modes
                assert np.all(np.diff(abs(d)) <= 1e-6)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Bare-Metal Deployment Service."""
import os
import sys
import threading
import time
import cgi
import Queue
import re
import socket
import stat
from wsgiref import simple_server
from nova import config
from nova import context as nova_context
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
# Work queue shared between the WSGI POST handler (producer) and the
# single Worker thread (consumer).
QUEUE = Queue.Queue()

LOG = logging.getLogger(__name__)

# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
    """Do iSCSI discovery on portal."""
    portal = '%s:%s' % (portal_address, portal_port)
    utils.execute('iscsiadm', '-m', 'discovery', '-t', 'st', '-p', portal,
                  run_as_root=True,
                  check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
    """Login to an iSCSI target."""
    portal = '%s:%s' % (portal_address, portal_port)
    utils.execute('iscsiadm', '-m', 'node', '-p', portal, '-T', target_iqn,
                  '--login',
                  run_as_root=True,
                  check_exit_code=[0])
    # Ensure the login complete
    time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
    """Logout from an iSCSI target."""
    portal = '%s:%s' % (portal_address, portal_port)
    utils.execute('iscsiadm', '-m', 'node', '-p', portal, '-T', target_iqn,
                  '--logout',
                  run_as_root=True,
                  check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb):
    """Create partitions for root and swap on a disk device.

    :param dev: path of the parent block device
    :param root_mb: root partition size in MB (sfdisk type 83)
    :param swap_mb: swap partition size in MB (sfdisk type 82)
    """
    # Lead in with 1MB to allow room for the partition table itself, otherwise
    # the way sfdisk adjusts doesn't shift the partition up to compensate, and
    # we lose the space.
    # http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
    # raring/view/head:/fdisk/sfdisk.c#L1940
    # sfdisk script: partition 1 = root, partition 2 = swap; the two
    # trailing "0,0;" lines explicitly leave partitions 3 and 4 empty.
    stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' % (root_mb, swap_mb))
    utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
                  run_as_root=True,
                  attempts=3,
                  check_exit_code=[0])
    # avoid "device is busy"
    time.sleep(3)
def is_block_device(dev):
    """Return True if *dev* is a block device, False otherwise."""
    mode = os.stat(dev).st_mode
    return stat.S_ISBLK(mode)
def dd(src, dst):
    """Execute dd from src to dst."""
    # Direct I/O (oflag=direct) bypasses the page cache when writing the
    # image onto the target partition.
    cmd = ('dd', 'if=%s' % src, 'of=%s' % dst, 'bs=1M', 'oflag=direct')
    utils.execute(*cmd,
                  run_as_root=True,
                  check_exit_code=[0])
def mkswap(dev, label='swap1'):
    """Execute mkswap on a device, labelling it for later lookup."""
    cmd = ('mkswap', '-L', label, dev)
    utils.execute(*cmd,
                  run_as_root=True,
                  check_exit_code=[0])
def block_uuid(dev):
    """Get UUID of a block device.

    :param dev: block device path
    :returns: the UUID string reported by blkid, stripped of whitespace
    """
    # BUG FIX: the discarded stderr used to be named `_`, shadowing the
    # gettext translation function imported at module scope.
    out, _err = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
                              run_as_root=True,
                              check_exit_code=[0])
    return out.strip()
def switch_pxe_config(path, root_uuid):
    """Switch a pxe config from deployment mode to service mode."""
    with open(path) as cfg:
        lines = cfg.readlines()

    root = 'UUID=%s' % root_uuid
    root_pattern = re.compile(r'\$\{ROOT\}')
    default_pattern = re.compile('^default .*$')

    # Rewrite in place: substitute the root UUID first, then flip the
    # default boot target (same order as the original implementation).
    with open(path, 'w') as cfg:
        for line in lines:
            line = root_pattern.sub(root, line)
            cfg.write(default_pattern.sub('default boot', line))
def notify(address, port):
    """Notify a node that it becomes ready to reboot."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((address, port))
        sock.send('done')
    finally:
        # Always release the socket, even if connect/send failed.
        sock.close()
def get_dev(address, port, iqn, lun):
    """Returns a device path for given parameters."""
    return ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"
            % (address, port, iqn, lun))
def get_image_mb(image_path):
    """Get size of an image in Megabyte."""
    mb = unit.Mi
    size_bytes = os.path.getsize(image_path)
    # round up size to MB
    return int((size_bytes + mb - 1) / mb)
def work_on_disk(dev, root_mb, swap_mb, image_path):
    """Creates partitions and write an image to the root partition.

    :returns: the UUID of the root partition on success; implicitly None
        (with a warning logged) when any expected device node is missing.
    """
    root_part = "%s-part1" % dev
    swap_part = "%s-part2" % dev

    if not is_block_device(dev):
        LOG.warn(_("parent device '%s' not found"), dev)
        return
    make_partitions(dev, root_mb, swap_mb)
    # NOTE(review): assumes the -part1/-part2 nodes appear by the time
    # make_partitions() returns (it sleeps 3s) — confirm udev timing.
    if not is_block_device(root_part):
        LOG.warn(_("root device '%s' not found"), root_part)
        return
    if not is_block_device(swap_part):
        LOG.warn(_("swap device '%s' not found"), swap_part)
        return
    dd(image_path, root_part)
    mkswap(swap_part)

    try:
        root_uuid = block_uuid(root_part)
    except processutils.ProcessExecutionError as err:
        # Log, then re-raise the original exception.
        with excutils.save_and_reraise_exception():
            LOG.error(_("Failed to detect root device UUID."))
    return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
           root_mb, swap_mb):
    """All-in-one function to deploy a node."""
    dev = get_dev(address, port, iqn, lun)
    image_mb = get_image_mb(image_path)
    # Grow the root partition if the image wouldn't fit in it.
    if image_mb > root_mb:
        root_mb = image_mb
    discovery(address, port)
    login_iscsi(address, port, iqn)
    try:
        root_uuid = work_on_disk(dev, root_mb, swap_mb, image_path)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            # Log output if there was a error
            LOG.error(_("Cmd     : %s"), err.cmd)
            LOG.error(_("StdOut  : %r"), err.stdout)
            LOG.error(_("StdErr  : %r"), err.stderr)
    finally:
        # Always log out, whether the disk work succeeded or not.
        logout_iscsi(address, port, iqn)
    # NOTE(review): work_on_disk() returns None on its warning paths,
    # which would write 'UUID=None' into the PXE config here — confirm
    # whether that failure mode should abort instead.
    switch_pxe_config(pxe_config_path, root_uuid)
    # Ensure the node started netcat on the port after POST the request.
    time.sleep(3)
    notify(address, 10000)
class Worker(threading.Thread):
    """Thread that handles requests in queue."""

    def __init__(self):
        super(Worker, self).__init__()
        # Daemon thread: do not block interpreter shutdown.
        self.setDaemon(True)
        # Cooperative stop flag, polled once per queue-get cycle.
        self.stop = False
        # Seconds to block on QUEUE.get() before re-checking self.stop.
        self.queue_timeout = 1

    def run(self):
        while not self.stop:
            try:
                # Set timeout to check self.stop periodically
                (node_id, params) = QUEUE.get(block=True,
                                              timeout=self.queue_timeout)
            except Queue.Empty:
                pass
            else:
                # Requests comes here from BareMetalDeploy.post()
                LOG.info(_('start deployment for node %(node_id)s, '
                           'params %(params)s'),
                         {'node_id': node_id, 'params': params})
                context = nova_context.get_admin_context()
                try:
                    # Record progress in the DB, then run the deployment.
                    db.bm_node_update(context, node_id,
                                      {'task_state': baremetal_states.DEPLOYING})
                    deploy(**params)
                except Exception:
                    # Any failure marks the node DEPLOYFAIL; the thread
                    # itself keeps running for subsequent requests.
                    LOG.exception(_('deployment to node %s failed'), node_id)
                    db.bm_node_update(context, node_id,
                                      {'task_state': baremetal_states.DEPLOYFAIL})
                else:
                    LOG.info(_('deployment to node %s done'), node_id)
                    db.bm_node_update(context, node_id,
                                      {'task_state': baremetal_states.DEPLOYDONE})
class BareMetalDeploy(object):
    """WSGI server for bare-metal deployment."""

    def __init__(self):
        # Single background worker consuming QUEUE.
        self.worker = Worker()
        self.worker.start()

    def __call__(self, environ, start_response):
        # Only POST is implemented; every other method gets a 501.
        method = environ['REQUEST_METHOD']
        if method == 'POST':
            return self.post(environ, start_response)
        else:
            start_response('501 Not Implemented',
                           [('Content-type', 'text/plain')])
            return 'Not Implemented'

    def post(self, environ, start_response):
        """Validate a deploy request and enqueue it for the Worker."""
        LOG.info(_("post: environ=%s"), environ)
        inpt = environ['wsgi.input']
        length = int(environ.get('CONTENT_LENGTH', 0))

        x = inpt.read(length)
        q = dict(cgi.parse_qsl(x))
        try:
            # Short query keys are set by the deploy ramdisk.
            node_id = q['i']
            deploy_key = q['k']
            address = q['a']
            port = q.get('p', '3260')
            iqn = q['n']
            lun = q.get('l', '1')
            err_msg = q.get('e')
        except KeyError as e:
            start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return "parameter '%s' is not defined" % e

        if err_msg:
            LOG.error(_('Deploy agent error message: %s'), err_msg)

        context = nova_context.get_admin_context()
        d = db.bm_node_get(context, node_id)

        # The deploy key acts as a shared secret; reject mismatches.
        if d['deploy_key'] != deploy_key:
            start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return 'key is not match'

        params = {'address': address,
                  'port': port,
                  'iqn': iqn,
                  'lun': lun,
                  'image_path': d['image_path'],
                  'pxe_config_path': d['pxe_config_path'],
                  'root_mb': int(d['root_mb']),
                  'swap_mb': int(d['swap_mb']),
                  }
        # Restart worker, if needed
        if not self.worker.isAlive():
            self.worker = Worker()
            self.worker.start()
        LOG.info(_("request is queued: node %(node_id)s, params %(params)s"),
                 {'node_id': node_id, 'params': params})
        QUEUE.put((node_id, params))
        # Requests go to Worker.run()
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ''
def main():
    """Parse config, set up logging, and serve the deploy app forever."""
    config.parse_args(sys.argv)
    logging.setup("nova")
    # Rebind the module-level logger now that logging is configured.
    global LOG
    LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
    app = BareMetalDeploy()
    # Listen on all interfaces; port 10000 matches the port notify() targets.
    srv = simple_server.make_server('', 10000, app)
    srv.serve_forever()
| |
from multiprocessing import set_start_method, cpu_count
# 'forkserver' must be selected before any other multiprocessing usage.
set_start_method('forkserver')

import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count())  # or to whatever you want

from argparse import ArgumentParser
from datetime import datetime

# Timestamp baked into checkpoint/log directory names for uniqueness.
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
    """Parse a human-friendly boolean string into True/False.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive).

    Raises:
        ValueError: for any unrecognized spelling. argparse reports a
            ValueError raised by a ``type=`` callable as a usage error,
            the same way it treats ArgumentTypeError.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    # BUG FIX: the original raised argparse.ArgumentTypeError, but only
    # ArgumentParser was imported, so this path crashed with NameError.
    raise ValueError('Boolean value expected.')
ap = ArgumentParser()

ap.add_argument('-d', '--directory', type=str, required=False, default='nalu_tf_save_dir/saves_{}'.format(time_now), help='The tensorflow ckpt save file')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, required=False, default=1, help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, required=False, default=0, help='How many features on the second NALU layer')
ap.add_argument('-ne', '--n_epochs', type=int, required=False, default=200, help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, required=False, default=1, help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, required=False, default=32, help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, required=False, default=1e-3, help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, required=False, default=0.75, help='How much to split the train / test ratio')
ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization')
ap.add_argument('-v', '--verbose', type=str2bool, nargs='?', required=False, default=False, help='Whether to set verbosity = True or False (default)')
ap.add_argument('-ds', '--data_set', type=str, required=False, default='', help='The csv file containing the data to predict with')

try:
    args = vars(ap.parse_args())
except (SystemExit, Exception):
    # Fall back to the declared defaults when parsing fails (e.g. when the
    # script runs in a notebook with foreign sys.argv entries).  argparse
    # signals usage errors via SystemExit, which `except Exception` alone
    # would NOT catch — hence the explicit tuple.
    args = {}
    args['directory'] = ap.get_default('directory')
    args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
    args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
    args['n_epochs'] = ap.get_default('n_epochs')
    args['n_classes'] = ap.get_default('n_classes')
    args['batch_size'] = ap.get_default('batch_size')
    args['learning_rate'] = ap.get_default('learning_rate')
    args['test_size'] = ap.get_default('test_size')
    # BUG FIX: this assignment previously targeted a misspelled dict name
    # (`arts`), raising NameError whenever the fallback path executed.
    args['random_state'] = ap.get_default('random_state')
    args['verbose'] = ap.get_default('verbose')
    args['data_set'] = ap.get_default('data_set')

verbose = args['verbose']
data_set_fname = args['data_set']
print('BEGIN BIG COPY PASTE ')

import pandas as pd
import numpy as np
import pdb

import warnings
warnings.filterwarnings("ignore")  # silence sklearn/xgboost deprecation chatter

from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
# NOTE(review): sklearn.externals.joblib was removed in modern scikit-learn;
# switch to `import joblib` when upgrading.
from sklearn.externals import joblib
from sklearn.metrics import r2_score

import xgboost as xgb

from tqdm import tqdm
from glob import glob

from time import time
start0 = time()  # wall-clock start of the whole script
def setup_features(dataRaw, label='flux', notFeatures=None, pipeline=None,
                   verbose=False, resample=False, returnAll=None):
    """Build a (features, labels) pair from raw calibration data.

    All scaling and transformations must be done with respect to the
    calibration data distributions.

    Args:
        dataRaw (DataFrame | dict | str): raw data, a dict convertible to a
            DataFrame, or a path to a CSV file.
        label (str): column to use as the regression target.
        notFeatures (list | None): columns to exclude from the feature set.
        pipeline (sklearn Pipeline | None): optional transform applied with
            ``fit_transform`` to the feature matrix.
        verbose (bool): print shapes/timings.
        resample (bool): draw each input column from a normal distribution
            using its matching ``*err`` column as the scale.
        returnAll: None -> (features, labels); True -> (features, labels,
            pipeline); 'features' -> features only; 'with raw data' ->
            (features, labels, dataRaw).

    Returns:
        See ``returnAll`` above.
    """
    # BUG FIX: a mutable default ([]) is shared across calls; use None.
    notFeatures = [] if notFeatures is None else notFeatures

    if isinstance(dataRaw, str):
        # BUG FIX: the original referenced an undefined name `filename`.
        dataRaw = pd.read_csv(dataRaw)
    elif isinstance(dataRaw, dict):
        dataRaw = pd.DataFrame(dataRaw)
    elif not isinstance(dataRaw, pd.DataFrame):
        raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')

    inputData = dataRaw.copy()

    # Normalize the pixel (PLD) columns so each row sums to 1.
    pixCols = [colname for colname in inputData.columns
               if 'pix' in colname.lower() or 'pld' in colname.lower()]
    PLDnorm = np.sum(np.array(inputData[pixCols]), axis=1)
    inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
    # NOTE(review): in the non-resample branch below, inputData is rebuilt
    # from dataRaw, so this normalization is discarded there — preserved
    # from the original; confirm whether that is intended.

    # Assign the labels
    # BUG FIX: this comprehension previously tested `colname`, a name that
    # does not exist in this scope (comprehension variables do not leak in
    # Python 3), raising NameError.
    n_PLD = len([key for key in dataRaw.keys()
                 if 'err' not in key.lower()
                 and ('pix' in key.lower() or 'pld' in key.lower())])
    input_labels = [colname for colname in dataRaw.columns
                    if colname not in notFeatures and 'err' not in colname.lower()]
    errors_labels = [colname for colname in dataRaw.columns
                     if colname not in notFeatures and 'err' in colname.lower()]

    start = time()
    if resample:
        print("Resampling ", end=" ")
        inputData = pd.DataFrame(
            {colname: np.random.normal(dataRaw[colname], dataRaw[colerr])
             for colname, colerr in tqdm(zip(input_labels, errors_labels),
                                         total=len(input_labels))})
        print("took {} seconds".format(time() - start))
    else:
        inputData = pd.DataFrame({colname: dataRaw[colname] for colname in input_labels})

    labels = dataRaw[label].values

    # explicitly remove the label from the feature set
    if label in inputData.columns:
        inputData.drop(label, axis=1, inplace=True)

    feature_columns = [colname for colname in inputData.columns if colname not in notFeatures]
    print('\n\n', 'flux' in notFeatures, 'flux' in feature_columns, '\n\n')
    features = inputData[feature_columns].values

    if verbose:
        print('Shape of Features Array is', features.shape)

    if verbose:
        start = time()
    features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features
    if verbose:
        print('took {} seconds'.format(time() - start))

    collection = features_trnsfrmd, labels

    if returnAll == True:
        collection = features_trnsfrmd, labels, pipeline

    if returnAll == 'features':
        collection = features_trnsfrmd

    if returnAll == 'with raw data':
        # BUG FIX: `collection` is a tuple; the original called .append().
        collection = collection + (dataRaw,)

    return collection
# ## Load CSVs data
# Columns that get normalized by the median flux.
tobe_flux_normalized = ['fluxerr', 'bg_flux', 'sigma_bg_flux', 'flux']
# Columns excluded from the feature matrix (targets / diagnostics).
spitzerCalNotFeatures = ['flux', 'fluxerr', 'dn_peak', 'xycov', 't_cernox', 'xerr', 'yerr', 'sigma_bg_flux']

spitzerCalFilename = 'pmap_ch2_0p1s_x4_rmulti_s3_7.csv' if data_set_fname == '' else data_set_fname
spitzerCalRawData = pd.read_csv(spitzerCalFilename)

for key in tobe_flux_normalized:
    spitzerCalRawData[key] = spitzerCalRawData[key] / np.median(spitzerCalRawData['flux'].values)

# Synthesized uncertainty columns used if resampling is enabled.
spitzerCalRawData['bmjd_err'] = np.median(0.5*np.diff(spitzerCalRawData['bmjd']))
# NOTE(review): np_err is derived from sqrt(yerr) — presumably a proxy;
# confirm against the data dictionary.
spitzerCalRawData['np_err'] = np.sqrt(spitzerCalRawData['yerr'])

# Give every pixel/PLD column a matching *_err column scaled by fluxerr.
for colname in spitzerCalRawData.columns:
    if 'err' not in colname.lower() and ('pix' in colname.lower() or 'pld' in colname.lower()):
        spitzerCalRawData[colname+'_err'] = spitzerCalRawData[colname] * spitzerCalRawData['fluxerr']

start = time()
print("Transforming Data ", end=" ")

operations = []
# header = 'GBR' if do_gbr else 'RFI' if do_rfi else 'STD'
pipe = Pipeline(operations) if len(operations) else None

# Only exclude columns that actually exist in this CSV.
not_features_now = []
for feat_name in spitzerCalNotFeatures:
    if feat_name in spitzerCalRawData.columns:
        not_features_now.append(feat_name)

features, labels, pipe_fitted = setup_features(dataRaw=spitzerCalRawData,
                                               pipeline=pipe,
                                               verbose=verbose,
                                               notFeatures=not_features_now,
                                               resample=False,
                                               returnAll=True)

print('END OF BIG COPY PASTE')
print('BEGIN NEW HyperParameter Optimization.')
from sklearn.metrics import r2_score
''' NALU: Neural Arithmetic Logic Unit
NALU uses memory and logic gates to train a unique TF layer to modify the gradients of the weights.
This seems to be very similar to an LSTM layer, but for a non-RNN.
This code has been specifically implemented with tensorflow.
Code source: https://github.com/grananqvist/NALU-tf
Original paper: https://arxiv.org/abs/1808.00508 (Trask et al.)
'''
import numpy as np
import tensorflow as tf
def nalu(input_layer, num_outputs):
    """Neural Arithmetic Logic Unit tensorflow layer (TF1 API).

    Arguments:
        input_layer - A Tensor representing previous layer
        num_outputs - number of output units

    Returns:
        A tensor representing the output of NALU
    """
    shape = (int(input_layer.shape[-1]), num_outputs)

    # define variables: W_hat/M_hat jointly parameterize the weight matrix,
    # G parameterizes the add-vs-multiply gate.
    W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))

    # operations according to paper
    # W = tanh(W_hat) * sigmoid(M_hat) biases weights toward {-1, 0, +1}.
    W = tf.tanh(W_hat) * tf.sigmoid(M_hat)
    # Multiplicative path computed in log-space; 1e-7 guards log(0).
    m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))
    # Learned gate g blends the additive path `a` with the multiplicative `m`.
    g = tf.sigmoid(tf.matmul(input_layer, G))
    a = tf.matmul(input_layer, W)
    out = g * a + (1 - g) * m

    return out
def generate_dataset(size=10000, op='sum', n_features=2):
    """Create a toy dataset of random digits and their sum or product.

    Arguments:
        size - number of samples to generate
        op - operation the labels represent: 'sum' (default) or 'prod'
        n_features - number of input columns per sample

    Returns:
        X - (size, n_features) array of random integers in [0, 9)
        Y - (size, 1) array of row-wise sums or products
    """
    X = np.random.randint(9, size=(size, n_features))
    reducer = np.prod if op == 'prod' else np.sum
    Y = reducer(X, axis=1, keepdims=True)
    return X, Y
def chisq(y_true, y_pred, y_error): return np.sum(((y_true-y_pred)/y_error)**2.)
if __name__ == "__main__":
    # Hyperparameters / run configuration from the CLI (or defaults).
    N_FEATURES = features.shape[-1]
    EXPORT_DIR = args['directory']
    N_NALU_LAYERS = args['n_nalu_layers']
    N_NALU_NEURONS = args['n_nalu_neurons'] if args['n_nalu_neurons'] > 0 else N_FEATURES
    N_CLASSES = args['n_classes']  # = 1 for regression
    TEST_SIZE = args['test_size']
    RANDOM_STATE = args['random_state']

    N_EPOCHS = args['n_epochs']
    LEARNING_RATE = args['learning_rate']
    BATCH_SIZE = args['batch_size']

    EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES, BATCH_SIZE, LEARNING_RATE, N_EPOCHS, TEST_SIZE, RANDOM_STATE)
    print("Saving models to path: {}".format(EXPORT_DIR))

    idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
    X_data, Y_data = features[idx_train], labels[idx_train][:, None]

    # Force an integer number of batches by dropping the last "< BATCH_SIZE"
    # number of samples.
    LAST_BIT = X_data.shape[0] - BATCH_SIZE * (X_data.shape[0] // BATCH_SIZE)
    # BUG FIX: when BATCH_SIZE divides the data evenly, LAST_BIT == 0 and
    # X_data[:-0] is an *empty* slice, silently discarding all training
    # data; keep the full arrays in that case.
    if LAST_BIT:
        X_data_use = X_data[:-LAST_BIT].copy()
        Y_data_use = Y_data[:-LAST_BIT].copy()
    else:
        X_data_use = X_data.copy()
        Y_data_use = Y_data.copy()

    N_FEATURES = X_data.shape[-1]

    # Per-epoch metrics collected for the final CSV dump.
    output_dict = {}
    output_dict['loss'] = np.zeros(N_EPOCHS)
    output_dict['accuracy'] = np.zeros(N_EPOCHS)
    output_dict['R2_train'] = np.zeros(N_EPOCHS)
    output_dict['R2_test'] = np.zeros(N_EPOCHS)
    output_dict['chisq_train'] = np.zeros(N_EPOCHS)
    output_dict['chisq_test'] = np.zeros(N_EPOCHS)

    with tf.device("/cpu:0"):
        # tf.reset_default_graph()

        # define placeholders and network
        X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
        Y_true = tf.placeholder(tf.float32, shape=[None, 1])

        # Setup NALU Layers (stacked: each feeds the next)
        nalu_layers = {'nalu0': nalu(X, N_NALU_NEURONS)}
        for kn in range(1, N_NALU_LAYERS):
            nalu_layers['nalu{}'.format(kn)] = nalu(nalu_layers['nalu{}'.format(kn - 1)], N_NALU_NEURONS)

        Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS - 1)], N_CLASSES)  # N_CLASSES = 1 for regression

        # loss and train operations
        loss = tf.nn.l2_loss(Y_pred - Y_true)  # NALU uses mse
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
        train_op = optimizer.minimize(loss)

        # Add an op to initialize the variables.
        init_op = tf.global_variables_initializer()

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()  # max_to_keep=N_EPOCHS)

        sess_config = tf.ConfigProto(
            device_count={"CPU": cpu_count()},
            inter_op_parallelism_threads=cpu_count(),
            intra_op_parallelism_threads=cpu_count())

        with tf.Session(config=sess_config) as sess:
            ''' Tensorboard Readouts'''
            ''' Training R-Squared Score'''
            total_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, tf.reduce_mean(Y_true))))
            unexplained_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, Y_pred)))
            R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))

            # ''' Testing R-Squared Score'''
            # Y_pred_test = Y_pred.eval(feed_dict={X: features[idx_test]})
            # total_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, tf.reduce_mean(Y_data_use))))
            # unexplained_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, Y_pred_test)))
            # R_squared_test = tf.subtract(1, tf.div(unexplained_error, total_error))

            ''' Loss and RMSE '''
            # NOTE: this rebinds `loss` (sum of |error|-style) for the
            # summaries; train_op above still minimizes the original l2_loss.
            squared_error = tf.square(tf.subtract(Y_true, Y_pred))
            loss = tf.reduce_sum(tf.sqrt(tf.cast(squared_error, tf.float32)))
            rmse = tf.sqrt(tf.reduce_mean(tf.cast(squared_error, tf.float32)))

            ''' Declare Scalar Tensorboard Terms'''
            tf.summary.scalar('loss', loss)
            tf.summary.scalar('RMSE', rmse)
            tf.summary.scalar('R_sqrd', R_squared)

            ''' Declare Histogram Tensorboard Terms'''
            # Squared Error Histogram
            tf.summary.histogram('SqErr Hist', squared_error)

            # NALU Layers Histogram
            for kn in range(N_NALU_LAYERS):
                tf.summary.histogram('NALU{}'.format(kn), nalu_layers['nalu{}'.format(kn)])

            ''' Merge all the summaries and write them out to `export_dir` + `/logs_train_`time_now`` '''
            merged = tf.summary.merge_all()

            ''' Output all summaries to `export_dir` + `/logs_train_`time_now`` '''
            train_writer = tf.summary.FileWriter(EXPORT_DIR + '/logs_train_{}'.format(time_now), sess.graph)
            # test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')

            ''' END Tensorboard Readout Step'''

            sess.run(init_op)
            best_test_r2 = 0
            for ep in tqdm(range(N_EPOCHS)):
                i = 0
                gts = 0

                # for k in range(N_EPOCHS):
                # batch_now = range(k*N_EPOCHS, (k+1)*N_EPOCHS)
                while i < len(X_data_use):
                    xs, ys = X_data_use[i:i + BATCH_SIZE], Y_data_use[i:i + BATCH_SIZE]

                    _, ys_pred, l = sess.run([train_op, Y_pred, loss],
                                             feed_dict={X: xs, Y_true: ys})

                    # calculate number of correct predictions from batch
                    gts += np.sum(np.isclose(ys, ys_pred, atol=1e-4, rtol=1e-4))

                    i += BATCH_SIZE

                ytest_pred = Y_pred.eval(feed_dict={X: features[idx_test]})
                test_r2 = r2_score(labels[idx_test][:, None], ytest_pred)
                # print("Test R2 Score: {}".format(test_r2_score))
                acc = gts / len(Y_data_use)
                train_r2 = r2_score(ys, ys_pred)
                print('epoch {}, loss: {:.5}, accuracy: {:.5}, Batch R2: {:.5}, Test R2: {:.5}'.format(ep, l, acc, train_r2, test_r2))

                output_dict['loss'][ep] = l
                output_dict['accuracy'][ep] = acc
                output_dict['R2_train'][ep] = train_r2
                output_dict['R2_test'][ep] = test_r2
                # NOTE(review): after the while-loop, `i` points past the end
                # of the data, so fluxerr[i:i+BATCH_SIZE] may be empty here —
                # confirm which slice the train chi-squared should use.
                output_dict['chisq_train'][ep] = chisq(ys.flatten(), ys_pred.flatten(), spitzerCalRawData['fluxerr'][i:i + BATCH_SIZE])
                output_dict['chisq_test'][ep] = chisq(labels[idx_test], ytest_pred.flatten(), spitzerCalRawData['fluxerr'][idx_test])

                save_path = saver.save(sess, EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
                # print("Model saved in path: %s" % save_path)

                if test_r2 >= best_test_r2:
                    best_test_r2 = test_r2
                    ''' Store the Best Scored Test-R2 '''
                    save_path = saver.save(sess, EXPORT_DIR + "best_test_r2/model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))

            ep = '_FINAL'
            save_path = saver.save(sess, EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
            print("Model saved in path: %s" % save_path)

            try:
                pd.DataFrame(output_dict, index=range(N_EPOCHS)).to_csv(EXPORT_DIR + "model_loss_acc_BatchR2_TestR2_DataFrame.csv")
            except Exception as e:
                print('DataFrame to CSV broke because', str(e))
'''
with tf.name_scope("loss"):
def tf_nll(labels, output, uncs, coeff=1):
error = output - labels
return tf.reduce_sum(tf.divide(tf.squared_difference(output, labels) , tf.square(uncs)))# + tf.log(tf.square(uncs))
#return tf.reduce_sum(1 * (coeff * np.log(2*np.pi) + coeff * tf.log(uncs) + (0.5/uncs) * tf.pow(error, 2)))
negloglike = tf_nll(labels=y, output=output, uncs=unc)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([negloglike] + reg_losses, name="chisq")
with tf.name_scope("eval"):
accuracy = tf.reduce_mean(tf.squared_difference(output, y, name="accuracy"))
SqErrRatio= tf.divide(accuracy, tf.reduce_mean(tf.squared_difference(y, tf.reduce_mean(y))))
r2_acc = 1.0 - SqErrRatio
chsiqMean = tf_nll(labels=y, output=tf.reduce_mean(y), uncs=unc)
chisqModel= tf_nll(labels=y, output=output, uncs=unc)
rho2_acc = 1.0 - chisqModel / chsiqMean"
]
},mse_summary = tf.summary.scalar('train_acc' , accuracy )
loss_summary = tf.summary.scalar('loss' , loss )
nll_summary = tf.summary.scalar('negloglike', negloglike)
r2s_summary = tf.summary.scalar('r2_acc' , r2_acc )
p2s_summary = tf.summary.scalar('rho2_acc' , rho2_acc )
val_summary = tf.summary.scalar('val_acc' , accuracy )
# hid1_hist = tf.summary.histogram('hidden1', hidden1)
# hid2_hist = tf.summary.histogram('hidden1', hidden1)
# hid3_hist = tf.summary.histogram('hidden1', hidden1)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
'''
| |
"""
--------------------------------------------------------------------------------------------
Extracted from scikit-learn to ensure basic compatibility
without creating an explicit dependency.
For the original code see
http://scikit-learn.org/
and
https://github.com/scikit-learn
--------------------------------------------------------------------------------------------
Base classes for all estimators.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from pyemma._ext import six
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned
    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone element-wise, preserving the container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            # Not an estimator: fall back to a plain deep copy.
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    # Rebuild the estimator from its (shallow) constructor parameters,
    # cloning each parameter value with safe=False.
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            # (that could be arbitrarily expensive): spot-check shape,
            # dtype and first/last elements instead.
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                # Zero-dimensional or empty arrays: full comparison is cheap.
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot-check class, first/last stored values, nnz and shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            new_obj_val = new_object_params[name]
            params_set_val = params_set[name]
            # The following construct is required to check equality on special
            # singletons such as np.nan that are not equal to them-selves:
            equality_test = (new_obj_val == params_set_val or
                            new_obj_val is params_set_val)
        if not equality_test:
            # The constructor did not faithfully store this parameter.
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
# NOTE: I have renamed this from BaseEstimator to Parametric in order to also use it for Models
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator.

        Returns the sorted names of the explicit arguments of
        ``cls.__init__`` (excluding ``self``).
        """
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent.  inspect.getargspec() was removed in Python 3.11;
        # prefer getfullargspec() and fall back only on Python 2.
        try:
            arg_spec = inspect.getfullargspec(init)
        except AttributeError:  # Python 2 has no getfullargspec
            arg_spec = inspect.getargspec(init)
        if arg_spec.varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Copy before mutating: the argspec's list should not be altered.
        args = list(arg_spec.args)
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                # Undo the simplefilter() pushed above.
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # Flatten nested estimator params as '<name>__<subname>'.
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of this estimator.

        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        # dict.items() is equivalent to six.iteritems() for iteration.
        for key, value in params.items():
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case: delegate to the sub-estimator
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self

    def __repr__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
# Currently unused
# ###############################################################################
# class ClassifierMixin(object):
# """Mixin class for all classifiers in scikit-learn."""
# _estimator_type = "classifier"
#
# def score(self, X, y, sample_weight=None):
# """Returns the mean accuracy on the given test data and labels.
# In multi-label classification, this is the subset accuracy
# which is a harsh metric since you require for each sample that
# each label set be correctly predicted.
# Parameters
# ----------
# X : array-like, shape = (n_samples, n_features)
# Test samples.
# y : array-like, shape = (n_samples) or (n_samples, n_outputs)
# True labels for X.
# sample_weight : array-like, shape = [n_samples], optional
# Sample weights.
# Returns
# -------
# score : float
# Mean accuracy of self.predict(X) wrt. y.
# """
# from .metrics import accuracy_score
# return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
# Currently unused
# ###############################################################################
# class RegressorMixin(object):
# """Mixin class for all regression estimators in scikit-learn."""
# _estimator_type = "regressor"
#
# def score(self, X, y, sample_weight=None):
# """Returns the coefficient of determination R^2 of the prediction.
# The coefficient R^2 is defined as (1 - u/v), where u is the regression
# sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
# sum of squares ((y_true - y_true.mean()) ** 2).sum().
# Best possible score is 1.0, lower values are worse.
# Parameters
# ----------
# X : array-like, shape = (n_samples, n_features)
# Test samples.
# y : array-like, shape = (n_samples) or (n_samples, n_outputs)
# True values for X.
# sample_weight : array-like, shape = [n_samples], optional
# Sample weights.
# Returns
# -------
# score : float
# R^2 of self.predict(X) wrt. y.
# """
#
# from .metrics import r2_score
# return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""
    _estimator_type = "clusterer"

    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # Default, non-optimized implementation: delegate to fit() and hand
        # back the labels it stored.  Subclasses should override this when a
        # faster combined fit-and-predict path exists.
        self.fit(X)
        computed_labels = self.labels_
        return computed_labels
# class BiclusterMixin(object):
# """Mixin class for all bicluster estimators in scikit-learn"""
#
# @property
# def biclusters_(self):
# """Convenient way to get row and column indicators together.
# Returns the ``rows_`` and ``columns_`` members.
# """
# return self.rows_, self.columns_
#
# def get_indices(self, i):
# """Row and column indices of the i'th bicluster.
# Only works if ``rows_`` and ``columns_`` attributes exist.
# Returns
# -------
# row_ind : np.array, dtype=np.intp
# Indices of rows in the dataset that belong to the bicluster.
# col_ind : np.array, dtype=np.intp
# Indices of columns in the dataset that belong to the bicluster.
# """
# rows = self.rows_[i]
# columns = self.columns_[i]
# return np.nonzero(rows)[0], np.nonzero(columns)[0]
#
# def get_shape(self, i):
# """Shape of the i'th bicluster.
# Returns
# -------
# shape : (int, int)
# Number of rows and columns (resp.) in the bicluster.
# """
# indices = self.get_indices(i)
# return tuple(len(i) for i in indices)
#
# def get_submatrix(self, i, data):
# """Returns the submatrix corresponding to bicluster `i`.
# Works with sparse matrices. Only works if ``rows_`` and
# ``columns_`` attributes exist.
# """
# from .utils.validation import check_array
# data = check_array(data, accept_sparse='csr')
# row_ind, col_ind = self.get_indices(i)
# return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Default, non-optimized implementation.  Dispatch on arity:
        # unsupervised transformers are fit on X alone, supervised ones on
        # (X, y); either way the fitted transformer then transforms X.
        if y is None:
            fitted = self.fit(X, **fit_params)
        else:
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # this is just a tag for the moment: the class deliberately adds no
    # attributes or behavior.
###############################################################################
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Estimators advertise their role via the `_estimator_type` attribute;
    # anything without the attribute is treated as "not a classifier".
    estimator_tag = getattr(estimator, "_estimator_type", None)
    return estimator_tag == "classifier"
def is_regressor(estimator):
    """Returns True if the given estimator is (probably) a regressor."""
    # Mirror of is_classifier(): check the `_estimator_type` tag, defaulting
    # to None when the attribute is absent.
    estimator_tag = getattr(estimator, "_estimator_type", None)
    return estimator_tag == "regressor"
| |
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from .keywordgroup import KeywordGroup
class _SelectElementKeywords(KeywordGroup):
    # Public

    def get_list_items(self, locator):
        """Returns the values in the select list identified by `locator`.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        select, options = self._get_select_list_options(locator)
        return self._get_labels_for_options(options)

    def get_selected_list_label(self, locator):
        """Returns the visible label of the selected element from the select list identified by `locator`.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        select = self._get_select_list(locator)
        return select.first_selected_option.text

    def get_selected_list_labels(self, locator):
        """Returns the visible labels of selected elements (as a list) from the select list identified by `locator`.
        Fails if there is no selection.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        select, options = self._get_select_list_options_selected(locator)
        if len(options) == 0:
            # BUGFIX: the '%s' placeholder was never substituted.
            raise ValueError("Select list with locator '%s' does not have any selected values" % locator)
        return self._get_labels_for_options(options)

    def get_selected_list_value(self, locator):
        """Returns the value of the selected element from the select list identified by `locator`.
        Return value is read from `value` attribute of the selected element.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        select = self._get_select_list(locator)
        return select.first_selected_option.get_attribute('value')

    def get_selected_list_values(self, locator):
        """Returns the values of selected elements (as a list) from the select list identified by `locator`.
        Fails if there is no selection.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        select, options = self._get_select_list_options_selected(locator)
        if len(options) == 0:
            # BUGFIX: the '%s' placeholder was never substituted.
            raise ValueError("Select list with locator '%s' does not have any selected values" % locator)
        return self._get_values_for_options(options)

    def list_selection_should_be(self, locator, *items):
        """Verifies the selection of select list identified by `locator` is exactly `*items`.
        If you want to test that no option is selected, simply give no `items`.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        items_str = items and "option(s) [ %s ]" % " | ".join(items) or "no options"
        self._info("Verifying list '%s' has %s selected." % (locator, items_str))
        items = list(items)
        self.page_should_contain_list(locator)
        select, options = self._get_select_list_options_selected(locator)
        if not items and len(options) == 0:
            return
        selected_values = self._get_values_for_options(options)
        selected_labels = self._get_labels_for_options(options)
        err = "List '%s' should have had selection [ %s ] but it was [ %s ]" \
            % (locator, ' | '.join(items), ' | '.join(selected_labels))
        # Every expected item must be selected (by value or by label) ...
        for item in items:
            if item not in selected_values + selected_labels:
                raise AssertionError(err)
        # ... and nothing may be selected beyond the expected items.
        for selected_value, selected_label in zip(selected_values, selected_labels):
            if selected_value not in items and selected_label not in items:
                raise AssertionError(err)

    def list_should_have_no_selections(self, locator):
        """Verifies select list identified by `locator` has no selections.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        self._info("Verifying list '%s' has no selection." % locator)
        select, options = self._get_select_list_options_selected(locator)
        if options:
            selected_labels = self._get_labels_for_options(options)
            items_str = " | ".join(selected_labels)
            raise AssertionError("List '%s' should have had no selection "
                                 "(selection was [ %s ])" % (locator, items_str))

    def page_should_contain_list(self, locator, message='', loglevel='INFO'):
        """Verifies select list identified by `locator` is found from current page.
        See `Page Should Contain Element` for explanation about `message` and
        `loglevel` arguments.
        Key attributes for lists are `id` and `name`. See `introduction` for
        details about locating elements.
        """
        self._page_should_contain_element(locator, 'list', message, loglevel)

    def page_should_not_contain_list(self, locator, message='', loglevel='INFO'):
        """Verifies select list identified by `locator` is not found from current page.
        See `Page Should Contain Element` for explanation about `message` and
        `loglevel` arguments.
        Key attributes for lists are `id` and `name`. See `introduction` for
        details about locating elements.
        """
        self._page_should_not_contain_element(locator, 'list', message, loglevel)

    def select_all_from_list(self, locator):
        """Selects all values from multi-select list identified by `locator`.
        Key attributes for lists are `id` and `name`. See `introduction` for
        details about locating elements.
        """
        self._info("Selecting all options from list '%s'." % locator)
        select = self._get_select_list(locator)
        if not select.is_multiple:
            raise RuntimeError("Keyword 'Select all from list' works only for multiselect lists.")
        for i in range(len(select.options)):
            select.select_by_index(i)

    def select_from_list(self, locator, *items):
        """Selects `*items` from list identified by `locator`
        If more than one value is given for a single-selection list, the last
        value will be selected. If the target list is a multi-selection list,
        and `*items` is an empty list, all values of the list will be selected.
        *items try to select by value then by label.
        It's faster to use 'by index/value/label' functions.
        An exception is raised for a single-selection list if the last
        value does not exist in the list and a warning for all other non-
        existing items. For a multi-selection list, an exception is raised
        for any and all non-existing values.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        non_existing_items = []
        items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
        self._info("Selecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        if not items:
            # No items given: select everything.
            for i in range(len(select.options)):
                select.select_by_index(i)
            return
        for item in items:
            # Try by value first, then by visible label; remember misses.
            # (Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.)
            try:
                select.select_by_value(item)
            except Exception:
                try:
                    select.select_by_visible_text(item)
                except Exception:
                    non_existing_items = non_existing_items + [item]
                    continue
        if any(non_existing_items):
            if select.is_multiple:
                raise ValueError("Options '%s' not in list '%s'." % (", ".join(non_existing_items), locator))
            else:
                # Single-select: warn about all but the last missing item,
                # raise only if the final (effective) item is missing.
                if any(non_existing_items[:-1]):
                    items_str = non_existing_items[:-1] and "Option(s) '%s'" % ", ".join(non_existing_items[:-1])
                    self._warn("%s not found within list '%s'." % (items_str, locator))
                if items and items[-1] in non_existing_items:
                    raise ValueError("Option '%s' not in list '%s'." % (items[-1], locator))

    def select_from_list_by_index(self, locator, *indexes):
        """Selects `*indexes` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not indexes:
            raise ValueError("No index given.")
        items_str = "index(es) '%s'" % ", ".join(indexes)
        self._info("Selecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        for index in indexes:
            select.select_by_index(int(index))

    def select_from_list_by_value(self, locator, *values):
        """Selects `*values` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not values:
            raise ValueError("No value given.")
        items_str = "value(s) '%s'" % ", ".join(values)
        self._info("Selecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        for value in values:
            select.select_by_value(value)

    def select_from_list_by_label(self, locator, *labels):
        """Selects `*labels` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not labels:
            raise ValueError("No value given.")
        items_str = "label(s) '%s'" % ", ".join(labels)
        self._info("Selecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        for label in labels:
            select.select_by_visible_text(label)

    def unselect_from_list(self, locator, *items):
        """Unselects given values from select list identified by locator.
        As a special case, giving empty list as `*items` will remove all
        selections.
        *items try to unselect by value AND by label.
        It's faster to use 'by index/value/label' functions.
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
        self._info("Unselecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        if not select.is_multiple:
            raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
        if not items:
            select.deselect_all()
            return
        select, options = self._get_select_list_options(select)
        for item in items:
            # Only Selenium 2.52 and newer raise exceptions when there is no match.
            # For backwards compatibility reasons we want to ignore them.
            try:
                select.deselect_by_value(item)
            except NoSuchElementException:
                pass
            try:
                select.deselect_by_visible_text(item)
            except NoSuchElementException:
                pass

    def unselect_from_list_by_index(self, locator, *indexes):
        """Unselects `*indexes` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not indexes:
            raise ValueError("No index given.")
        items_str = "index(es) '%s'" % ", ".join(indexes)
        self._info("Unselecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        if not select.is_multiple:
            raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
        for index in indexes:
            select.deselect_by_index(int(index))

    def unselect_from_list_by_value(self, locator, *values):
        """Unselects `*values` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not values:
            raise ValueError("No value given.")
        items_str = "value(s) '%s'" % ", ".join(values)
        self._info("Unselecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        if not select.is_multiple:
            raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
        for value in values:
            select.deselect_by_value(value)

    def unselect_from_list_by_label(self, locator, *labels):
        """Unselects `*labels` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not labels:
            raise ValueError("No value given.")
        items_str = "label(s) '%s'" % ", ".join(labels)
        self._info("Unselecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        if not select.is_multiple:
            raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
        for label in labels:
            select.deselect_by_visible_text(label)

    # Private

    def _get_labels_for_options(self, options):
        """Return the visible text of each option in `options`."""
        labels = []
        for option in options:
            labels.append(option.text)
        return labels

    def _get_select_list(self, locator):
        """Locate the <select> element and wrap it in a selenium Select."""
        el = self._element_find(locator, True, True, 'select')
        return Select(el)

    def _get_select_list_options(self, select_list_or_locator):
        """Return (Select, options) from a Select instance or a locator."""
        if isinstance(select_list_or_locator, Select):
            select = select_list_or_locator
        else:
            select = self._get_select_list(select_list_or_locator)
        return select, select.options

    def _get_select_list_options_selected(self, locator):
        """Return (Select, currently-selected options) for `locator`."""
        select = self._get_select_list(locator)
        # TODO: Handle possible exception thrown by all_selected_options
        return select, select.all_selected_options

    def _get_values_for_options(self, options):
        """Return the `value` attribute of each option in `options`."""
        values = []
        for option in options:
            values.append(option.get_attribute('value'))
        return values

    def _is_multiselect_list(self, select):
        """Return True if the raw <select> element allows multiple selection."""
        multiple_value = select.get_attribute('multiple')
        if multiple_value is not None and (multiple_value == 'true' or multiple_value == 'multiple'):
            return True
        return False

    def _unselect_all_options_from_multi_select_list(self, select):
        """Clear every selection on a multi-select via JavaScript."""
        self._current_browser().execute_script("arguments[0].selectedIndex = -1;", select)

    def _unselect_option_from_multi_select_list(self, select, options, index):
        """Deselect option `index` by clicking it, if currently selected."""
        if options[index].is_selected():
            options[index].click()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
def parse_values(values, type_map):
    """Parses hyperparameter values from a string into a python map.

    `values` is a string containing comma-separated `name=value` pairs.
    For each pair, the value of the hyperparameter named `name` is set to
    `value`.  If a hyperparameter name appears multiple times in `values`,
    the last value is used.

    The `value` in `name=value` must follow the syntax according to the
    type of the parameter:

    * Scalar integer: A Python-parsable integer value. E.g.: 1, 100, -12.
    * Scalar float: A Python-parsable floating point value. E.g.: 1.0,
      -.54e89.
    * Boolean: Either true or false.
    * Scalar string: A non-empty sequence of characters, excluding comma,
      spaces, and square brackets. E.g.: foo, bar_1.
    * List: A comma separated list of scalar values of the parameter type
      enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].

    Args:
      values: String. Comma separated list of `name=value` pairs where
        'value' must follow the syntax described above.
      type_map: A dictionary mapping hyperparameter names to types. Note every
        parameter name in values must be a key in type_map. The values must
        conform to the types indicated, where a value V is said to conform to a
        type T if either V has type T, or V is a list of elements of type T.
        Hence, for a multidimensional parameter 'x' taking float values,
        'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.

    Returns:
      A python map containing the name, value pairs.

    Raises:
      ValueError: If `values` cannot be parsed.
    """
    parsed = {}
    # Matches one `name=value` or `name=[v,v,...]` pair anchored at `position`.
    pair_re = re.compile(
        r'(?P<name>[a-zA-Z][\w]*)\s*=\s*'
        r'((?P<val>[^,\[]*)|\[(?P<vals>[^\]]*)\])($|,)')
    position = 0
    while position < len(values):
        match = pair_re.match(values, position)
        if not match:
            raise ValueError('Malformed hyperparameter value: %s' %
                             values[position:])
        # Advance past this pair (and its trailing comma, if present).
        position = match.end()
        groups = match.groupdict()
        name = groups['name']
        if name not in type_map:
            raise ValueError('Unknown hyperparameter type for %s' % name)
        type_ = type_map[name]

        def parse_fail(value):
            # Uniform error for any scalar the type converter rejects.
            raise ValueError(
                "Could not parse hparam '%s' of type '%s' with value '%s' in %s"
                % (name, type_.__name__, value, values))

        if type_ == bool:
            # bool('false') would be truthy, so booleans need hand parsing:
            # accept the literals true/false (either case) or any integer.
            def parse(value):
                if value in ('true', 'True'):
                    return True
                if value in ('false', 'False'):
                    return False
                try:
                    return bool(int(value))
                except (ValueError, TypeError):
                    parse_fail(value)
        else:
            parse = type_

        if groups['val'] is not None:
            # Scalar assignment.
            try:
                parsed[name] = parse(groups['val'])
            except (ValueError, TypeError):
                parse_fail(groups['val'])
        elif groups['vals'] is not None:
            # Bracketed list: split on commas/spaces, dropping empty tokens.
            elements = [e for e in re.split('[ ,]', groups['vals']) if e]
            try:
                parsed[name] = [parse(e) for e in elements]
            except (ValueError, TypeError):
                parse_fail(groups['vals'])
        else:
            parse_fail('')
    return parsed
class HParams(object):
  """Class to hold a set of hyperparameters as name-value pairs.

  A `HParams` object holds hyperparameters used to build and train a model,
  such as the number of hidden units in a neural net layer or the learning rate
  to use when training.

  You first create a `HParams` object by specifying the names and values of the
  hyperparameters.

  To make them easily accessible the parameter names are added as direct
  attributes of the class. A typical usage is as follows:

  ```python
  # Create a HParams object specifying names and values of the model
  # hyperparameters:
  hparams = HParams(learning_rate=0.1, num_hidden_units=100)

  # The hyperparameter are available as attributes of the HParams object:
  hparams.learning_rate ==> 0.1
  hparams.num_hidden_units ==> 100
  ```

  Hyperparameters have type, which is inferred from the type of their value
  passed at construction time. The currently supported types are: integer,
  float, string, and list of integer, float, or string.

  You can override hyperparameter values by calling the
  [`parse()`](#HParams.parse) method, passing a string of comma separated
  `name=value` pairs. This is intended to make it possible to override
  any hyperparameter values from a single command-line flag to which
  the user passes 'hyper-param=value' pairs. It avoids having to define
  one flag for each hyperparameter.

  The syntax expected for each value depends on the type of the parameter.
  See `parse()` for a description of the syntax.

  Example:

  ```python
  # Define a command line flag to pass name=value pairs.
  # For example using argparse:
  import argparse
  parser = argparse.ArgumentParser(description='Train my model.')
  parser.add_argument('--hparams', type=str,
                      help='Comma separated list of "name=value" pairs.')
  args = parser.parse_args()
  ...
  def my_program():
    # Create a HParams object specifying the names and values of the
    # model hyperparameters:
    hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,
                         activations=['relu', 'tanh'])

    # Override hyperparameters values by parsing the command line
    hparams.parse(args.hparams)

    # If the user passed `--hparams=learning_rate=0.3` on the command line
    # then 'hparams' has the following attributes:
    hparams.learning_rate ==> 0.3
    hparams.num_hidden_units ==> 100
    hparams.activations ==> ['relu', 'tanh']

    # If the hyperparameters are in json format use parse_json:
    hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
  ```
  """

  def __init__(self, hparam_def=None, model_structure=None, **kwargs):
    """Create an instance of `HParams` from keyword arguments.

    The keyword arguments specify name-values pairs for the hyperparameters.
    The parameter types are inferred from the type of the values passed.

    The parameter names are added as attributes of `HParams` object, so they
    can be accessed directly with the dot notation `hparams._name_`.

    Example:

    ```python
    # Define 3 hyperparameters: 'learning_rate' is a float parameter,
    # 'num_hidden_units' an integer parameter, and 'activation' a string
    # parameter.
    hparams = tf.HParams(
        learning_rate=0.1, num_hidden_units=100, activation='relu')

    hparams.activation ==> 'relu'
    ```

    Note that a few names are reserved and cannot be used as hyperparameter
    names. If you use one of the reserved name the constructor raises a
    `ValueError`.

    Args:
      hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
        protocol buffer. If provided, this object is initialized by
        deserializing hparam_def. Otherwise **kwargs is used.
      model_structure: An instance of ModelStructure, defining the feature
        crosses to be used in the Trial.
      **kwargs: Key-value pairs where the key is the hyperparameter name and
        the value is the value for the parameter.

    Raises:
      ValueError: If both `hparam_def` and initialization values are provided,
        or if one of the arguments is invalid.
    """
    # Register the hyperparameters and their type in _hparam_types.
    # This simplifies the implementation of parse().
    # _hparam_types maps the parameter name to a tuple (type, bool).
    # The type value is the type of the parameter for scalar hyperparameters,
    # or the type of the list elements for multidimensional hyperparameters.
    # The bool value is True if the value is a list, False otherwise.
    self._hparam_types = {}
    self._model_structure = model_structure
    if hparam_def:
      self._init_from_proto(hparam_def)
      if kwargs:
        raise ValueError('hparam_def and initialization values are '
                         'mutually exclusive')
    else:
      for name, value in six.iteritems(kwargs):
        self.add_hparam(name, value)

  def _init_from_proto(self, hparam_def):
    """Creates a new HParams from `HParamDef` protocol buffer.

    Each hparam in the proto stores its value in a oneof field named
    `<type>_value` for scalars or `<type>_list` for lists; the field name is
    used here to recover both the value kind and the element type.

    Args:
      hparam_def: `HParamDef` protocol buffer.
    """
    assert isinstance(hparam_def, hparam_pb2.HParamDef)
    for name, value in hparam_def.hparam.items():
      kind = value.WhichOneof('kind')
      if kind.endswith('_value'):
        # Single value.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, int(getattr(value, kind)))
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(name, compat.as_str(getattr(value, kind)))
        else:
          self.add_hparam(name, getattr(value, kind))
      else:
        # List of values.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(name, [compat.as_str(v)
                                 for v in getattr(value, kind).value])
        else:
          self.add_hparam(name, [v for v in getattr(value, kind).value])

  def add_hparam(self, name, value):
    """Adds {name, value} pair to hyperparameters.

    Args:
      name: Name of the hyperparameter.
      value: Value of the hyperparameter. Can be one of the following types:
        int, float, string, int list, float list, or string list.

    Raises:
      ValueError: if one of the arguments is invalid.
    """
    # Keys in kwargs are unique, but 'name' could be the name of a pre-existing
    # attribute of this object. In that case we refuse to use it as a
    # hyperparameter name.
    if getattr(self, name, None) is not None:
      raise ValueError('Hyperparameter name is reserved: %s' % name)
    if isinstance(value, (list, tuple)):
      # The element type of a multi-valued hparam is inferred from its first
      # element, so an empty list would leave the type undetermined.
      if not value:
        raise ValueError('Multi-valued hyperparameters cannot be empty: %s'
                         % name)
      self._hparam_types[name] = (type(value[0]), True)
    else:
      self._hparam_types[name] = (type(value), False)
    setattr(self, name, value)

  def parse(self, values):
    """Override hyperparameter values, parsing new values from a string.

    See parse_values for more detail on the allowed format for values.

    Args:
      values: String. Comma separated list of `name=value` pairs where
        'value' must follow the syntax described above.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values` cannot be parsed.
    """
    type_map = dict()
    for name, t in self._hparam_types.items():
      param_type, _ = t
      type_map[name] = param_type
    values_map = parse_values(values, type_map)
    return self._set_from_map(values_map)

  def _set_from_map(self, values_map):
    """Override hyperparameter values, parsing new values from a dictionary.

    Args:
      values_map: Dictionary of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values_map` cannot be parsed.
    """
    for name, value in values_map.items():
      # Enforce that list-typed hparams only receive lists and scalar-typed
      # hparams only receive scalars.
      _, is_list = self._hparam_types[name]
      if isinstance(value, list):
        if not is_list:
          raise ValueError(
              'Must not pass a list for single-valued parameter: %s' % name)
        setattr(self, name, value)
      else:
        if is_list:
          raise ValueError(
              'Must pass a list for multi-valued parameter: %s.' % name)
        setattr(self, name, value)
    return self

  def set_model_structure(self, model_structure):
    # Store the ModelStructure instance describing feature crosses.
    self._model_structure = model_structure

  def get_model_structure(self):
    # Return the ModelStructure set at construction or via
    # set_model_structure(); may be None.
    return self._model_structure

  def to_json(self):
    """Serializes the hyperparameters into JSON.

    Returns:
      A JSON string.
    """
    return json.dumps(self.values())

  def parse_json(self, values_json):
    """Override hyperparameter values, parsing new values from a json object.

    Args:
      values_json: String containing a json object of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values_json` cannot be parsed.
    """
    values_map = json.loads(values_json)
    return self._set_from_map(values_map)

  def values(self):
    """Return the hyperparameter values as a Python dictionary.

    Returns:
      A dictionary with hyperparameter names as keys. The values are the
      hyperparameter values.
    """
    return {n: getattr(self, n) for n in self._hparam_types.keys()}

  def __str__(self):
    # Sort by name so the string form is deterministic.
    return str(sorted(self.values().items()))

  @staticmethod
  def _get_kind_name(param_type, is_list):
    """Returns the field name given parameter type and is_list.

    Args:
      param_type: Data type of the hparam.
      is_list: Whether this is a list.

    Returns:
      A string representation of the field name.

    Raises:
      ValueError: If parameter type is not recognized.
    """
    if issubclass(param_type, bool):
      # This check must happen before issubclass(param_type, six.integer_types),
      # since Python considers bool to be a subclass of int.
      typename = 'bool'
    elif issubclass(param_type, six.integer_types):
      # Setting 'int' and 'long' types to be 'int64' to ensure the type is
      # compatible with both Python2 and Python3.
      typename = 'int64'
    elif issubclass(param_type, (six.string_types, six.binary_type)):
      # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
      # compatible with both Python2 and Python3.
      typename = 'bytes'
    elif issubclass(param_type, float):
      typename = 'float'
    else:
      raise ValueError('Unsupported parameter type: %s' % str(param_type))
    suffix = 'list' if is_list else 'value'
    return '_'.join([typename, suffix])

  def to_proto(self, export_scope=None):  # pylint: disable=unused-argument
    """Converts a `HParams` object to a `HParamDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `HParamDef` protocol buffer.
    """
    hparam_proto = hparam_pb2.HParamDef()
    for name in self._hparam_types:
      # Parse the values.
      param_type, is_list = self._hparam_types.get(name, (None, None))
      kind = HParams._get_kind_name(param_type, is_list)
      if is_list:
        if kind.startswith('bytes'):
          # Text values are stored as UTF-8 bytes in the proto.
          v_list = [compat.as_bytes(v) for v in getattr(self, name)]
        else:
          v_list = [v for v in getattr(self, name)]
        getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
      else:
        v = getattr(self, name)
        if kind.startswith('bytes'):
          v = compat.as_bytes(getattr(self, name))
        setattr(hparam_proto.hparam[name], kind, v)
    return hparam_proto

  @staticmethod
  def from_proto(hparam_def, import_scope=None):  # pylint: disable=unused-argument
    # Alternate constructor used by the proto registration below.
    return HParams(hparam_def=hparam_def)
# Register HParams with the framework so that collections holding HParams
# objects can be serialized to and from `HParamDef` protos (e.g. when a
# MetaGraph is exported and re-imported).
ops.register_proto_function('hparams',
                            proto_type=hparam_pb2.HParamDef,
                            to_proto=HParams.to_proto,
                            from_proto=HParams.from_proto)
| |
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552.
#
# Copyright (c) 2013, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import xml.etree.ElementTree as ET
from mock import Mock
import unittest
import stratuslab.accounting.Computer
from stratuslab.accounting.Computer import Computer, PDiskHelper, \
bytes_to_giga_approximation, ManifestGetFromMarketplaceError
from stratuslab.volume_manager.PersistentDisk import PersistentDisk
USAGERECORD_XML = """
<usagerecord>
<vm id="0">
<name>one-0</name>
<time>1</time>
<cpu>1.0</cpu>
<mem>1024</mem>
<net_rx>%(1GB)s</net_rx>
<net_tx>%(1GB)s</net_tx>
<starttime>2014-01-21 11:38:35</starttime>
<endtime>1970-01-01 00:00:00</endtime>
<disk>
<size>1</size>
</disk>
<disk>
<size>1.0</size>
</disk>
</vm>
</usagerecord>
""" % {'1GB': 1024 ** 3}
# Ony time related elements as returned by 'oneacct' CLI.
VM_XML = """
<vm>
<time></time>
<slice>
<!-- Pending -->
<stime>0</stime>
<!-- Prolog -->
<pstime>0</pstime>
<petime>0</petime>
<!-- Running -->
<rstime>0</rstime>
<retime>0</retime>
<!-- Epilog -->
<estime>0</estime>
<eetime>0</eetime>
<!-- Done -->
<etime>0</etime>
</slice>
</vm>
"""
HOUR = 60 * 60
DISK_SIZE = 10 * 1024 ** 3
IMAGE_MANIFEST = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<metadata>
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:slreq="http://mp.stratuslab.eu/slreq#" xmlns:slterms="http://mp.stratuslab.eu/slterms#" xml:base="http://mp.stratuslab.eu/">
<rdf:Description rdf:about="#Pd1C0IktTPRXFPGlEHfxrF7gxOF">
<dcterms:identifier>Pd1C0IktTPRXFPGlEHfxrF7gxOF</dcterms:identifier>
<slreq:bytes>%s</slreq:bytes>
</rdf:Description>
</rdf:RDF>
</metadata>""" % DISK_SIZE
class ComputerTest(unittest.TestCase):
    """Unit tests for stratuslab.accounting.Computer.

    External collaborators (Marketplace HTTP access, PDisk configuration and
    queries) are replaced by saving the module-level attribute, installing a
    Mock, and restoring the original in a `finally` block.
    """

    def setUp(self):
        # Metering window: the 24 hours ending one hour ago.
        self.w_end = time.time() - HOUR
        self.w_start = self.w_end - 24 * HOUR

    def test_compute_totals(self):
        cmptr = Computer(0, 0, '', True)
        root = ET.fromstring(USAGERECORD_XML)
        cmptr.compute_totals(root)
        # Totals are stored as string attributes on the root element.
        assert '1' == root.get('total_time')
        assert '1' == root.get('total_cpu')
        assert '1' == root.get('total_ram')
        # Two 1 GB disks in the fixture.
        assert '2' == root.get('total_disk')
        # FIXME: Enable when enabled in the code.
        # assert '1' == root.get('total_net_rx')
        # assert '1' == root.get('total_net_tx')

    def test_update_time_on_vm_started_before_still_running(self):
        "Started before the metering window and still running."
        stime = int(self.w_start - HOUR)
        etime = 0  # 0 means "not ended yet".
        vm = self._get_vm(stime, etime)
        # Only the part of the run inside the window is accounted.
        delta_time_hours = int((self.w_end - self.w_start) / HOUR)
        self._update_and_assert(vm, delta_time_hours)

    def test_update_time_on_vm_started_before_ended_within(self):
        "Started before and ended within the metering window."
        stime = int(self.w_start - HOUR)
        etime = int(self.w_start + HOUR) + 1
        vm = self._get_vm(stime, etime)
        delta_time_hours = int((etime - self.w_start) / HOUR)
        self._update_and_assert(vm, delta_time_hours)

    def test_update_time_on_vm_started_ended_within(self):
        "Started and ended within the metering window."
        stime = int(self.w_start + HOUR)
        etime = stime + HOUR
        vm = self._get_vm(stime, etime)
        delta_time_hours = int((etime - stime) / HOUR)
        self._update_and_assert(vm, delta_time_hours)

    def test_update_time_on_vm_started_within_still_running(self):
        "Started within the metering window and still running."
        stime = int(self.w_end - HOUR)
        etime = 0
        vm = self._get_vm(stime, etime)
        delta_time_hours = int((self.w_end - stime) / HOUR)
        self._update_and_assert(vm, delta_time_hours)

    def test_update_time_on_vm_accepted_within_didnot_running(self):
        "Started within the metering window and still running."
        vm = self._get_vm(int(self.w_end - HOUR), 0)
        self._update_and_assert(vm, 1)

    def test_vm_in_range_ended_before(self):
        # A VM that finished before the window starts is out of range.
        etime = int(self.w_start - HOUR)
        stime = etime - HOUR
        vm = self._get_vm(stime, etime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        assert False == cmptr.vm_in_range(vm)

    def test_vm_in_range_started_after(self):
        # A VM that starts after the window ends is out of range.
        stime = int(self.w_end + HOUR)
        etime = stime + HOUR
        vm = self._get_vm(stime, etime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        assert False == cmptr.vm_in_range(vm)

    def test_vm_in_range_started_before_still_running(self):
        stime = int(self.w_start - HOUR)
        etime = 0
        vm = self._get_vm(stime, etime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        # Avoid querying the cloud for the end time; read it from the XML.
        cmptr._query_etime_from_vm_details = cmptr.get_etime
        assert True == cmptr.vm_in_range(vm)

    def test_vm_in_range_accepted_within_didnot_run(self):
        vm = self._get_vm(0, 0)
        stime = int(self.w_end - HOUR)
        vm.find('slice/' + Computer.VM_STARTTIME_ELEM).text = str(stime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        cmptr._query_etime_from_vm_details = cmptr.get_etime
        assert True == cmptr.vm_in_range(vm)

    def test_vm_in_range_accepted_before_didnot_run(self):
        vm = self._get_vm(0, 0)
        stime = int(self.w_start - HOUR)
        vm.find('slice/' + Computer.VM_STARTTIME_ELEM).text = str(stime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        cmptr._query_etime_from_vm_details = cmptr.get_etime
        assert False == cmptr.vm_in_range(vm)

    def test_vm_in_range_accepted_after_didnot_run(self):
        vm = self._get_vm(0, 0)
        stime = int(self.w_end + HOUR)
        vm.find('slice/' + Computer.VM_STARTTIME_ELEM).text = str(stime)
        cmptr = Computer(self.w_start, self.w_end, '', True)
        cmptr._query_etime_from_vm_details = cmptr.get_etime
        assert False == cmptr.vm_in_range(vm)

    def test_get_size_from_marketplace(self):
        cmptr = Computer(self.w_start, self.w_end, '', True)
        # Save/patch/restore the module-level HTTP helper.
        _url_get_save = stratuslab.accounting.Computer.url_get
        try:
            stratuslab.accounting.Computer.url_get = Mock(return_value=IMAGE_MANIFEST)
            size_gb = bytes_to_giga_approximation(DISK_SIZE)
            assert size_gb == cmptr.get_size_from_marketplace_or_pdisk_by_manifest_id('http://foo.bar/baz')
            # The resolved size must also be cached by manifest URL.
            assert size_gb == cmptr.marketplace_size_cache['http://foo.bar/baz']
        finally:
            stratuslab.accounting.Computer.url_get = _url_get_save

    def test_get_size_from_pdisk_by_manifest_id(self):
        cmptr = Computer(self.w_start, self.w_end, '', True)
        size_gb = bytes_to_giga_approximation(DISK_SIZE)
        _get_size_saved = stratuslab.accounting.Computer.MPHelper.get_size
        _get_size_by_marketplace_url_saved = \
            stratuslab.accounting.Computer.PDiskHelper.get_size_by_marketplace_url
        try:
            # Marketplace lookup fails; the PDisk fallback must be used.
            stratuslab.accounting.Computer.MPHelper.get_size = \
                Mock(side_effect=ManifestGetFromMarketplaceError('Moked exception.'))
            stratuslab.accounting.Computer.PDiskHelper.get_size_by_marketplace_url = \
                Mock(return_value=size_gb)
            assert size_gb == cmptr.get_size_from_marketplace_or_pdisk_by_manifest_id('http://foo.bar/baz')
            assert size_gb == cmptr.marketplace_size_cache['http://foo.bar/baz']
        finally:
            stratuslab.accounting.Computer.MPHelper.get_size = _get_size_saved
            stratuslab.accounting.Computer.PDiskHelper.get_size_by_marketplace_url = \
                _get_size_by_marketplace_url_saved

    def test_get_size_from_pdisk(self):
        cmptr = Computer(self.w_start, self.w_end, '', True)
        _get_config_as_dict_saved = \
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict
        try:
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict = \
                Mock(return_value={'pdiskEndpoint': 'foo-endpoint',
                                   'persistentDiskCloudServiceUser': 'bar-user',
                                   'persistentDiskIp': '1.2.3.4'})
            # NOTE(review): PersistentDisk.getValue is patched but never
            # restored, so it stays mocked for the rest of the process.
            PersistentDisk.getValue = Mock(return_value='2048')
            assert 2048 == cmptr.get_size_from_pdisk('foo:1.2.3.4:1234:1-2-3-4')
            assert 2048 == cmptr.pdisk_size_cache['foo:1.2.3.4:1234:1-2-3-4']
        finally:
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict = \
                _get_config_as_dict_saved

    def test__get_pdisk_endpoint_and_disk_uuid_from_uri(self):
        # URI with an explicit port.
        endpoint, uuid = PDiskHelper.get_endpoint_and_disk_uuid_from_uri('foo:1.2.3.4:1234:1-2-3-4')
        assert endpoint == '1.2.3.4:1234'
        assert uuid == '1-2-3-4'
        # URI without a port.
        endpoint, uuid = PDiskHelper.get_endpoint_and_disk_uuid_from_uri('foo:1.2.3.4:1-2-3-4')
        assert endpoint == '1.2.3.4'
        assert uuid == '1-2-3-4'
        # Malformed URI must raise.
        self.assertRaises(Exception,
                          PDiskHelper.get_endpoint_and_disk_uuid_from_uri,
                          ('foo:bar',))

    def test_get_disk_size_with_MP_swap_PDisk_disks(self):
        cmptr = Computer(self.w_start, self.w_end, '', True)
        # Fixture file with three disks: Marketplace image, swap, PDisk volume.
        with open('vm-with-extra-disk-in-pdisk.xml') as f:
            vm = ET.fromstring(f.read())
        disks = cmptr.get_disks(vm)
        assert 3 == len(disks)
        disk_MP, disk_swap, disk_pdisk = disks
        # Disk from Marketplace (11GB)
        _url_get_save = stratuslab.accounting.Computer.url_get
        stratuslab.accounting.Computer.url_get = Mock(return_value=IMAGE_MANIFEST)
        try:
            size_gb = bytes_to_giga_approximation(DISK_SIZE)
            assert size_gb == cmptr.get_disk_size(disk_MP)
            assert size_gb == \
                cmptr.marketplace_size_cache['https://marketplace.stratuslab.eu/metadata/Pd1C0IktTPRXFPGlEHfxrF7gxOF']
        finally:
            stratuslab.accounting.Computer.url_get = _url_get_save
        # swap (1.5GB)
        assert float('1536') / 1024 == cmptr.get_disk_size(disk_swap)
        # disk from PDisk (2TB)
        _get_config_as_dict_saved = \
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict
        try:
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict = \
                Mock(return_value={'pdiskEndpoint': 'foo-endpoint',
                                   'persistentDiskCloudServiceUser': 'bar-user',
                                   'persistentDiskIp': '1.2.3.4'})
            PersistentDisk.getValue = Mock(return_value='2048')
            assert 2048 == cmptr.get_disk_size(disk_pdisk)
            assert 2048 == cmptr.pdisk_size_cache['pdisk:154.48.152.10:8445/95b75092-17d2-459f-9fa0-cd1deaa721f1']
        finally:
            stratuslab.accounting.Computer.PDiskHelper._get_config_as_dict = \
                _get_config_as_dict_saved

    def _get_vm(self, stime, etime):
        """Build a VM element with the given run start/end timestamps."""
        vm = ET.fromstring(VM_XML)
        vm.find('slice/' + Computer.VM_RUN_STARTTIME_ELEM).text = str(stime)
        vm.find('slice/' + Computer.VM_RUN_ENDTIME_ELEM).text = str(etime)
        return vm

    def _update_and_assert(self, vm, delta_time_hours):
        """Run _update_time_on_vm and check the accounted hours."""
        cmptr = Computer(self.w_start, self.w_end, '', True)
        cmptr._query_etime_from_vm_details = cmptr.get_etime
        cmptr._update_time_on_vm(vm)
        assert delta_time_hours == int(vm.find('time').text)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
"""Implementation of restricted Boltzmann machine."""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
from common import *
from activationfunctions import *
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
# Theano's configured floating point dtype ('float32' or 'float64'); used
# for every shared variable and symbolic tensor in this module.
theanoFloat = theano.config.floatX
class RBMMiniBatchTrainer(object):
  """Symbolic theano graph for CD-k training of an RBM on one minibatch.

  Builds shared variables for the weights/biases and their optimizer state
  (momentum deltas and rmsprop mean squares), applies dropout to the visible
  and hidden layers, and runs `cdSteps` Gibbs steps with `theano.scan` to
  produce the visible/hidden reconstructions used by the update rules.
  """

  def __init__(self, input, theanoGenerator, initialWeights, initialBiases,
               visibleActivationFunction, hiddenActivationFunction,
               visibleDropout, hiddenDropout, sparsityConstraint, cdSteps):
    self.visible = input
    self.cdSteps = theano.shared(value=np.int32(cdSteps))
    self.theanoGenerator = theanoGenerator
    # Weights and biases
    self.weights = theano.shared(value=np.asarray(initialWeights,
                                                  dtype=theanoFloat),
                                 name='W')
    self.biasVisible = theano.shared(value=np.asarray(initialBiases[0],
                                                      dtype=theanoFloat),
                                     name='bvis')
    self.biasHidden = theano.shared(value=np.asarray(initialBiases[1],
                                                     dtype=theanoFloat),
                                    name='bhid')
    # Old weight and biases updates (required for momentum)
    self.oldDw = theano.shared(value=np.zeros(shape=initialWeights.shape,
                                              dtype=theanoFloat))
    self.oldDVis = theano.shared(value=np.zeros(shape=initialBiases[0].shape,
                                                dtype=theanoFloat))
    self.oldDHid = theano.shared(value=np.zeros(shape=initialBiases[1].shape,
                                                dtype=theanoFloat))
    # Old weight and biases mean squares (required for rmsprop)
    self.oldMeanW = theano.shared(value=np.zeros(shape=initialWeights.shape,
                                                 dtype=theanoFloat))
    self.oldMeanVis = theano.shared(value=np.zeros(shape=initialBiases[0].shape,
                                                   dtype=theanoFloat))
    self.oldMeanHid = theano.shared(value=np.zeros(shape=initialBiases[1].shape,
                                                   dtype=theanoFloat))
    # Dropout == 1 means keep every unit; skip sampling a mask in that case.
    if visibleDropout in [1.0, 1]:
      droppedOutVisible = self.visible
    else:
      # Create dropout mask for the visible layer
      dropoutMaskVisible = self.theanoGenerator.binomial(size=self.visible.shape,
                                                         n=1, p=visibleDropout,
                                                         dtype=theanoFloat)
      droppedOutVisible = dropoutMaskVisible * self.visible
    if hiddenDropout in [1.0, 1]:
      # All-ones mask so the CD step below can multiply unconditionally.
      dropoutMaskHidden = T.ones(shape=(input.shape[0], initialBiases[1].shape[0]))
    else:
      # Create dropout mask for the hidden layer
      dropoutMaskHidden = self.theanoGenerator.binomial(
          size=(input.shape[0], initialBiases[1].shape[0]),
          n=1, p=hiddenDropout,
          dtype=theanoFloat)
    # One Gibbs step: stochastic hidden sample, deterministic visible
    # reconstruction. The same dropout masks are reused across all CD steps.
    def OneCDStep(visibleSample):
      linearSum = T.dot(visibleSample, self.weights) + self.biasHidden
      hidden = hiddenActivationFunction.nonDeterminstic(linearSum) * dropoutMaskHidden
      linearSum = T.dot(hidden, self.weights.T) + self.biasVisible
      if visibleDropout in [1.0, 1]:
        visibleRec = visibleActivationFunction.deterministic(linearSum)
      else:
        visibleRec = visibleActivationFunction.deterministic(linearSum) * dropoutMaskVisible
      return visibleRec
    visibleSeq, updates = theano.scan(OneCDStep,
                                      outputs_info=[droppedOutVisible],
                                      n_steps=self.cdSteps)
    self.updates = updates
    # Reconstruction after the last CD step.
    self.visibleReconstruction = visibleSeq[-1]
    # Running average of hidden activity, used by the sparsity penalty.
    self.runningAvgExpected = theano.shared(value=np.zeros(shape=initialBiases[1].shape,
                                                           dtype=theanoFloat))
    # Duplicate work but avoiding gradient in theano thinking we are using a random op
    linearSum = T.dot(droppedOutVisible, self.weights) + self.biasHidden
    self.hiddenActivations = hiddenActivationFunction.deterministic(linearSum) * dropoutMaskHidden
    self.activationProbabilities = hiddenActivationFunction.activationProbablity(linearSum)
    # Do not sample for the last one, in order to get less sampling noise
    # Here you should also use a expected value for symmetry
    # but we need an elegant way to do it
    hiddenRec = hiddenActivationFunction.deterministic(T.dot(self.visibleReconstruction, self.weights) + self.biasHidden)
    self.hiddenReconstruction = hiddenRec
    # TODO: check if this is doing the right thing
    # because the graph makes it look like hidden activations has to do with
    # the random generator
class ReconstructerBatch(object):
  """Builds the symbolic graph that reconstructs data with a trained RBM.

  Visible layers are not sampled during the Gibbs chain; hidden layers are
  sampled at every step except the final readout, following Hinton's
  practical guide. Dropout-trained weights are rescaled for test time via
  `testWeights`.
  """

  def __init__(self, input, theanoGenerator, weights, biases,
               visibleActivationFunction, hiddenActivationFunction,
               visibleDropout, hiddenDropout, cdSteps):
    self.visible = input
    self.theanoGenerator = theanoGenerator
    self.cdSteps = theano.shared(value=np.int32(cdSteps))
    # Rescale the dropout-trained weights for deterministic test-time use.
    self.weightsForVisible, self.weightsForHidden = testWeights(weights,
        visibleDropout=visibleDropout, hiddenDropout=hiddenDropout)
    bVis, bHid = biases[0], biases[1]

    # This does not sample the visible layers, but samples
    # The hidden layers up to the last one, like Hinton suggests
    def gibbsStep(currentVisible):
      hiddenSample = hiddenActivationFunction.nonDeterminstic(
          T.dot(currentVisible, self.weightsForHidden) + bHid)
      return visibleActivationFunction.deterministic(
          T.dot(hiddenSample, self.weightsForVisible) + bVis)

    chain, scanUpdates = theano.scan(gibbsStep,
                                     outputs_info=[self.visible],
                                     n_steps=self.cdSteps)
    self.updates = scanUpdates
    self.visibleReconstruction = chain[-1]

    # Recompute the hidden activations deterministically so that theano does
    # not treat them as depending on the random generator.
    self.hiddenActivations = hiddenActivationFunction.deterministic(
        T.dot(self.visible, self.weightsForHidden) + bHid)
    # No sampling on the final readout to reduce sampling noise.
    self.hiddenReconstruction = hiddenActivationFunction.deterministic(
        T.dot(self.visibleReconstruction, self.weightsForHidden) + bHid)
"""
Represents a RBM
"""
class RBM(object):
  def __init__(self, nrVisible, nrHidden, learningRate,
               hiddenDropout, visibleDropout,
               visibleActivationFunction=Sigmoid(),
               hiddenActivationFunction=Sigmoid(),
               rmsprop=True,
               nesterov=True,
               weightDecay=0.001,
               initialWeights=None,
               initialBiases=None,
               trainingEpochs=1,
               momentumFactorForLearningRate=False,
               momentumMax=0.95,
               sparsityCostFunction=squaredDiff,
               sparsityConstraint=False,
               sparsityTraget=0.01):
    """Configure the RBM and build its theano training graphs.

    Note: the default `Sigmoid()` arguments are mutable default instances
    shared across all RBMs that rely on the defaults; presumably the
    activation objects are stateless — TODO confirm.
    `sparsityTraget` is a (misspelled) public parameter name kept for
    backward compatibility.
    """
    # TODO: also check how the gradient works for RBMS
    # dropout = 1 means no dropout, keep all the weights
    self.hiddenDropout = hiddenDropout
    print "hidden dropout in RBM" , hiddenDropout
    # dropout = 1 means no dropout, keep all the weights
    self.visibleDropout = visibleDropout
    print "visible dropout in RBM" , visibleDropout
    self.nrHidden = nrHidden
    self.nrVisible = nrVisible
    self.learningRate = learningRate
    self.rmsprop = rmsprop
    self.nesterov = nesterov
    self.weights = initialWeights
    self.biases = initialBiases
    self.weightDecay = np.float32(weightDecay)
    self.momentumFactorForLearningRate = momentumFactorForLearningRate
    self.visibleActivationFunction = visibleActivationFunction
    self.hiddenActivationFunction = hiddenActivationFunction
    self.trainingEpochs = trainingEpochs
    self.momentumMax = momentumMax
    self.sparsityConstraint = sparsityConstraint
    self.sparsityRegularization = np.float32(sparsityRegularization)
    self.sparsityTraget = np.float32(sparsityTraget)
    self.sparsityCostFunction = sparsityCostFunction
    if sparsityConstraint:
      print "using sparsityConstraint"
    # Build the shared theano graphs for training and reconstruction.
    self.__initialize(initialWeights, initialBiases)
def __initialize(self, weights, biases):
# Initialize the weights
if weights == None and biases == None:
weights = initializeWeights(self.nrVisible, self.nrHidden)
biases = initializeBiasesReal(self.nrVisible, self.nrHidden)
theanoRng = RandomStreams(seed=np.random.randint(1, 1000))
x = T.matrix('x', dtype=theanoFloat)
batchTrainer = RBMMiniBatchTrainer(input=x,
theanoGenerator=theanoRng,
initialWeights=weights,
initialBiases=biases,
visibleActivationFunction=self.visibleActivationFunction,
hiddenActivationFunction=self.hiddenActivationFunction,
visibleDropout=self.visibleDropout,
hiddenDropout=self.hiddenDropout,
sparsityConstraint=self.sparsityConstraint,
cdSteps=1)
reconstructer = ReconstructerBatch(input=x,
theanoGenerator=theanoRng,
weights=batchTrainer.weights,
biases=[batchTrainer.biasVisible, batchTrainer.biasHidden],
visibleActivationFunction=self.visibleActivationFunction,
hiddenActivationFunction=self.hiddenActivationFunction,
visibleDropout=self.visibleDropout,
hiddenDropout=self.hiddenDropout,
cdSteps=1)
self.reconstructer = reconstructer
self.batchTrainer = batchTrainer
self.x = x
  def train(self, data, miniBatchSize=10):
    """Train the RBM on `data` with minibatch CD, momentum and (optionally)
    rmsprop/Nesterov updates; stores the learned weights and biases on self.
    """
    print "rbm learningRate"
    print self.learningRate
    print "data set size for restricted boltzmann machine"
    print len(data)
    # If we have gaussian units, we need to scale the data
    # to unit variance and zero mean
    if isinstance(self.visibleActivationFunction, Identity):
      print "scaling data for RBM"
      data = scale(data)
    sharedData = theano.shared(np.asarray(data, dtype=theanoFloat))
    self.miniBatchSize = miniBatchSize
    batchTrainer = self.batchTrainer
    x = self.x
    # Symbolic inputs of the compiled update functions.
    miniBatchIndex = T.lscalar()
    momentum = T.fscalar()
    cdSteps = T.iscalar()
    # Scale the learning rate by the minibatch size so gradients are averaged.
    batchLearningRate = self.learningRate / miniBatchSize
    batchLearningRate = np.float32(batchLearningRate)
    if self.nesterov:
      # Nesterov: first apply the momentum step, then the gradient step,
      # hence two separate compiled functions.
      preDeltaUpdates, updates = self.buildNesterovUpdates(batchTrainer,
          momentum, batchLearningRate, cdSteps)
      updateWeightWithMomentum = theano.function(
          inputs=[momentum],
          outputs=[],
          updates=preDeltaUpdates
          )
      updateWeightWithDelta = theano.function(
          inputs=[miniBatchIndex, cdSteps, momentum],
          outputs=[],
          updates=updates,
          givens={
              x: sharedData[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
            }
          )
      def trainFunction(miniBatchIndex, momentum, cdSteps):
        updateWeightWithMomentum(momentum)
        updateWeightWithDelta(miniBatchIndex, cdSteps, momentum)
    else:
      updates = self.buildUpdates(batchTrainer, momentum, batchLearningRate, cdSteps)
      trainFunction = theano.function(
          inputs=[miniBatchIndex, momentum, cdSteps],
          outputs=[],  # TODO: output error
          updates=updates,
          givens={
              x: sharedData[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
            })
    # NOTE(review): Python 2 integer division; a trailing partial minibatch
    # is silently dropped.
    nrMiniBatches = len(data) / miniBatchSize
    for epoch in xrange(self.trainingEpochs):
      print "rbm training epoch", epoch
      for miniBatchIndex in range(nrMiniBatches):
        iteration = miniBatchIndex + epoch * nrMiniBatches
        # Ramp momentum from 0.5 up to momentumMax over the iterations.
        momentum = np.float32(min(np.float32(0.5) + iteration * np.float32(0.001),
                                  np.float32(self.momentumMax)))
        # Use more CD steps once the chain has warmed up.
        if miniBatchIndex < 10:
          step = 1
        else:
          step = 3
        trainFunction(miniBatchIndex, momentum, step)
    # Expose the learned parameters both as shared variables and as numpy
    # arrays.
    self.sharedWeights = batchTrainer.weights
    self.sharedBiases = [batchTrainer.biasVisible, batchTrainer.biasHidden]
    self.weights = batchTrainer.weights.get_value()
    self.biases = [batchTrainer.biasVisible.get_value(),
                   batchTrainer.biasHidden.get_value()]
    print "reconstruction Error"
    print self.reconstructionError(data)
    # Dropout-rescaled weights for deterministic test-time use.
    self.testWeights = testWeights(self.weights, visibleDropout=self.visibleDropout,
                                   hiddenDropout=self.hiddenDropout)
    assert self.weights.shape == (self.nrVisible, self.nrHidden)
    assert self.biases[0].shape[0] == self.nrVisible
    assert self.biases[1].shape[0] == self.nrHidden
def buildUpdates(self, batchTrainer, momentum, batchLearningRate, cdSteps):
    """Build the list of Theano update pairs for one training step.

    Implements momentum SGD with optional per-parameter RMSProp scaling,
    weight decay on the weight matrix, and an optional sparsity penalty
    that is applied to the hidden biases only.

    :param batchTrainer: symbolic trainer holding the shared variables
        (weights, biases, old deltas, RMSProp running means, ...)
    :param momentum: symbolic scalar, current momentum coefficient
    :param batchLearningRate: learning rate already divided by batch size
    :param cdSteps: symbolic scalar, number of contrastive-divergence steps
    :return: list of (shared_variable, new_value) pairs for theano.function
    """
    updates = []
    # Optionally scale the learning rate down as momentum grows so the
    # effective step size stays roughly constant.
    if self.momentumFactorForLearningRate:
        factorLr = 1.0 - momentum
    else:
        factorLr = np.float32(1.0)
    if self.sparsityConstraint:
        # NOTE: 'sparistyCostMeasure' and 'self.sparsityTraget' are existing
        # (misspelled) names kept for compatibility with the rest of the file.
        if self.sparsityCostFunction == T.nnet.binary_crossentropy:
            sparistyCostMeasure = batchTrainer.activationProbabilities
        else:
            sparistyCostMeasure = batchTrainer.hiddenActivations
        # Exponential moving average of the mean hidden activity.
        runningAvg = batchTrainer.runningAvgExpected * 0.9 + T.mean(sparistyCostMeasure, axis=0) * 0.1
        # Sum over all hidden units
        sparsityCost = T.sum(self.sparsityCostFunction(self.sparsityTraget, runningAvg))
        updates.append((batchTrainer.runningAvgExpected, runningAvg))
    # CD gradient estimate for the weights: <v h>_data - <v h>_model.
    positiveDifference = T.dot(batchTrainer.visible.T, batchTrainer.hiddenActivations)
    negativeDifference = T.dot(batchTrainer.visibleReconstruction.T,
                               batchTrainer.hiddenReconstruction)
    delta = positiveDifference - negativeDifference
    wUpdate = momentum * batchTrainer.oldDw
    # # Sparsity cost
    # if self.sparsityConstraint:
    #   gradientW = T.grad(sparsityCost, batchTrainer.weights)
    #   delta -= self.sparsityRegularization * gradientW
    if self.rmsprop:
        # RMSProp: divide the step by the root of a running mean of
        # squared gradients (1e-8 avoids division by zero).
        meanW = 0.9 * batchTrainer.oldMeanW + 0.1 * delta ** 2
        wUpdate += factorLr * batchLearningRate * delta / T.sqrt(meanW + 1e-8)
        updates.append((batchTrainer.oldMeanW, meanW))
    else:
        wUpdate += factorLr * batchLearningRate * delta
    # NOTE(review): weight decay is applied to the previous update
    # (oldDw) rather than to the weights themselves — confirm intended.
    wUpdate -= batchLearningRate * self.weightDecay * batchTrainer.oldDw
    updates.append((batchTrainer.weights, batchTrainer.weights + wUpdate))
    updates.append((batchTrainer.oldDw, wUpdate))
    # Visible bias gradient: sum of (data - reconstruction) over the batch.
    visibleBiasDiff = T.sum(batchTrainer.visible - batchTrainer.visibleReconstruction, axis=0)
    biasVisUpdate = momentum * batchTrainer.oldDVis
    if self.rmsprop:
        meanVis = 0.9 * batchTrainer.oldMeanVis + 0.1 * visibleBiasDiff ** 2
        biasVisUpdate += factorLr * batchLearningRate * visibleBiasDiff / T.sqrt(meanVis + 1e-8)
        updates.append((batchTrainer.oldMeanVis, meanVis))
    else:
        biasVisUpdate += factorLr * batchLearningRate * visibleBiasDiff
    updates.append((batchTrainer.biasVisible, batchTrainer.biasVisible + biasVisUpdate))
    updates.append((batchTrainer.oldDVis, biasVisUpdate))
    hiddenBiasDiff = T.sum(batchTrainer.hiddenActivations - batchTrainer.hiddenReconstruction, axis=0)
    biasHidUpdate = momentum * batchTrainer.oldDHid
    # Sparsity cost
    if self.sparsityConstraint:
        gradientbiasHid = T.grad(sparsityCost, batchTrainer.biasHidden)
        hiddenBiasDiff -= self.sparsityRegularization * gradientbiasHid
    if self.rmsprop:
        meanHid = 0.9 * batchTrainer.oldMeanHid + 0.1 * hiddenBiasDiff ** 2
        biasHidUpdate += factorLr * batchLearningRate * hiddenBiasDiff / T.sqrt(meanHid + 1e-8)
        updates.append((batchTrainer.oldMeanHid, meanHid))
    else:
        biasHidUpdate += factorLr * batchLearningRate * hiddenBiasDiff
    updates.append((batchTrainer.biasHidden, batchTrainer.biasHidden + biasHidUpdate))
    updates.append((batchTrainer.oldDHid, biasHidUpdate))
    # Add the updates required for the theano random generator
    updates += batchTrainer.updates.items()
    updates.append((batchTrainer.cdSteps, cdSteps))
    return updates
def buildNesterovUpdates(self, batchTrainer, momentum, batchLearningRate, cdSteps):
    """Build Theano updates for Nesterov momentum.

    Nesterov momentum is implemented in two phases: 'preDeltaUpdates'
    first moves the parameters by the momentum term alone; the gradient
    is then evaluated at that look-ahead point and 'updates' applies the
    gradient step. The caller runs the two phases as separate functions.

    :return: tuple (preDeltaUpdates, updates) — two lists of
        (shared_variable, new_value) pairs for theano.function
    """
    preDeltaUpdates = []
    # Optionally scale the learning rate down as momentum grows.
    if self.momentumFactorForLearningRate:
        factorLr = 1.0 - momentum
    else:
        factorLr = np.float32(1.0)
    # Phase 1: apply only the momentum step (the "look ahead").
    wUpdateMomentum = momentum * batchTrainer.oldDw
    biasVisUpdateMomentum = momentum * batchTrainer.oldDVis
    biasHidUpdateMomentum = momentum * batchTrainer.oldDHid
    preDeltaUpdates.append((batchTrainer.weights, batchTrainer.weights + wUpdateMomentum))
    preDeltaUpdates.append((batchTrainer.biasVisible, batchTrainer.biasVisible + biasVisUpdateMomentum))
    preDeltaUpdates.append((batchTrainer.biasHidden, batchTrainer.biasHidden + biasHidUpdateMomentum))
    # Phase 2: gradient step evaluated after the momentum move.
    updates = []
    if self.sparsityConstraint:
        # NOTE: 'sparistyCostMeasure' and 'self.sparsityTraget' are existing
        # (misspelled) names kept for compatibility with the rest of the file.
        if self.sparsityCostFunction == T.nnet.binary_crossentropy:
            sparistyCostMeasure = batchTrainer.activationProbabilities
        else:
            sparistyCostMeasure = batchTrainer.hiddenActivations
        runningAvg = batchTrainer.runningAvgExpected * 0.9 + T.mean(sparistyCostMeasure, axis=0) * 0.1
        # Sum over all hidden units
        sparsityCost = T.sum(self.sparsityCostFunction(self.sparsityTraget, runningAvg))
        updates.append((batchTrainer.runningAvgExpected, runningAvg))
    # CD gradient estimate for the weights: <v h>_data - <v h>_model.
    positiveDifference = T.dot(batchTrainer.visible.T, batchTrainer.hiddenActivations)
    negativeDifference = T.dot(batchTrainer.visibleReconstruction.T,
                               batchTrainer.hiddenReconstruction)
    delta = positiveDifference - negativeDifference
    # # Sparsity cost
    # if self.sparsityConstraint:
    #   gradientW = T.grad(sparsityCost, batchTrainer.weights)
    #   delta -= self.sparsityRegularization * gradientW
    if self.rmsprop:
        meanW = 0.9 * batchTrainer.oldMeanW + 0.1 * delta ** 2
        wUpdate = factorLr * batchLearningRate * delta / T.sqrt(meanW + 1e-8)
        updates.append((batchTrainer.oldMeanW, meanW))
    else:
        wUpdate = factorLr * batchLearningRate * delta
    # NOTE(review): weight decay is applied to the previous update
    # (oldDw) rather than to the weights themselves — confirm intended.
    wUpdate -= batchLearningRate * self.weightDecay * batchTrainer.oldDw
    updates.append((batchTrainer.weights, batchTrainer.weights + wUpdate))
    # The stored delta includes the momentum part applied in phase 1, so
    # that the next step's momentum term reflects the full move.
    updates.append((batchTrainer.oldDw, wUpdate + wUpdateMomentum))
    visibleBiasDiff = T.sum(batchTrainer.visible - batchTrainer.visibleReconstruction, axis=0)
    if self.rmsprop:
        meanVis = 0.9 * batchTrainer.oldMeanVis + 0.1 * visibleBiasDiff ** 2
        biasVisUpdate = factorLr * batchLearningRate * visibleBiasDiff / T.sqrt(meanVis + 1e-8)
        updates.append((batchTrainer.oldMeanVis, meanVis))
    else:
        biasVisUpdate = factorLr * batchLearningRate * visibleBiasDiff
    updates.append((batchTrainer.biasVisible, batchTrainer.biasVisible + biasVisUpdate))
    updates.append((batchTrainer.oldDVis, biasVisUpdate + biasVisUpdateMomentum))
    hiddenBiasDiff = T.sum(batchTrainer.hiddenActivations - batchTrainer.hiddenReconstruction, axis=0)
    # As the paper says, only update the hidden bias
    if self.sparsityConstraint:
        gradientbiasHid = T.grad(sparsityCost, batchTrainer.biasHidden)
        hiddenBiasDiff -= self.sparsityRegularization * gradientbiasHid
    if self.rmsprop:
        meanHid = 0.9 * batchTrainer.oldMeanHid + 0.1 * hiddenBiasDiff ** 2
        biasHidUpdate = factorLr * batchLearningRate * hiddenBiasDiff / T.sqrt(meanHid + 1e-8)
        updates.append((batchTrainer.oldMeanHid, meanHid))
    else:
        biasHidUpdate = factorLr * batchLearningRate * hiddenBiasDiff
    updates.append((batchTrainer.biasHidden, batchTrainer.biasHidden + biasHidUpdate))
    updates.append((batchTrainer.oldDHid, biasHidUpdate + biasHidUpdateMomentum))
    # Add the updates required for the theano random generator
    updates += batchTrainer.updates.items()
    updates.append((batchTrainer.cdSteps, cdSteps))
    return preDeltaUpdates, updates
# Even though this function has no side effects, we need mini batches in
# order to ensure that we do not go out of memory
def hiddenRepresentation(self, dataInstances):
    """Return the hidden-unit activations for the given data instances.

    The data are pushed to a Theano shared variable and processed in
    mini batches of 1000 to bound memory usage; the per-batch outputs
    are stacked back into a single array.
    """
    dataInstacesConverted = theano.shared(np.asarray(dataInstances, dtype=theanoFloat))
    miniBatchSize = 1000
    # NOTE(review): Python 2 integer division plus one — when the data
    # size is an exact multiple of 1000 the last batch is empty; confirm
    # np.vstack tolerates the resulting empty slice here.
    nrMiniBatches = len(dataInstances) / miniBatchSize + 1
    # Symbolic variable for index
    index = T.iscalar()
    representHidden = theano.function(
        inputs=[index],
        outputs=self.reconstructer.hiddenActivations,
        updates=self.reconstructer.updates,
        givens={self.x: dataInstacesConverted[index * miniBatchSize: (index + 1) * miniBatchSize]})
    data = np.vstack([representHidden(i) for i in xrange(nrMiniBatches)])
    return data
# Even though this function has no side effects, we need mini batches in
# order to ensure that we do not go out of memory
def reconstruct(self, dataInstances, cdSteps=1):
    """Return the visible-layer reconstructions of the given data.

    Processes the data in mini batches of 1000 to bound memory usage.
    NOTE(review): the cdSteps parameter is accepted but never used in
    this body — verify whether the reconstructer should receive it.
    """
    dataInstacesConverted = theano.shared(np.asarray(dataInstances, dtype=theanoFloat))
    miniBatchSize = 1000
    # Python 2 integer division plus one (see hiddenRepresentation).
    nrMiniBatches = len(dataInstances) / miniBatchSize + 1
    # Symbolic variable for index
    index = T.iscalar()
    reconstructFunction = theano.function(
        inputs=[index],
        outputs=self.reconstructer.visibleReconstruction,
        updates=self.reconstructer.updates,
        givens={self.x: dataInstacesConverted[index * miniBatchSize: (index + 1) * miniBatchSize]})
    data = np.vstack([reconstructFunction(i) for i in xrange(nrMiniBatches)])
    return data
def reconstructionError(self, dataInstances):
    """Root-mean-square error between the data and its reconstruction."""
    return rmse(self.reconstruct(dataInstances), dataInstances)
def buildReconstructerForSymbolicVariable(self, x, theanoRng):
    """Create a ReconstructerBatch wired to the symbolic input x.

    The reconstructer shares this RBM's weights and biases (the shared
    Theano variables, so later training is reflected), uses the same
    activation functions and dropout rates, and runs a single CD step.

    :param x: symbolic Theano variable for the visible input
    :param theanoRng: Theano random generator used by the reconstructer
    """
    reconstructer = ReconstructerBatch(input=x,
                                       theanoGenerator=theanoRng,
                                       weights=self.sharedWeights,
                                       biases=self.sharedBiases,
                                       visibleActivationFunction=self.visibleActivationFunction,
                                       hiddenActivationFunction=self.hiddenActivationFunction,
                                       visibleDropout=self.visibleDropout,
                                       hiddenDropout=self.hiddenDropout,
                                       cdSteps=1)
    return reconstructer
def initializeWeights(nrVisible, nrHidden):
    """Sample an initial weight matrix of shape (nrVisible, nrHidden).

    Uses a uniform distribution over [-b, b] with
    b = 4 * sqrt(6 / (nrHidden + nrVisible)) (Glorot-style bound
    scaled by 4), cast to the Theano float type.
    """
    bound = 4 * np.sqrt(6. / (nrHidden + nrVisible))
    sample = np.random.uniform(low=-bound, high=bound,
                               size=(nrVisible, nrHidden))
    return np.asarray(sample, dtype=theanoFloat)
    # Alternative previously considered: small Gaussian init,
    # np.random.normal(0, 0.01, (nrVisible, nrHidden)).
# This only works for stochastic binary units
def intializeBiasesBinary(data, nrHidden):
    """Initialise RBM biases for binary visible units.

    The visible bias of unit i is set to log(p / (1 - p)), where p is
    the fraction of training examples in which unit i is on; the hidden
    biases start at zero.
    """
    # Per-unit "on" frequency across the dataset.
    onFractions = data.mean(axis=0, dtype=theanoFloat)
    logOdds = np.vectorize(safeLogFraction, otypes=[np.float32])
    visibleBiases = logOdds(onFractions)
    hiddenBiases = np.zeros(nrHidden, dtype=theanoFloat)
    return np.array([visibleBiases, hiddenBiases])
# TODO: Try random small numbers?
def initializeBiasesReal(nrVisible, nrHidden):
    """Zero-initialise visible and hidden biases for real-valued units."""
    return np.array([np.zeros(nrVisible, dtype=theanoFloat),
                     np.zeros(nrHidden, dtype=theanoFloat)])
def testWeights(weights, visibleDropout, hiddenDropout):
return weights.T * hiddenDropout, weights * visibleDropout
| |
# -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import decimal
from gnocchiclient import client as gclient
from gnocchiclient import exceptions as gexceptions
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from cloudkitty.collector import validate_conf
from cloudkitty.storage.v1.hybrid.backends import BaseHybridBackend
import cloudkitty.utils as ck_utils
from cloudkitty.utils import json
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# The collector period drives the finest archive-policy granularity below.
CONF.import_opt('period', 'cloudkitty.collector', 'collect')

# Config section holding both the storage and the keystoneauth options.
GNOCCHI_STORAGE_OPTS = 'storage_gnocchi'
gnocchi_storage_opts = [
    cfg.StrOpt('interface',
               default='internalURL',
               help='endpoint url type'),
    cfg.StrOpt('archive_policy_name',
               default='rating',
               help='Gnocchi storage archive policy name.'),
    # The archive policy definition MUST include the collect period granularity
    cfg.StrOpt('archive_policy_definition',
               default='[{"granularity": '
                       + str(CONF.collect.period) +
                       ', "timespan": "90 days"}, '
                       '{"granularity": 86400, "timespan": "360 days"}, '
                       '{"granularity": 2592000, "timespan": "1800 days"}]',
               help='Gnocchi storage archive policy definition.'), ]
CONF.register_opts(gnocchi_storage_opts, GNOCCHI_STORAGE_OPTS)

# Keystone session and auth options are registered in the same section so
# a single [storage_gnocchi] block configures everything.
ks_loading.register_session_conf_options(
    CONF,
    GNOCCHI_STORAGE_OPTS)
ks_loading.register_auth_conf_options(
    CONF,
    GNOCCHI_STORAGE_OPTS)

# Prefixes for rating resource-type names and cloudkitty metadata attributes.
RESOURCE_TYPE_NAME_ROOT = 'rating_service_'
METADATA_NAME_ROOT = 'ckmeta_'
class UnknownResourceType(Exception):
    """Exception raised when an unknown resource type is encountered"""

    def __init__(self, resource_type):
        message = 'Unknown resource type {}'.format(resource_type)
        super(UnknownResourceType, self).__init__(message)
class GnocchiStorage(BaseHybridBackend):
    """Gnocchi backend for hybrid storage.
    """

    # Parallel lists mapping the externally visible groupby keys to the
    # gnocchi attribute names used in aggregation queries.
    groupby_keys = ['res_type', 'tenant_id']
    groupby_values = ['type', 'project_id']

    def _init_resource_types(self):
        """Populate self._resource_type_data from the metric config.

        One entry per configured metric, keyed by its 'alt_name'.
        """
        for metric_name, metric in self.conf.items():
            metric_dict = dict()
            metric_dict['attributes'] = list()
            # Optional metadata become optional gnocchi attributes, prefixed
            # to avoid clashing with native gnocchi attribute names.
            for attribute in metric.get('metadata', {}):
                metric_dict['attributes'].append(
                    METADATA_NAME_ROOT + attribute)
            metric_dict['required_attributes'] = ['unit', 'resource_id']
            for attribute in metric['groupby']:
                metric_dict['required_attributes'].append(
                    METADATA_NAME_ROOT + attribute)
            metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric['alt_name']
            # NUMBOOL-mutated metrics always have a quantity of 1; others
            # read their quantity from the metric named metric_name.
            if metric['mutate'] == 'NUMBOOL':
                metric_dict['qty_metric'] = 1
            else:
                metric_dict['qty_metric'] = metric_name
            self._resource_type_data[metric['alt_name']] = metric_dict

    def _get_res_type_dict(self, res_type):
        """Build the gnocchi resource-type creation payload for res_type.

        Returns None when res_type is not in the loaded configuration.
        """
        res_type_data = self._resource_type_data.get(res_type, None)
        if not res_type_data:
            return None
        attribute_dict = dict()
        for attribute in res_type_data['attributes']:
            attribute_dict[attribute] = {
                'required': False,
                'type': 'string',
            }
        for attribute in res_type_data['required_attributes']:
            attribute_dict[attribute] = {
                'required': True,
                'type': 'string',
            }
        return {
            'name': res_type_data['name'],
            'attributes': attribute_dict,
        }

    def _create_resource(self, res_type, tenant_id, data):
        """Create a gnocchi resource (with its metrics) for a dataframe.

        :raises UnknownResourceType: when res_type is not configured.
        """
        res_type_data = self._resource_type_data.get(res_type, None)
        if not res_type_data:
            # NOTE(review): UnknownResourceType.__init__ already prefixes the
            # message, so this yields a doubled "Unknown resource type" text.
            raise UnknownResourceType(
                "Unknown resource type '{}'".format(res_type))
        res_dict = {
            'id': data['id'],
            'resource_id': data['id'],
            'project_id': tenant_id,
            'user_id': 'cloudkitty',
            'unit': data['unit'],
        }
        # Copy the cloudkitty metadata attributes from the flattened frame,
        # converting Decimals since gnocchi attributes are strings/floats.
        for key in ['attributes', 'required_attributes']:
            for attr in res_type_data[key]:
                if METADATA_NAME_ROOT in attr:
                    res_dict[attr] = data.get(
                        attr.replace(METADATA_NAME_ROOT, ''), None) or ''
                    if isinstance(res_dict[attr], decimal.Decimal):
                        res_dict[attr] = float(res_dict[attr])
        # Each resource carries a 'price' metric plus its quantity metric.
        created_metrics = [
            self._conn.metric.create({
                'name': metric,
                'archive_policy_name':
                    CONF.storage_gnocchi.archive_policy_name,
            }) for metric in ['price', res_type]
        ]
        metrics_dict = dict()
        for metric in created_metrics:
            metrics_dict[metric['name']] = metric['id']
        res_dict['metrics'] = metrics_dict
        try:
            return self._conn.resource.create(res_type_data['name'], res_dict)
        except gexceptions.ResourceAlreadyExists:
            # Retry once with a fresh UUID if the id is already taken.
            res_dict['id'] = uuidutils.generate_uuid()
            return self._conn.resource.create(res_type_data['name'], res_dict)

    def _get_resource(self, resource_type, resource_id):
        """Fetch a resource by gnocchi id; None when not found.

        :raises UnknownResourceType: when resource_type is not configured.
        """
        try:
            resource_name = self._resource_type_data[resource_type]['name']
        except KeyError:
            raise UnknownResourceType(
                "Unknown resource type '{}'".format(resource_type))
        try:
            return self._conn.resource.get(resource_name, resource_id)
        except gexceptions.ResourceNotFound:
            return None

    def _find_resource(self, resource_type, resource_id):
        """Search a resource by its 'resource_id' attribute; None if absent.

        Unlike _get_resource this matches on the resource_id attribute
        rather than the gnocchi primary id.
        """
        try:
            resource_type = self._resource_type_data[resource_type]['name']
        except KeyError:
            raise UnknownResourceType(
                "Unknown resource type '{}'".format(resource_type))
        query = {
            '=': {
                'resource_id': resource_id,
            }
        }
        try:
            return self._conn.resource.search(
                resource_type=resource_type, query=query, limit=1)[0]
        except IndexError:
            return None

    def _create_resource_type(self, resource_type):
        """Create the gnocchi resource type; None if unknown or existing."""
        res_type = self._resource_type_data.get(resource_type, None)
        if not res_type:
            return None
        res_type_dict = self._get_res_type_dict(resource_type)
        try:
            output = self._conn.resource_type.create(res_type_dict)
        except gexceptions.ResourceTypeAlreadyExists:
            output = None
        return output

    def _get_resource_type(self, resource_type):
        """Fetch the gnocchi resource-type object; None if unknown locally."""
        res_type = self._resource_type_data.get(resource_type, None)
        if not res_type:
            return None
        return self._conn.resource_type.get(res_type['name'])

    def __init__(self, **kwargs):
        """Load config, open the gnocchi client and prepare local state.

        Recognized kwargs: 'conf' (pre-loaded metrics configuration) and
        'period' (collect period override).
        """
        super(GnocchiStorage, self).__init__(**kwargs)
        conf = kwargs.get('conf') or ck_utils.load_conf(
            CONF.collect.metrics_conf)
        self.conf = validate_conf(conf)
        self.auth = ks_loading.load_auth_from_conf_options(
            CONF,
            GNOCCHI_STORAGE_OPTS)
        self.session = ks_loading.load_session_from_conf_options(
            CONF,
            GNOCCHI_STORAGE_OPTS,
            auth=self.auth)
        self._conn = gclient.Client(
            '1',
            session=self.session,
            adapter_options={'connect_retries': 3,
                             'interface': CONF.storage_gnocchi.interface})
        self._archive_policy_name = (
            CONF.storage_gnocchi.archive_policy_name)
        self._archive_policy_definition = json.loads(
            CONF.storage_gnocchi.archive_policy_definition)
        self._period = kwargs.get('period') or CONF.collect.period
        # Measurements buffered per tenant until commit() is called.
        self._measurements = dict()
        self._resource_type_data = dict()
        self._init_resource_types()

    def commit(self, tenant_id, state):
        """Push the buffered measurements for tenant_id to gnocchi.

        :param state: timestamp applied to every pushed measure.
        """
        if not self._measurements.get(tenant_id, None):
            return
        commitable_measurements = dict()
        # Flatten the per-resource buffers into one batch keyed by metric id.
        for metrics in self._measurements[tenant_id].values():
            for metric_id, measurements in metrics.items():
                if measurements:
                    measures = list()
                    for measurement in measurements:
                        measures.append(
                            {
                                'timestamp': state,
                                'value': measurement,
                            }
                        )
                    commitable_measurements[metric_id] = measures
        if commitable_measurements:
            self._conn.metric.batch_metrics_measures(commitable_measurements)
        # Drop the buffer whether or not anything was pushed.
        del self._measurements[tenant_id]

    def init(self):
        """Ensure the archive policy and all resource types exist."""
        try:
            self._conn.archive_policy.get(self._archive_policy_name)
        except gexceptions.ArchivePolicyNotFound:
            ck_archive_policy = {}
            ck_archive_policy['name'] = self._archive_policy_name
            ck_archive_policy['back_window'] = 0
            ck_archive_policy['aggregation_methods'] \
                = ['std', 'count', 'min', 'max', 'sum', 'mean']
            ck_archive_policy['definition'] = self._archive_policy_definition
            self._conn.archive_policy.create(ck_archive_policy)
        for service in self._resource_type_data.keys():
            try:
                self._get_resource_type(service)
            except gexceptions.ResourceTypeNotFound:
                self._create_resource_type(service)

    def get_total(self, begin=None, end=None, tenant_id=None,
                  service=None, groupby=None):
        """Return total rates over [begin, end], optionally grouped.

        :param groupby: comma-separated list among self.groupby_keys.
        NOTE(review): 'service' is accepted but unused in this body —
        presumably kept for interface compatibility; verify.
        """
        # Query can't be None if we don't specify a resource_id
        query = {'and': [{
            'like': {'type': RESOURCE_TYPE_NAME_ROOT + '%'},
        }]}
        if tenant_id:
            query['and'].append({'=': {'project_id': tenant_id}})
        gb = []
        if groupby:
            # Translate external keys to gnocchi attribute names.
            for elem in groupby.split(','):
                if elem in self.groupby_keys:
                    gb.append(self.groupby_values[
                        self.groupby_keys.index(elem)])
        # Setting gb to None instead of an empty list
        gb = gb if len(gb) > 0 else None
        # build aggregration operation
        op = ['aggregate', 'sum', ['metric', 'price', 'sum']]
        try:
            aggregates = self._conn.aggregates.fetch(
                op,
                start=begin,
                stop=end,
                groupby=gb,
                search=query)
        # No 'price' metric found
        except gexceptions.BadRequest:
            return [dict(begin=begin, end=end, rate=0)]
        # In case no group_by was specified
        if not isinstance(aggregates, list):
            aggregates = [aggregates]
        total_list = list()
        for aggregate in aggregates:
            if groupby:
                measures = aggregate['measures']['measures']['aggregated']
            else:
                measures = aggregate['measures']['aggregated']
            if len(measures) > 0:
                # Keep only the measures at the collect-period granularity.
                rate = sum(measure[2] for measure in measures
                           if (measure[1] == self._period))
                total = dict(begin=begin, end=end, rate=rate)
                if gb:
                    # Map gnocchi attribute names back to external keys and
                    # strip the rating resource-type prefix from the value.
                    for value in gb:
                        key = self.groupby_keys[
                            self.groupby_values.index(value)]
                        total[key] = aggregate['group'][value].replace(
                            RESOURCE_TYPE_NAME_ROOT, '')
                total_list.append(total)
        return total_list

    def _append_measurements(self, resource, data, tenant_id):
        """Buffer one frame's metric values for a resource, per tenant."""
        if not self._measurements.get(tenant_id, None):
            self._measurements[tenant_id] = {}
        measurements = self._measurements[tenant_id]
        if not measurements.get(resource['id'], None):
            # One empty list per metric id attached to the resource.
            measurements[resource['id']] = {
                key: list() for key in resource['metrics'].values()
            }
        for metric_name, metric_id in resource['metrics'].items():
            measurement = data.get(metric_name, None)
            if measurement is not None:
                # Decimals are converted; gnocchi measures are floats.
                measurements[resource['id']][metric_id].append(
                    float(measurement)
                    if isinstance(measurement, decimal.Decimal)
                    else measurement)

    def append_time_frame(self, res_type, frame, tenant_id):
        """Buffer one rated dataframe, creating its resource if needed."""
        flat_frame = ck_utils.flat_dict(frame)
        resource = self._find_resource(res_type, flat_frame['id'])
        if not resource:
            resource = self._create_resource(res_type, tenant_id, flat_frame)
        self._append_measurements(resource, flat_frame, tenant_id)

    def get_tenants(self, begin, end):
        """Return the project ids that have rated data in [begin, end]."""
        query = {'like': {'type': RESOURCE_TYPE_NAME_ROOT + '%'}}
        r = self._conn.metric.aggregation(
            metrics='price',
            query=query,
            start=begin,
            stop=end,
            aggregation='sum',
            granularity=self._period,
            needed_overlap=0,
            groupby='project_id')
        projects = list()
        for measures in r:
            projects.append(measures['group']['project_id'])
        return projects

    @staticmethod
    def _get_time_query(start, end, resource_type, tenant_id=None):
        """Build a gnocchi search query for resources alive in [start, end]."""
        query = {'and': [{
            'or': [
                {'=': {'ended_at': None}},
                {'<=': {'ended_at': end}}
            ]
        },
            {'>=': {'started_at': start}},
            {'=': {'type': resource_type}},
        ]
        }
        if tenant_id:
            query['and'].append({'=': {'project_id': tenant_id}})
        return query

    def _get_resources(self, resource_type, start, end, tenant_id=None):
        """Returns the resources of the given type in the given period"""
        return self._conn.resource.search(
            resource_type=resource_type,
            query=self._get_time_query(start, end, resource_type, tenant_id),
            details=True)

    def _format_frame(self, res_type, resource, desc, measure, tenant_id):
        """Convert one aggregated measure into a cloudkitty frame dict.

        :param measure: (timestamp, granularity, value) triple.
        Returns an empty dict when res_type is not configured.
        """
        res_type_info = self._resource_type_data.get(res_type, None)
        if not res_type_info:
            return dict()
        start = measure[0]
        stop = start + datetime.timedelta(seconds=self._period)
        # Getting price
        price = decimal.Decimal(measure[2])
        price_dict = {'price': float(price)}
        # Getting vol: a str qty_metric names the metric to read the
        # quantity from; otherwise qty_metric is the constant quantity.
        if isinstance(res_type_info['qty_metric'], str):
            try:
                qty = self._conn.metric.get_measures(
                    resource['metrics'][res_type_info['qty_metric']],
                    aggregation='sum',
                    start=start, stop=stop,
                    refresh=True)[-1][2]
            except IndexError:
                # No measures for the window.
                qty = 0
        else:
            qty = res_type_info['qty_metric']
        vol_dict = {'qty': decimal.Decimal(qty), 'unit': resource['unit']}
        # Period
        period_dict = {
            'begin': ck_utils.dt2iso(start),
            'end': ck_utils.dt2iso(stop),
        }
        # Formatting
        res_dict = dict()
        res_dict['desc'] = desc
        res_dict['vol'] = vol_dict
        res_dict['rating'] = price_dict
        res_dict['tenant_id'] = tenant_id
        return {
            'usage': {res_type: [res_dict]},
            'period': period_dict,
        }

    def resource_info(self, resource_type, start, end, tenant_id=None):
        """Returns a dataframe for the given resource type"""
        try:
            res_type_info = self._resource_type_data.get(resource_type, None)
            resource_name = res_type_info['name']
        except (KeyError, AttributeError):
            raise UnknownResourceType(resource_type)
        attributes = res_type_info['attributes'] \
            + res_type_info['required_attributes']
        output = list()
        query = self._get_time_query(start, end, resource_name, tenant_id)
        measures = self._conn.metric.aggregation(
            metrics='price',
            resource_type=resource_name,
            query=query,
            start=start,
            stop=end,
            granularity=self._period,
            aggregation='sum',
            needed_overlap=0,
            groupby=['type', 'id'],
        )
        for resource_measures in measures:
            # NOTE(review): resource_desc is initialised but never assigned
            # afterwards, so the resource is re-fetched for every measure —
            # looks like a caching attempt that never engages; verify.
            resource_desc = None
            resource = None
            for measure in resource_measures['measures']:
                if not resource_desc:
                    resource = self._get_resource(
                        resource_type, resource_measures['group']['id'])
                if not resource:
                    continue
                desc = {attr.replace(METADATA_NAME_ROOT, ''):
                        resource.get(attr, None) for attr in attributes}
                formatted_frame = self._format_frame(
                    resource_type, resource, desc, measure, tenant_id)
                output.append(formatted_frame)
        return output

    def get_time_frame(self, begin, end, **filters):
        """Return frames for [begin, end], filtered by tenant/res_type.

        Recognized filters: 'tenant_id' and 'res_type'; with no res_type
        every configured resource type is queried.
        """
        tenant_id = filters.get('tenant_id', None)
        resource_types = [filters.get('res_type', None)]
        if not resource_types[0]:
            resource_types = self._resource_type_data.keys()
        output = list()
        for resource_type in resource_types:
            output += self.resource_info(resource_type, begin, end, tenant_id)
        return output
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A luigi file system client that wraps around snakebite
Originally written by Alan Brenner <alan@magnetic.com> github.com/alanbbr
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import abstract_client as hdfs_abstract_client
from luigi import six
import luigi.contrib.target
import logging
import datetime
import os
logger = logging.getLogger('luigi-interface')
class SnakebiteHdfsClient(hdfs_abstract_client.HdfsFileSystem):
"""
A hdfs client using snakebite. Since Snakebite has a python API, it'll be
about 100 times faster than the hadoop cli client, which does shell out to
a java program on each file system operation.
"""
def __init__(self):
super(SnakebiteHdfsClient, self).__init__()
self._bite = None
self.pid = -1
@staticmethod
def list_path(path):
if isinstance(path, list) or isinstance(path, tuple):
return path
# TODO: Should this be:
# isinstance(path, (six.text_type, six.binary_type))?
if isinstance(path, six.string_types):
return [path, ]
return [str(path), ]
def get_bite(self):
"""
If Luigi has forked, we have a different PID, and need to reconnect.
"""
config = hdfs_config.hdfs()
if self.pid != os.getpid() or not self._bite:
client_kwargs = dict(filter(
lambda k_v: k_v[1] is not None and k_v[1] != '', six.iteritems({
'hadoop_version': config.client_version,
'effective_user': config.effective_user,
})
))
if config.snakebite_autoconfig:
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
"""
from snakebite.client import AutoConfigClient
self._bite = AutoConfigClient(**client_kwargs)
else:
from snakebite.client import Client
self._bite = Client(config.namenode_host, config.namenode_port, **client_kwargs)
return self._bite
def exists(self, path):
"""
Use snakebite.test to check file existence.
:param path: path to test
:type path: string
:return: boolean, True if path exists in HDFS
"""
return self.get_bite().test(path, exists=True)
def move(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(self.list_path(path), dest))
def rename_dont_move(self, path, dest):
"""
Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException
"""
from snakebite.errors import FileAlreadyExistsException
try:
self.get_bite().rename2(path, dest, overwriteDest=False)
except FileAlreadyExistsException:
# Unfortunately python2 don't allow exception chaining.
raise luigi.target.FileAlreadyExists()
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(self.list_path(path), recurse=recursive))
def chmod(self, path, permissions, recursive=False):
"""
Use snakebite.chmod, if available.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param permissions: \*nix style permission number
:type permissions: octal
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
if type(permissions) == str:
permissions = int(permissions, 8)
return list(self.get_bite().chmod(self.list_path(path),
permissions, recursive))
def chown(self, path, owner, group, recursive=False):
"""
Use snakebite.chown/chgrp, if available.
One of owner or group must be set. Just setting group calls chgrp.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param owner: new owner, can be blank
:type owner: string
:param group: new group, can be blank
:type group: string
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
bite = self.get_bite()
if owner:
if group:
return all(bite.chown(self.list_path(path), "%s:%s" % (owner, group),
recurse=recursive))
return all(bite.chown(self.list_path(path), owner, recurse=recursive))
return list(bite.chgrp(self.list_path(path), group, recurse=recursive))
def count(self, path):
"""
Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
"""
try:
res = self.get_bite().count(self.list_path(path)).next()
dir_count = res['directoryCount']
file_count = res['fileCount']
content_size = res['spaceConsumed']
except StopIteration:
dir_count = file_count = content_size = 0
return {'content_size': content_size, 'dir_count': dir_count,
'file_count': file_count}
def copy(self, path, destination):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("SnakebiteClient in luigi doesn't implement copy")
def put(self, local_path, destination):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("Snakebite doesn't implement put")
def get(self, path, local_destination):
"""
Use snakebite.copyToLocal, if available.
:param path: HDFS file
:type path: string
:param local_destination: path on the system running Luigi
:type local_destination: string
"""
return list(self.get_bite().copyToLocal(self.list_path(path),
local_destination))
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
"""
Use snakebite.mkdir, if available.
Snakebite's mkdir method allows control over full path creation, so by
default, tell it to build a full path to work like ``hadoop fs -mkdir``.
:param path: HDFS path to create
:type path: string
:param parents: create any missing parent directories
:type parents: boolean, default is True
:param mode: \*nix style owner/group/other permissions
:type mode: octal, default 0755
"""
result = list(self.get_bite().mkdir(self.list_path(path),
create_parent=parents, mode=mode))
if raise_if_exists and "ile exists" in result[0].get('error', ''):
raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
return result
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
"""
Use snakebite.ls to get the list of items in a directory.
:param path: the directory to list
:type path: string
:param ignore_directories: if True, do not yield directory entries
:type ignore_directories: boolean, default is False
:param ignore_files: if True, do not yield file entries
:type ignore_files: boolean, default is False
:param include_size: include the size in bytes of the current item
:type include_size: boolean, default is False (do not include)
:param include_type: include the type (d or f) of the current item
:type include_type: boolean, default is False (do not include)
:param include_time: include the last modification time of the current item
:type include_time: boolean, default is False (do not include)
:param recursive: list subdirectory contents
:type recursive: boolean, default is False (do not recurse)
:return: yield with a string, or if any of the include_* settings are
true, a tuple starting with the path, and include_* items in order
"""
bite = self.get_bite()
for entry in bite.ls(self.list_path(path), recurse=recursive):
if ignore_directories and entry['file_type'] == 'd':
continue
if ignore_files and entry['file_type'] == 'f':
continue
rval = [entry['path'], ]
if include_size:
rval.append(entry['length'])
if include_type:
rval.append(entry['file_type'])
if include_time:
rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
if len(rval) > 1:
yield tuple(rval)
else:
yield rval[0]
def touchz(self, path):
    """
    Always raise NotImplementedError: snakebite offers no touchz operation.
    """
    message = "SnakebiteClient in luigi doesn't implement touchz"
    raise NotImplementedError(message)
| |
#!/usr/bin/env python
"""Unit tests for sitemap_gen.py, a script for generating sitemaps
for a web server.
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
# Bail out on interpreters older than 2.2.  The Python 2 print statements
# below also make this file unparseable on Python 3.
if sys.hexversion < 0x02020000:
  print 'This script requires Python 2.2 or later.'
  print 'Currently run with version: %s' % sys.version
  sys.exit(1)
import binascii
import fnmatch
import gzip
import os
import tempfile
import unittest
import xml.dom.minidom
import sitemap_gen
class URLCounter(object):#1
  """Counts returned URLs, determines how many valid v. invalid we get.

  This is a helper for consuming what the many Input* objects produce.
  Syntax is deliberately kept old-style to honour this file's Python 1.5
  parseability requirement."""

  def __init__(self, root, print_invalid, expected):
    """Input:
    root          :: root URL for calling the URL's own Validate()
    print_invalid :: print to output all invalid URLs
    expected      :: sequence of wildcard filters to validate against
    """
    self._root = root
    self._print = print_invalid
    self._expected = expected
    self._num_valid = 0
    self._num_invalid = 0

  def Reset(self):
    """Reset our counts without harming the validity filters."""
    self._num_valid = 0
    self._num_invalid = 0

  def Valid(self):
    """Returns number of valid URLs."""
    return self._num_valid

  def Invalid(self):
    """Returns number of invalid URLs."""
    return self._num_invalid

  def Count(self, url, allow_fragment):
    """The 'please consume this URL' function called by the URL producer."""
    # A URL is good when it validates against the root AND, if any wildcard
    # filters were supplied, matches at least one of them.  An empty filter
    # list accepts every validated URL, as before.
    passed = url.Validate(self._root, allow_fragment)
    if passed and self._expected:
      matched = 0
      for pattern in self._expected:
        if fnmatch.fnmatchcase(url.loc, pattern):
          matched = 1
          break
      passed = matched
    if passed:
      self._num_valid = self._num_valid + 1
    else:
      if self._print:
        url.Log(prefix='URLCounter', level=0)
      self._num_invalid = self._num_invalid + 1
#endfold
class TestSiteMap(unittest.TestCase):#1
  """Tests the sitemap_gen application.

  NOTE(review): this suite is Python 2 only -- it relies on unichr() and on
  concatenating binascii byte output with str literals, neither of which
  works on Python 3.  Warning/error deltas are asserted against the shared
  sitemap_gen.output counters, so statement order inside each test matters.
  """

  def testTimestampISO8601(self):
    """ Checks whether the TimestampISO8601 function works. """
    self.assertEqual(sitemap_gen.TimestampISO8601(23),
                     '1970-01-01T00:00:23Z')
    self.assertEqual(sitemap_gen.TimestampISO8601(549876543),
                     '1987-06-05T07:29:03Z')
  #end def testTimestampISO8601

  def testExpandPathAttribute(self):
    """ Verifies our path globbing function works. """
    temppath = tempfile.mktemp()
    # Build a wildcard covering the temp directory, e.g. "/tmp/*".
    tempwild = tempfile.tempdir
    if tempwild:
      tempwild = tempwild + os.sep
    tempwild = tempwild + '*'
    try:
      open(temppath, 'w').close()
      dict1 = {}
      dict2 = {'alpha' : 'beta', 'path' : 'DoesNotExist987654321.xyz'}
      dict3 = {'alpha' : 'beta', 'path' : tempwild}
      res1 = sitemap_gen.ExpandPathAttribute(dict1, 'path')
      res2 = sitemap_gen.ExpandPathAttribute(dict2, 'path')
      res3 = sitemap_gen.ExpandPathAttribute(dict3, 'path')
      # No path / non-matching path: the dict passes through unchanged.
      self.assertEqual(len(res1), 1)
      self.assertEqual(res1[0], dict1)
      self.assertEqual(len(res2), 1)
      self.assertEqual(res2[0], dict2)
      self.assert_(len(res3) >= 1)
      # Our temp file must appear in the expansion, and every expanded dict
      # keeps the non-path attributes.
      anymatch = False
      for res in res3:
        path = res['path']
        if path.find(temppath) >= 0:
          anymatch = True
        self.assertEqual(res['alpha'], 'beta')
      self.assert_(anymatch)
    finally:
      os.unlink(temppath)
  #end def testExpandPathAttribute

  def testEncoder(self):
    """ Tests minimal functionality of the learning Unicode codec """
    ENC_UTF8 = 'UTF-8'
    ENC_LATIN1 = 'ISO-8859-1'
    ENC_CYRILLIC = 'ISO-8859-5'
    STR1_LATIN1 = 'has an ' + binascii.a2b_hex('FC') + 'mlat'
    STR1_UTF8 = 'has an ' + binascii.a2b_hex('C3BC') + 'mlat'
    STR1_UCS2 = 'has an ' + unichr(252) + 'mlat'
    STR2_LATIN1 = 'DRAGON' + binascii.a2b_hex('A7') + '!'
    STR2_CYRILLIC = 'DRAGON' + binascii.a2b_hex('FD') + '!'
    STR2_UCS2 = 'DRAGON' + unichr(167) + '!'
    # Spawn our own encoder instance so we don't abuse the module one.
    encoder = sitemap_gen.Encoder()
    # Convert Latin-1 to UTF-8, by way of Unicode
    encoder.SetUserEncoding(ENC_LATIN1)
    self.assertEqual(encoder.WidenText(STR1_LATIN1, None), STR1_UCS2)
    self.assertEqual(encoder.NarrowText(STR1_UCS2, ENC_UTF8), STR1_UTF8)
    # Test learning.  STR1 has no Cyrillic equivalent, STR2 just changes.
    encoder.SetUserEncoding(None)
    encoder._learned = []
    self.assertEqual(encoder.WidenText(STR2_CYRILLIC, ENC_CYRILLIC), STR2_UCS2)
    self.assertEqual(encoder.WidenText(STR2_CYRILLIC, None), STR2_UCS2)
    self.assertEqual(encoder.NarrowText(STR1_UCS2, None), STR1_UTF8)
    self.assert_(not encoder._learned)
    self.assertEqual(encoder.NarrowText(STR1_UCS2, ENC_LATIN1), STR1_LATIN1)
    self.assertEqual(encoder.NarrowText(STR1_UCS2, None), STR1_LATIN1)
    self.assertEqual(encoder.NarrowText(STR2_UCS2, None), STR2_LATIN1)
  #end def testEncoder

  def testURL(self):
    """ Vigorously tests our URL attribute processing. """
    # Test the IsAbsolute method
    self.assert_(sitemap_gen.URL.IsAbsolute('http://a.b.c/d/e.txt?f=g#h'))
    self.assert_(sitemap_gen.URL.IsAbsolute('http://a.b.c'))
    self.assert_(not sitemap_gen.URL.IsAbsolute('http:///d/e.txt?f=g#h'))
    self.assert_(not sitemap_gen.URL.IsAbsolute('http:a.b.c/d/e.txt?f=g#h'))
    self.assert_(not sitemap_gen.URL.IsAbsolute('a.b.c/d/e.txt?f=g#h'))
    self.assert_(not sitemap_gen.URL.IsAbsolute('/d/e.txt?f=g#h'))
    # Canonicalize our base URL
    BASE_R = 'http://www.example.com/f' + binascii.a2b_hex('F6F6') + '/'
    BASE_C = 'http://www.example.com/f%F6%F6/'
    sitemap_gen.encoder.SetUserEncoding('ISO-8859-1')
    self.assertEqual(sitemap_gen.URL.Canonicalize(BASE_R), BASE_C)
    # Test how canonicalization handles pre-quoted values
    self.assertEqual(sitemap_gen.URL.Canonicalize(
      'http://www.example.com/my%25thing'),
      'http://www.example.com/my%25thing')
    self.assertEqual(sitemap_gen.URL.Canonicalize(
      'http://www.example.com/my%thing'),
      'http://www.example.com/my%25thing')
    # Test IDNA encoding
    # The generator can only do the "right thing" on Python 2.3 or higher
    warn = sitemap_gen.output.num_warns
    if sys.hexversion >= 0x02030000:
      self.assertEqual(sitemap_gen.URL.Canonicalize(
        'http://www.' + unichr(252) + 'mlat.com/' + unichr(252) + 'mlat.txt'),
        'http://www.xn--mlat-zra.com/%FCmlat.txt')
      self.assertEqual(sitemap_gen.output.num_warns, warn)
    else:
      self.assertEqual(sitemap_gen.URL.Canonicalize(
        'http://www.' + unichr(252) + 'mlat.com/' + unichr(252) + 'mlat.txt'),
        'http://www.%FCmlat.com/%FCmlat.txt')
      self.assertEqual(sitemap_gen.output.num_warns, warn + 2)
    # All valid data
    warn = sitemap_gen.output.num_warns
    url1 = sitemap_gen.URL()
    url1.TrySetAttribute('loc', BASE_R + 'bar.html')
    url1.TrySetAttribute('lastmod', '1987-06-05T07:29:03Z')
    url1.TrySetAttribute('changefreq', 'daily')
    url1.TrySetAttribute('priority', '0.3')
    self.assert_(url1.Validate(BASE_C, True))
    self.assertEqual(sitemap_gen.output.num_warns, warn)
    # Valid ref, all else invalid: expect one warning per bad attribute.
    warn = sitemap_gen.output.num_warns
    url2 = sitemap_gen.URL()
    url2.TrySetAttribute('loc', BASE_C + 'bar.html')
    url2.TrySetAttribute('lastmod', 'June 1, 2005')
    url2.TrySetAttribute('changefreq', 'every second')
    url2.TrySetAttribute('priority', 'infinite')
    url2.TrySetAttribute('badattr', 'Nope!')
    self.assert_(url2.Validate(BASE_C, True))
    self.assertEqual(sitemap_gen.output.num_warns, warn + 4)
    # Two URLs with same ref should compare equal
    self.assertEqual(url1, url2)
    # A ref not based
    warn = sitemap_gen.output.num_warns
    url3 = sitemap_gen.URL()
    url3.TrySetAttribute('loc', 'http://www.example.com/bar/foo.html')
    self.assert_(not url3.Validate(BASE_C, True))
    self.assertEqual(sitemap_gen.output.num_warns, warn + 1)
    # A fragmentary URL
    warn = sitemap_gen.output.num_warns
    url4 = sitemap_gen.URL()
    url4.TrySetAttribute('loc', '/foo.html')
    self.assert_(not url4.Validate(BASE_C, False))
    self.assertEqual(sitemap_gen.output.num_warns, warn + 1)
    url4.TrySetAttribute('loc', '/xyzzy/foo.html')
    self.assert_(url4.Validate('http://www.example.com/', True))
    self.assertEqual(url4.loc, 'http://www.example.com/xyzzy/foo.html')
    self.assertEqual(sitemap_gen.output.num_warns, warn + 1)
    # Test a whole sequence of good and bad timestamp values
    timestamps_good = [
      '2001',
      '2001-01',
      '2001-01-02',
      '2001-01-03T01:02Z',
      '2001-01-03T01:02:03Z',
      '2001-01-03T01:02:03.0123Z',
      '2001-01-03T01:02+00:00',
      '2001-01-03T01:02:03-99:99',
      '2001-01-03T01:02:03.0123+88:88',
      ]
    timestamps_bad = [
      '2001:01:03T01:02Z',
      '2001-01-03T01:02:03.Z',
      'a2001-01-06T01:02:05-99:99',
      '2001-01-06T01:02:05-99:99Z',
      '2001-1-6T01:02:05-99:99',
      'xyzzy',
      '2001-01-03T01:02:03.1.2Z',
      ]
    warn = sitemap_gen.output.num_warns
    url3.TrySetAttribute('loc', BASE_C + 'foo.html')
    for ts in timestamps_good:
      url3.TrySetAttribute('lastmod', ts)
      self.assert_(url3.Validate(BASE_C, True))
    self.assertEqual(sitemap_gen.output.num_warns, warn)
    # Bad timestamps are dropped with a warning, but the URL stays valid.
    for ts in timestamps_bad:
      url3.TrySetAttribute('lastmod', ts)
      self.assert_(url3.Validate(BASE_C, True))
    self.assertEqual(sitemap_gen.output.num_warns, warn + len(timestamps_bad))
  #end def testURL

  def testFilter(self):
    """ Test the filtering object """
    url1 = sitemap_gen.URL()
    url2 = sitemap_gen.URL()
    url1.TrySetAttribute('loc', 'http://www.example.com/foo/bar.html')
    url2.TrySetAttribute('loc', 'http://www.example.com/bar/foo.html')
    url1.Validate('http://www.example.com', True)
    url2.Validate('http://www.example.com', True)
    # Arguments: each bad config should produce exactly one error.
    error = sitemap_gen.output.num_errors
    args_bad = [
      {},
      {'pattern' : '*', 'type' : 'unknown'},
      {'pattern' : '*', 'type' : 'wildcard', 'action' : 'look pretty'},
      {'pattern' : '*', 'type' : 'regexp'},
      ]
    error = sitemap_gen.output.num_errors
    for args in args_bad:
      sitemap_gen.Filter(args)
    self.assertEqual(sitemap_gen.output.num_errors, error + len(args_bad))
    # Wildcard: default action is drop (False); non-match returns None.
    filt_w = sitemap_gen.Filter({'pattern' : '*/foo/*', 'type' : 'wildcard' })
    self.assertEqual(filt_w.Apply(url1), False)
    self.assertEqual(filt_w.Apply(url2), None)
    # Regexp: type/action values are case-insensitive.
    filt_r = sitemap_gen.Filter({'pattern' : '/bar/[^/]+$', 'type' : 'REGEXP',
                                 'action' : 'PASS'})
    self.assertEqual(filt_r.Apply(url1), None)
    self.assertEqual(filt_r.Apply(url2), True)
  #end def testFilter

  def Count(self, url, allow_fragment):
    """Callback handed to ProduceURLs(); tallies into the class counters."""
    if url.Validate('http://www.example.com/', allow_fragment):
      self.valid_urls = self.valid_urls + 1
    else:
      self.invalid_urls = self.invalid_urls + 1
  #end def Count

  # Shared tallies used by the Count() callback above; each test that uses
  # them resets both to 0 on its instance first.
  valid_urls = 0
  invalid_urls = 0

  def testInputURL(self):
    """ Test one of the Input mechanisms: InputURL """
    # Feed a couple URLs.  Make sure we get an error on extra attributes.
    self.valid_urls = 0
    self.invalid_urls = 0
    error = sitemap_gen.output.num_errors
    warn = sitemap_gen.output.num_warns
    generator1 = sitemap_gen.InputURL({'href' : 'http://www.example.com/1',
                                       'priority' : '0.3',
                                       'lastmod' : '2004-11-14T01:00-07:00',
                                       'changefreq' : 'hourly',
                                       'unknownInURL' : 'attribute'})
    generator2 = sitemap_gen.InputURL({'href' : 'http://www.example.com/2',
                                       'priority' : '0.3',
                                       'lastmod' : '2004-11-14T01:00-07:00',
                                       'changefreq' : 'hourly'})
    generator1.ProduceURLs(self.Count)
    generator2.ProduceURLs(self.Count)
    # The unknown attribute kills generator1 entirely (error, no URL).
    self.assertEqual(self.valid_urls, 1)
    self.assertEqual(self.invalid_urls, 0)
    self.assertEqual(sitemap_gen.output.num_errors, error + 1)
    self.assertEqual(sitemap_gen.output.num_warns, warn)
  #end def testInputURL

  def testInputURLList(self):
    """ Test one of the Input mechanisms: InputURLList """
    path = tempfile.mktemp()
    file = open(path, 'w')
    try:
      # Create a temp file we can read
      testText = """
http://www.example.com/foo/bar unknownInURLList=attribute
http://www.example.com/foo/xxx.pdf lastmod=2003-12-31T14:05:06+00:00
http://www.example.com/foo/yyy?x=12&y=23 changefreq=weekly priority=0.3
"""
      file.write(testText)
      file.close()
      # Feed in the data.  Make sure we get a warning on the bad attribute.
      self.valid_urls = 0
      self.invalid_urls = 0
      warn = sitemap_gen.output.num_warns
      generator = sitemap_gen.InputURLList({'path' : path})
      generator.ProduceURLs(self.Count)
      self.assertEqual(self.valid_urls, 3)
      self.assertEqual(self.invalid_urls, 0)
      self.assertEqual(sitemap_gen.output.num_warns, warn + 1)
    finally:
      os.unlink(path)
  #end def testInputURLList

  def testInputDirectory(self):
    """Test one of the Input mechanisms: InputDirectory.

    I've seen a subtle path-bug appear when going into sub-sub-directories
    that didn't under just sub-directories.  So we go to the trouble to
    make a whole little directory tree to read.
    """
    counter = URLCounter('http://www.example.com/', True, (
      'http://www.example.com/',
      'http://www.example.com/one.html',
      'http://www.example.com/two.html',
      'http://www.example.com/xyzzy/',
      'http://www.example.com/xyzzy/thr.html',
      'http://www.example.com/xyzzy/zyxxy/',
      'http://www.example.com/xyzzy/zyxxy/fiv.html',
      ))
    path = tempfile.mktemp()
    subpath = os.path.join(path, 'xyzzy')
    subsubpath = os.path.join(subpath, 'zyxxy')
    try:
      # Create some dummy empty files
      os.mkdir(path)
      os.mkdir(subpath)
      os.mkdir(subsubpath)
      path_one = os.path.join(path, 'one.html')
      path_two = os.path.join(path, 'two.html')
      path_thr = os.path.join(subpath, 'thr.html')
      path_for = os.path.join(subpath, 'default.html')
      path_fiv = os.path.join(subsubpath, 'fiv.html')
      open(path_one, 'w').close()
      open(path_two, 'w').close()
      open(path_thr, 'w').close()
      open(path_for, 'w').close()
      open(path_fiv, 'w').close()
      # Feed in the data.  There should be no warnings.
      # default.html collapses into its directory URL, hence 7 not 8.
      warn = sitemap_gen.output.num_warns
      generator = sitemap_gen.InputDirectory({'path' : path,
        'url' : 'http://www.example.com/', 'default_file' : 'default.html'},
        'http://www.example.com/')
      generator.ProduceURLs(counter.Count)
      self.assertEqual(counter.Valid(), 7)
      self.assertEqual(counter.Invalid(), 0)
      self.assertEqual(sitemap_gen.output.num_warns, warn)
    finally:
      os.unlink(path_one)
      os.unlink(path_two)
      os.unlink(path_thr)
      os.unlink(path_for)
      os.unlink(path_fiv)
      os.rmdir(subsubpath)
      os.rmdir(subpath)
      os.rmdir(path)
  #end def testInputDirectory

  def testInputAccessLogCLF(self):
    """ Test one of the Input mechanisms: InputAccessLog (Common logfile) """
    path = tempfile.mktemp()
    file = open(path, 'w')
    try:
      # Create a temp file we can read
      testText = '''
msnbot.msn.com - - [15/May/2005:07:46:50 -0700] "GET /~guest/main/ HTTP/1.0" 200 5670
221.216.237.71 - - [15/May/2005:07:59:25 -0700] "GET /~guest/bookmark/ HTTP/1.1" 200 39195
221.216.237.71 - - [15/May/2005:07:59:27 -0700] "GET /favicon.ico HTTP/1.1" 404 217
c-67-161-121-105.hsd1.wa.comcast.net - - [15/May/2005:11:17:23 -0700] "GET /picts/top.jpg HTTP/1.1" 200 10044
cpe-65-24-155-46.columbus.res.rr.com - - [16/May/2005:22:53:07 -0700] "HEAD http://www.example.com/~guest HTTP/1.1" 200 0
'''
      file.write(testText)
      file.close()
      # Feed in the data.  The 404 line must be skipped, hence 4 of 5.
      self.valid_urls = 0
      self.invalid_urls = 0
      warn = sitemap_gen.output.num_warns
      generator = sitemap_gen.InputAccessLog({'path' : path})
      generator.ProduceURLs(self.Count)
      self.assertEqual(self.valid_urls, 4)
      self.assertEqual(self.invalid_urls, 0)
      self.assertEqual(sitemap_gen.output.num_warns, warn)
    finally:
      os.unlink(path)
  #end def testInputAccessLogCLF

  def testInputAccessLogELF(self):
    """ Test one of the Input mechanisms: InputAccessLog (Extended logfile) """
    path = tempfile.mktemp()
    file = open(path, 'w')
    try:
      # Create a temp file we can read
      testText = '''
#Software: Microsoft Internet Information Services 6.0
#Version: 1.0
#Date: 2004-03-22 09:20:36
#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) sc-status sc-substatus sc-win32-status
2004-03-22 09:20:36 192.168.0.58 GET /Default.htm - 80 - 4.5.11.3 Mozilla/4.0+(compatible;+MSIE+5.5;+Windows+98) 200 0 64
2004-03-22 09:22:58 192.168.0.58 GET /Default.htm - 80 - 24.87.160.82 Mozilla/4.0+(compatible;+MSIE+5.5;+Windows+98) 200 0 64
'''
      file.write(testText)
      file.close()
      # Feed in the data
      self.valid_urls = 0
      self.invalid_urls = 0
      warn = sitemap_gen.output.num_warns
      generator = sitemap_gen.InputAccessLog({'path' : path})
      generator.ProduceURLs(self.Count)
      self.assertEqual(self.valid_urls, 2)
      self.assertEqual(self.invalid_urls, 0)
      self.assertEqual(sitemap_gen.output.num_warns, warn)
    finally:
      os.unlink(path)
  #end def testInputAccessLogELF

  def testInputSitemap(self):
    """ Test one of the Input mechanisms: InputSitemap """
    path1 = tempfile.mktemp('.xml')
    path2 = tempfile.mktemp('.xml')
    path3 = tempfile.mktemp('.xml')
    path4 = tempfile.mktemp('.xml')
    file1 = None
    file2 = None
    file3 = None
    file4 = None
    index = '''<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex
  xmlns="http://www.google.com/schemas/sitemap/0.84"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84
                      http://www.google.com/schemas/sitemap/0.84/siteindex.xsd">
 <sitemap>
  <loc>http://www.example.com/path/to/%(PATH2)s</loc>
  <lastmod>2005-07-15T17:41:22Z</lastmod>
 </sitemap>
 <sitemap>
  <loc>http://www.example.com/path/to/%(PATH3)s</loc>
  <lastmod>2005-07-15T17:41:22Z</lastmod>
 </sitemap>
</sitemapindex>
'''
    content1 = '''<?xml version="1.0" encoding="UTF-8"?>
<urlset
  xmlns="http://www.google.com/schemas/sitemap/0.84"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84
                      http://www.google.com/schemas/sitemap/0.84/sitemap.xsd">
 <url>
  <loc>http://www.example.com/another/path/to/samplefile1.html</loc>
  <lastmod>2005-07-13T00:00:12Z</lastmod>
  <priority>0.5000</priority>
 </url>
 <url>
  <loc>http://www.example.com/another/path/to/samplefile2.html</loc>
  <lastmod>2004-11-16T20:22:06Z</lastmod>
  <priority>0.5000</priority>
 </url>
</urlset>
'''
    content2 = '''<?xml version="1.0" encoding="UTF-8"?>
<urlset
  xmlns="http://www.google.com/schemas/sitemap/0.84"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84
                      http://www.google.com/schemas/sitemap/0.84/sitemap.xsd">
 <url badSitemapAttr="Hello, World!">
  <loc>http://www.example.com/another/path/to/samplefile3.html</loc>
  <lastmod>2005-07-13T00:00:12Z</lastmod>
  <priority>0.5000</priority>
 </url>
 <url>
  <loc>http://www.example.com/another/path/to/samplefile4.html</loc>
  <lastmod>2004-11-16T20:22:06Z</lastmod>
  <priority>0.5000</priority>
 </url>
</urlset>
'''
    # This index is invalid because it points to another index file.
    badindex = '''<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex
  xmlns="http://www.google.com/schemas/sitemap/0.84"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84
                      http://www.google.com/schemas/sitemap/0.84/siteindex.xsd">
 <sitemap>
  <loc>http://www.example.com/path/to/%(PATH2)s</loc>
  <lastmod>2005-07-15T17:41:22Z</lastmod>
 </sitemap>
 <sitemap>
  <loc>http://www.example.com/path/to/%(PATH1)s</loc>
  <lastmod>2005-07-15T17:41:22Z</lastmod>
 </sitemap>
</sitemapindex>
'''
    # Make a nice complicated set of two index files and two sitemaps.
    try:
      file1 = open(path1, 'wt')
      file2 = open(path2, 'wt')
      file3 = open(path3, 'wt')
      file4 = open(path4, 'wt')
      file1.write(index % {
        'PATH1' : os.path.basename(path1),
        'PATH2' : os.path.basename(path2),
        'PATH3' : os.path.basename(path3)})
      file2.write(content1)
      file3.write(content2)
      file4.write(badindex % {
        'PATH1' : os.path.basename(path1),
        'PATH2' : os.path.basename(path2),
        'PATH3' : os.path.basename(path3)})
      file1.close()
      file1 = None
      file2.close()
      file2 = None
      file3.close()
      file3 = None
      file4.close()
      file4 = None
      # Feed in the good data.  Make sure we get warned on the bad attribute.
      self.valid_urls = 0
      self.invalid_urls = 0
      warn = sitemap_gen.output.num_warns
      generator = sitemap_gen.InputSitemap({'path' : path1})
      generator.ProduceURLs(self.Count)
      self.assertEqual(self.valid_urls, 4)
      self.assertEqual(self.invalid_urls, 0)
      self.assertEqual(sitemap_gen.output.num_warns, warn + 1)
      # Feed in the bad data.  Should error once on the bad index and once
      # because it aborts processing the XML.
      self.valid_urls = 0
      self.invalid_urls = 0
      errors = sitemap_gen.output.num_errors
      generator = sitemap_gen.InputSitemap({'path' : path4})
      generator.ProduceURLs(self.Count)
      self.assertEqual(self.valid_urls, 2)
      self.assertEqual(self.invalid_urls, 0)
      self.assertEqual(sitemap_gen.output.num_errors, errors + 2)
    finally:
      # NOTE(review): file4 and path4 are never cleaned up here, unlike the
      # first three -- looks like an oversight; confirm before relying on it.
      if file1 is not None:
        file1.close()
      if file2 is not None:
        file2.close()
      if file3 is not None:
        file3.close()
      if os.path.exists(path1):
        os.unlink(path1)
      if os.path.exists(path2):
        os.unlink(path2)
      if os.path.exists(path3):
        os.unlink(path3)
  #end def testInputSitemap

  def testFilePathGenerator(self):
    """ Test our iteration of filenames """
    gen1 = sitemap_gen.FilePathGenerator()
    gen2 = sitemap_gen.FilePathGenerator()
    gen3 = sitemap_gen.FilePathGenerator()
    self.assert_(gen1.Preload('/tmp/bar/foo.xml'))
    self.assert_(gen2.Preload('foo.xml.gz'))
    self.assert_(gen3.Preload('/foo.gz'))
    # A .gz suffix anywhere in the name flags gzip output.
    self.assert_(not gen1.is_gzip)
    self.assert_( gen2.is_gzip)
    self.assert_( gen3.is_gzip)
    # Integer argument numbers the file; string argument is inserted verbatim.
    self.assertEqual(gen1.GeneratePath(0),
                     os.path.normpath('/tmp/bar/foo.xml'))
    self.assertEqual(gen2.GeneratePath(1),'foo1.xml.gz')
    self.assertEqual(gen1.GeneratePath('_index.xml'),
                     os.path.normpath('/tmp/bar/foo_index.xml'))
    self.assertEqual(gen1.GenerateURL('_index.xml', 'http://www.example.com/'),
                     'http://www.example.com/foo_index.xml')
    self.assertEqual(gen1.GenerateURL(2, 'http://www.example.com/'),
                     'http://www.example.com/foo2.xml')
    self.assertEqual(gen2.GenerateWildURL('http://www.example.com/'),
                     'http://www.example.com/foo*.xml.gz')
  #end def testFilePathGenerator

  def testSitemap(self):
    """Test a basic config of the overall sitemap class."""
    path1 = tempfile.mktemp()
    path2 = tempfile.mktemp(".xml.gz")
    file = open(path1, 'w')
    try:
      # Create a temp file we can read
      testText = '''<?xml version="1.0" encoding="UTF-8"?>
<site
  base_url="http://www.example.com/"
  store_into="%(OUTPUTFILENAME)s"
  default_encoding="UTF-8"
  verbose="3"
  >
  <url href="http://www.example.com/.htaccess" />
  <url href="http://www.example.com/foo/bar.html" />
  <url href="http://www.example.com/foo/bar.gif" />
  <url href="http://www.example.com/foo/bar.html" />
  <url href="http://www.example.com/percent%%%%percent.html" />
  <url href="http://www.example.com/ümlat.html" />
  <filter action="drop" type="regexp" pattern="/\.[^/]*$" />
</site>
'''
      file.write(testText % {'OUTPUTFILENAME' : path2})
      file.close()
      # Bring up the engine
      warn = sitemap_gen.output.num_warns
      error = sitemap_gen.output.num_errors
      sitemap = sitemap_gen.CreateSitemapFromFile(path1, True)
      self.assert_(sitemap)
      sitemap.Generate()
      self.assertEqual(sitemap_gen.output.num_warns, warn)
      self.assertEqual(sitemap_gen.output.num_errors, error)
      # Verify we got readable XML out of it: duplicate and filtered URLs
      # are gone, percent signs and the umlaut are escaped.
      file = gzip.open(path2, mode='rb')
      result = file.read()
      file.close()
      dom = xml.dom.minidom.parseString(result)
      self.assertEqual(len(dom.getElementsByTagName('url')), 4)
      self.assert_(result.find('http://www.example.com/foo/bar.html') > 0)
      self.assert_(result.find('http://www.example.com/foo/bar.gif') > 0)
      self.assert_(result.find('%25%25') > 0)
      self.assert_(result.find('%C3%BC') > 0)
    finally:
      if os.path.exists(path2):
        os.unlink(path2)
      os.unlink(path1)
  #end def testSitemap
#endfold
# Entry point: run the whole TestSiteMap suite when executed as a script.
if __name__ == '__main__':#1
  unittest.main()
| |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup():
    """Install build prerequisites and create the Gitian base image.

    Reads the parsed command-line ``args`` and the ``workdir`` globals.
    Exits the process when Docker cannot be installed, or (with status 0)
    when LXC networking was reconfigured and a reboot is required.
    """
    global args, workdir
    # Baseline tooling needed by every virtualization backend.
    packages = ['ruby', 'git', 'make', 'wget', 'curl']
    if args.kvm:
        packages = packages + ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
    elif args.docker:
        if not os.path.isfile('/lib/systemd/system/docker.service'):
            # Try the distro package first, then the upstream package name.
            rc = 1
            for candidate in ('docker.io', 'docker-ce'):
                rc = subprocess.call(['sudo', 'apt-get', 'install', '-qq', candidate])
                if rc == 0:
                    break
            if rc != 0:
                print('Cannot find any way to install Docker.', file=sys.stderr)
                sys.exit(1)
    else:
        packages = packages + ['apt-cacher-ng', 'lxc', 'debootstrap']
    subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + packages)
    # Clone the repositories the build expects, skipping any already present.
    repositories = (
        ('gitian.sigs', 'https://github.com/bitcoin-core/gitian.sigs.git'),
        ('bitcoin-detached-sigs', 'https://github.com/bitcoin-core/bitcoin-detached-sigs.git'),
        ('gitian-builder', 'https://github.com/devrandom/gitian-builder.git'),
        ('fujicoin', 'https://github.com/fujicoin/fujicoin.git'),
    )
    for dirname, repo_url in repositories:
        if not os.path.isdir(dirname):
            subprocess.check_call(['git', 'clone', repo_url])
    os.chdir('gitian-builder')
    image_cmd = ['bin/make-base-vm', '--suite', 'focal', '--arch', 'amd64']
    if args.docker:
        image_cmd.append('--docker')
    elif not args.kvm:
        image_cmd.extend(['--lxc', '--disksize', '13000'])
    subprocess.check_call(image_cmd)
    os.chdir(workdir)
    # LXC on focal needs its bridge renamed; that change requires a reboot.
    if args.is_focal and not args.kvm and not args.docker:
        subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
        print('Reboot is required')
        sys.exit(0)
def build():
    """Run the unsigned Gitian builds for the platforms selected in args.

    Expects to start in ``workdir`` with the repositories cloned by setup();
    leaves the produced archives under ``fujicoin-binaries/<version>`` and
    optionally commits the unsigned signature files to gitian.sigs.
    """
    global args, workdir
    os.makedirs('fujicoin-binaries/' + args.version, exist_ok=True)
    print('\nBuilding Dependencies\n')
    os.chdir('gitian-builder')
    os.makedirs('inputs', exist_ok=True)
    # Fetch osslsigncode (needed by the Windows signer) and pin its sha256.
    subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz'])
    subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True)
    # Pre-download the depends sources into the shared cache.
    subprocess.check_call(['make', '-C', '../fujicoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
    if args.linux:
        print('\nCompiling ' + args.version + ' Linux')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'fujicoin='+args.commit, '--url', 'fujicoin='+args.url, '../fujicoin/contrib/gitian-descriptors/gitian-linux.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../fujicoin/contrib/gitian-descriptors/gitian-linux.yml'])
        # shell=True so the wildcards expand.
        subprocess.check_call('mv build/out/fujicoin-*.tar.gz build/out/src/fujicoin-*.tar.gz ../fujicoin-binaries/'+args.version, shell=True)
    if args.windows:
        print('\nCompiling ' + args.version + ' Windows')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'fujicoin='+args.commit, '--url', 'fujicoin='+args.url, '../fujicoin/contrib/gitian-descriptors/gitian-win.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../fujicoin/contrib/gitian-descriptors/gitian-win.yml'])
        # The unsigned tarball is kept as input for the later signing step.
        subprocess.check_call('mv build/out/fujicoin-*-win-unsigned.tar.gz inputs/', shell=True)
        subprocess.check_call('mv build/out/fujicoin-*.zip build/out/fujicoin-*.exe build/out/src/fujicoin-*.tar.gz ../fujicoin-binaries/'+args.version, shell=True)
    if args.macos:
        print('\nCompiling ' + args.version + ' MacOS')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'fujicoin='+args.commit, '--url', 'fujicoin='+args.url, '../fujicoin/contrib/gitian-descriptors/gitian-osx.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../fujicoin/contrib/gitian-descriptors/gitian-osx.yml'])
        # The unsigned tarball is kept as input for the later signing step.
        subprocess.check_call('mv build/out/fujicoin-*-osx-unsigned.tar.gz inputs/', shell=True)
        subprocess.check_call('mv build/out/fujicoin-*.tar.gz build/out/fujicoin-*.dmg build/out/src/fujicoin-*.tar.gz ../fujicoin-binaries/'+args.version, shell=True)
    os.chdir(workdir)
    if args.commit_files:
        # NOTE(review): adds all three platform dirs unconditionally -- this
        # fails if a platform above was skipped; confirm intended.
        print('\nCommitting '+args.version+' Unsigned Sigs\n')
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
        subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
        subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
        subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
        os.chdir(workdir)
def sign():
    """Produce signed Windows/MacOS release binaries from detached signatures.

    Reads the parsed command-line ``args`` and the ``workdir`` globals;
    optionally commits the resulting signature files to gitian.sigs.
    """
    global args, workdir
    descriptors = '../fujicoin/contrib/gitian-descriptors/'
    os.chdir('gitian-builder')
    if args.windows:
        print('\nSigning ' + args.version + ' Windows')
        # shell=True so the wildcards / fixed input name are handled by the shell.
        subprocess.check_call('cp inputs/fujicoin-' + args.version + '-win-unsigned.tar.gz inputs/fujicoin-win-unsigned.tar.gz', shell=True)
        subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature=' + args.commit, descriptors + 'gitian-win-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-win-signed', '--destination', '../gitian.sigs/', descriptors + 'gitian-win-signer.yml'])
        subprocess.check_call('mv build/out/fujicoin-*win64-setup.exe ../fujicoin-binaries/' + args.version, shell=True)
    if args.macos:
        print('\nSigning ' + args.version + ' MacOS')
        subprocess.check_call('cp inputs/fujicoin-' + args.version + '-osx-unsigned.tar.gz inputs/fujicoin-osx-unsigned.tar.gz', shell=True)
        subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature=' + args.commit, descriptors + 'gitian-osx-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-osx-signed', '--destination', '../gitian.sigs/', descriptors + 'gitian-osx-signer.yml'])
        subprocess.check_call('mv build/out/fujicoin-osx-signed.dmg ../fujicoin-binaries/' + args.version + '/fujicoin-' + args.version + '-osx.dmg', shell=True)
    os.chdir(workdir)
    if args.commit_files:
        print('\nCommitting ' + args.version + ' Signed Sigs\n')
        os.chdir('gitian.sigs')
        for platform_tag in ('win', 'osx'):
            subprocess.check_call(['git', 'add', args.version + '-' + platform_tag + '-signed/' + args.signer])
        subprocess.check_call(['git', 'commit', '-a', '-m', 'Add ' + args.version + ' signed binary sigs for ' + args.signer])
        os.chdir(workdir)
def verify():
    """Verify every Gitian assert file against the collected signatures.

    Returns 0 when all five verifications pass, 1 when any fails; prints a
    FAILED line per failing platform.  Reads the ``args``/``workdir`` globals.
    """
    global args, workdir
    rc = 0
    os.chdir('gitian-builder')
    # (display label, release name, descriptor file) for each verification.
    checks = (
        ('Linux', args.version + '-linux', 'gitian-linux.yml'),
        ('Windows', args.version + '-win-unsigned', 'gitian-win.yml'),
        ('MacOS', args.version + '-osx-unsigned', 'gitian-osx.yml'),
        ('Signed Windows', args.version + '-win-signed', 'gitian-win-signer.yml'),
        ('Signed MacOS', args.version + '-osx-signed', 'gitian-osx-signer.yml'),
    )
    for label, release, descriptor in checks:
        print('\nVerifying v' + args.version + ' ' + label + '\n')
        if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', release,
                            '../fujicoin/contrib/gitian-descriptors/' + descriptor]):
            print('Verifying v' + args.version + ' ' + label + ' FAILED\n')
            rc = 1
    os.chdir(workdir)
    return rc
def main():
    """Parse command-line options and drive the setup/build/sign/verify steps.

    Populates the module-level ``args`` and ``workdir`` globals that the
    other top-level helpers (``setup``, ``build``, ``sign``, ``verify``)
    rely on, then dispatches to them based on the requested actions.
    """
    global args, workdir
    parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
    parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
    parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
    parser.add_argument('-u', '--url', dest='url', default='https://github.com/fujicoin/fujicoin', help='Specify the URL of the repository. Default is %(default)s')
    parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
    parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
    parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
    parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
    parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
    parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
    parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
    parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
    parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
    parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
    parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
    parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
    parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
    parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')

    args = parser.parse_args()
    workdir = os.getcwd()

    args.is_focal = b'focal' in subprocess.check_output(['lsb_release', '-cs'])

    if args.kvm and args.docker:
        raise Exception('Error: cannot have both kvm and docker')

    # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
    # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
    os.environ['USE_LXC'] = ''
    os.environ['USE_VBOX'] = ''
    os.environ['USE_DOCKER'] = ''
    if args.docker:
        os.environ['USE_DOCKER'] = '1'
    elif not args.kvm:
        os.environ['USE_LXC'] = '1'
        if 'GITIAN_HOST_IP' not in os.environ.keys():
            os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if 'LXC_GUEST_IP' not in os.environ.keys():
            os.environ['LXC_GUEST_IP'] = '10.0.3.5'

    if args.setup:
        setup()

    if args.buildsign:
        args.build = True
        args.sign = True

    if not args.build and not args.sign and not args.verify:
        sys.exit(0)

    args.linux = 'l' in args.os
    args.windows = 'w' in args.os
    args.macos = 'm' in args.os

    # Disable for MacOS if no SDK found
    if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz'):
        print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
        args.macos = False

    args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'

    script_name = os.path.basename(sys.argv[0])
    if not args.signer:
        print(script_name+': Missing signer')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)
    if not args.version:
        print(script_name+': Missing version')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)

    # Add leading 'v' for tags: release versions are tagged v<version>, while
    # commits/branches (-c) and pull requests are used verbatim.
    # FIX: the old code assigned args.version unconditionally, so the comment
    # above was never implemented and tag builds checked out the wrong ref.
    if args.commit and args.pull:
        raise Exception('Cannot have both commit and pull')
    args.commit = ('' if args.commit else 'v') + args.version

    os.chdir('fujicoin')
    if args.pull:
        # Fetch the PR merge ref into both the source checkout and the copy
        # gitian-builder uses as input, and pin the resolved merge commit.
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        os.chdir('../gitian-builder/inputs/fujicoin')
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
        args.version = 'pull-' + args.version
    print(args.commit)
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', args.commit])
    os.chdir(workdir)

    os.chdir('gitian-builder')
    subprocess.check_call(['git', 'pull'])
    os.chdir(workdir)

    if args.build:
        build()

    if args.sign:
        sign()

    if args.verify:
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'pull'])
        os.chdir(workdir)
        sys.exit(verify())
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
class TimeDistributedTest(test.TestCase):
  """Tests for the keras.layers.TimeDistributed wrapper layer."""

  def test_timedistributed_dense(self):
    # first, test with Dense layer
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4)))
      model.compile(optimizer='rmsprop', loss='mse')
      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 2)),
          epochs=1,
          batch_size=10)

      # test config
      model.get_config()

  def test_timedistributed_static_batch_size(self):
    # Same as the Dense case but with a static batch_size on the input layer.
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
      model.compile(optimizer='rmsprop', loss='mse')
      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 2)),
          epochs=1,
          batch_size=10)

  def test_timedistributed_conv2d(self):
    # test with Conv2D
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Conv2D(5, (2, 2), padding='same'),
              input_shape=(2, 4, 4, 3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))

      # Round-trip through the JSON config to check serializability.
      model = keras.models.model_from_json(model.to_json())
      model.summary()

  def test_timedistributed_stacked(self):
    # test stacked layers
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4)))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')

      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 3)),
          epochs=1,
          batch_size=10)

  def test_regularizers(self):
    # The wrapped layer's regularization loss must propagate to the model.
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2, kernel_regularizer='l1'),
              input_shape=(3, 4)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      self.assertEqual(len(model.losses), 1)

  def test_TimeDistributed_learning_phase(self):
    with self.test_session():
      # test layers that need learning_phase to be set
      np.random.seed(1234)
      x = keras.layers.Input(shape=(3, 2))
      y = keras.layers.TimeDistributed(
          keras.layers.Dropout(.999))(x, training=True)
      model = keras.models.Model(x, y)
      y = model.predict(np.random.random((10, 3, 2)))
      # With dropout rate .999 almost all activations are zeroed at training
      # time, so the mean of the output should be close to zero.
      self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)

  def test_TimeDistributed_batchnorm(self):
    with self.test_session():
      # test that wrapped BN updates still work.
      model = keras.models.Sequential()
      model.add(keras.layers.TimeDistributed(
          keras.layers.BatchNormalization(center=True, scale=True),
          name='bn',
          input_shape=(10, 2)))
      model.compile(optimizer='rmsprop', loss='mse')
      # Assert that mean and variance are 0 and 1.
      td = model.layers[0]
      self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
      assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
      # Train
      model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                           np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
      # Assert that mean and variance changed.
      assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
      assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
      # Verify input_map has one mapping from inputs to reshaped inputs.
      self.assertEqual(len(td._input_map.keys()), 1)
class BidirectionalTest(test.TestCase):
  """Tests for the keras.layers.Bidirectional RNN wrapper."""

  def test_bidirectional(self):
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.test_session():
      # Exercise every supported merge mode.
      for mode in ['sum', 'concat', 'ave', 'mul']:
        x = np.random.random((samples, timesteps, dim))
        # 'concat' doubles the feature dimension; the other modes keep it.
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))

        # test with Sequential model
        model = keras.models.Sequential()
        model.add(
            keras.layers.Bidirectional(
                rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # test compute output shape
        ref_shape = model.layers[-1].output.get_shape()
        shape = model.layers[-1]._compute_output_shape(
            (None, timesteps, dim))
        self.assertListEqual(shape.as_list(), ref_shape.as_list())

        # test config
        model.get_config()
        model = keras.models.model_from_json(model.to_json())
        model.summary()

  def test_bidirectional_weight_loading(self):
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim), input_shape=(timesteps, dim)))
      y_ref = model.predict(x)
      # Round-tripping the weights must reproduce identical predictions.
      weights = model.layers[-1].get_weights()
      model.layers[-1].set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(y, y_ref)

  def test_bidirectional_stacked(self):
    # test stacked bidirectional layers
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'

    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))

      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim, return_sequences=True),
              merge_mode=mode,
              input_shape=(timesteps, dim)))
      model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)

      # test with functional API
      inputs = keras.layers.Input((timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)

  def test_bidirectional_statefulness(self):
    # Bidirectional and stateful
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'

    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))

      # Stateful RNNs require a fully specified batch shape.
      inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim, stateful=True), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| |
# -*- coding: utf-8 -*-
# unicum
# ------
# Python library for simple object cache and factory.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.3, copyright Wednesday, 18 September 2019
# Website: https://github.com/sonntagsgesicht/unicum
# License: Apache License 2.0 (see LICENSE file)
import logging
import sys
import os
from datetime import datetime
from json import dumps, loads, JSONEncoder
from copy import copy, deepcopy
from unittest import TestCase, TestLoader, TextTestRunner
from os import getcwd
sys.path.append('.')
sys.path.append('..')
from unicum import SingletonObject
from unicum import FactoryObject, ObjectList, LinkedObject
from unicum import PersistentObject, PersistentList, PersistentDict, AttributeList, UnicumJSONEncoder
from unicum import VisibleObject, VisibleAttributeList
from unicum import DataRange
from unicum import SessionHandler
# Preferred key ordering for serialized test objects.
# NOTE(review): not referenced in this chunk — presumably consumed by a
# property_order/key_order argument elsewhere; confirm before removing.
_property_order = ["Name", "Class", "Module", "Currency", "Origin", "Notional"]
#logging.basicConfig()
def _silent(func, *args, **kwargs):
_stout = sys.stdout
sys.stdout = open(os.devnull, 'w')
_res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = _stout
return _res
class _TestDummy(object):
pass
class SingletonTest(TestCase):
    """Checks that SingletonObject subclasses always hand out equal instances."""

    def setUp(self):
        # Build a throwaway singleton type for each test run.
        class SingeltonDummy(_TestDummy, SingletonObject):
            pass

        self.Constant = SingeltonDummy

    def test_singleton(self):
        first, second = self.Constant(), self.Constant()
        self.assertEqual(first, second)
class FactoryTest(TestCase):
    """Tests FactoryObject registration/lookup and typed ObjectList behavior.

    Fix: several asserts compared against str/int *literals* with ``is``
    (e.g. ``vp is 'EUR'``, ``len(...) is 1``, ``str(x) is y``).  That relies
    on CPython interning / small-int caching and raises SyntaxWarning on
    modern Pythons; those comparisons now use ``==``.  Genuine object
    identity checks between factory instances are unchanged.
    """

    def setUp(self):
        # A small factory hierarchy with its own (name-mangled) registry.
        class Currency(FactoryObject):
            __factory = dict()

            def __init__(self, name=None):
                name = self.__class__.__name__ if name is None else name
                super(Currency, self).__init__(name)
                self._vp_ = self.__class__.__name__

        self.Currency = Currency

        class EUR(Currency):
            @property
            def vp(self):
                return self._vp_

        self.EUR = EUR

        class USD(Currency):
            @property
            def vp(self):
                return self._vp_

        self.USD = USD

        # ObjectList subclass restricted to Currency members.
        class CurrencyList(ObjectList):
            def __init__(self, iterable=None):
                super(CurrencyList, self).__init__(iterable, Currency)

            def __add__(self, other):
                return CurrencyList(super(CurrencyList, self).__add__(other))

            def __iadd__(self, other):
                return CurrencyList(super(CurrencyList, self).__iadd__(other))

        self.CurrencyList = CurrencyList

        # A second, independent factory hierarchy.
        class Interpolation(FactoryObject):
            __factory = dict()

        class FactoryDummy(_TestDummy, Interpolation):
            pass

        self.FactoryDummy = FactoryDummy

        class AnotherFactoryDummy(_TestDummy, Interpolation):
            pass

        self.AnotherFactoryDummy = AnotherFactoryDummy

        class FactoryDummySubClass(AnotherFactoryDummy):
            pass

        self.FactoryDummySubClass = FactoryDummySubClass

    def test_first_factory(self):
        """Registered instances are retrievable by name through any subclass."""
        e = self.FactoryDummy().register()
        f = self.AnotherFactoryDummy().register()
        n = self.FactoryDummySubClass().register()
        g = n.__class__(str(f))
        self.assertTrue(e is not f)
        self.assertTrue(g is f)

    def test_second_factory(self):
        # test FactoryObject
        eur = self.EUR().register()
        self.EUR().register('eur')
        usd = self.USD().register()
        self.assertTrue(eur is not usd)
        neur = self.USD('EUR')
        self.assertTrue(eur is neur)
        # was `is 'EUR'`: string identity is an interning artefact; use ==.
        self.assertTrue(self.USD('EUR').vp == 'EUR')
        self.assertTrue(self.USD('USD').vp == 'USD')
        fEUR = self.Currency('EUR')
        feur = self.Currency('eur')
        self.assertTrue(type(eur) is type(fEUR))
        self.assertTrue(eur is feur)
        self.assertTrue(fEUR is feur)
        for x, y in zip(list(self.EUR.values()), list(self.USD.values())):
            self.assertTrue(x is y)
        for ek, (k, v), ev in zip(list(self.EUR.keys()), list(self.USD.items()), list(self.Currency.values())):
            self.assertTrue(ek is k)
            self.assertTrue(v is ev)
        self.Currency('EUR').remove()
        # was `is 1`: small-int identity is a CPython detail; use ==.
        self.assertTrue(len(list(self.EUR.items())) == 1)
        self.assertTrue(list(self.EUR.values())[0] is usd)
        self.assertTrue(usd in list(self.Currency.values()))
        self.assertTrue(eur not in list(self.Currency.values()))
        # NamedObject -> create item by self.__class__(obj_name)
        # NamedObject by SingletonObject
        # NamedObject from FactoryObject
        # Idempotent, Strong or SingletonNamedObject
        # e.g. __factory, singleton subs
        ccy = self.EUR().register()
        self.assertTrue(ccy == ccy.__class__(ccy.to_serializable()))
        self.assertTrue(ccy == ccy.__class__(ccy.__str__()))
        self.assertTrue(ccy == ccy.__class__(ccy))
        self.assertTrue(ccy == self.Currency(ccy))
        # Equivalent, Weak or SimpleNamedObject
        # e.g. BusinessDate, BusinessPeriod
        ccy = self.EUR()
        # was identity checks on freshly constructed strings; equality is the
        # meaningful assertion here.
        self.assertTrue(str(ccy) == str(ccy.__class__(ccy.__str__())))
        self.assertTrue(str(ccy) == str(ccy.__class__(ccy.__str__())))

    def test_register(self):
        # test FactoryObject
        eur = self.EUR().register()
        self.assertTrue('EUR' in list(self.EUR.keys()))
        names = 'eur', 'Eur', 'EURO', 'euro'
        self.EUR().register(*names)
        keys = list(self.EUR.keys())
        for n in names:
            self.assertTrue(n in keys)

    def test_mixed(self):
        """Distinct factory hierarchies keep separate registries."""
        usd = self.USD().register()
        const = self.FactoryDummy().register()
        self.assertTrue(usd in list(self.USD.values()))
        self.assertTrue(usd not in list(self.FactoryDummy.values()))
        self.assertTrue(const not in list(self.USD.values()))
        self.assertTrue(const in list(self.FactoryDummy.values()))

    def test_objectList(self):
        eur, usd = self.EUR().register(), self.USD().register()
        l = (eur, usd)
        o = ObjectList(l, self.Currency)
        o = self.CurrencyList(o)
        self.assertTrue(o == o.__class__(l))
        self.assertTrue(o == o.__class__(o))
        self.assertTrue(o == o.__class__(o.to_serializable()))
        for x, y in zip(l, o.to_serializable()):
            self.assertTrue(type(y) is str)
            # was `str(x) is y`: compare string values, not identities.
            self.assertTrue(str(x) == y)
        const = self.FactoryDummy().register()
        ol = (lambda x: ObjectList(x, object_type=self.Currency))
        self.assertRaises(TypeError, ol, (eur, const))
        eur.register('eur')
        usd.register('usd')
        # Every mutator must accept either an instance or a registered name.
        o[0] = eur
        self.assertTrue(eur in o)
        o.pop(0)
        self.assertTrue(eur not in o)
        o[0] = 'eur'
        self.assertTrue(eur in o)
        o.pop(0)
        o.insert(0, eur)
        self.assertTrue(eur in o)
        o.pop(0)
        o.insert(0, 'eur')
        self.assertTrue(eur in o)
        o.pop(0)
        o.append(eur)
        self.assertTrue(eur in o)
        o.pop(0)
        o.append('eur')
        self.assertTrue(eur in o)
        o.pop(0)
        o.extend([eur])
        self.assertTrue(eur in o)
        o.pop(0)
        o.extend(['eur'])
        self.assertTrue(eur in o)
        o.pop(0)
        # slices removed with migration to python 3
        # o[0:0] = [eur]
        # self.assertTrue(eur in o)
        # o.pop(0)
        # o[0:0] = ['eur']
        # self.assertTrue(eur in o)
        b = o + ['eur']
        self.assertTrue(isinstance(b, self.CurrencyList))
        self.assertTrue(eur in b)
        b = [eur] + o
        self.assertTrue(not isinstance(b, ObjectList))
        self.assertTrue(eur in b)

    def test_get_item(self):
        eur, usd = self.EUR().register(), self.USD().register()
        l = (eur, usd)
        o = ObjectList(l, self.Currency)
        o = self.CurrencyList(o)
        self.assertEqual(o['EUR'], eur)
        self.assertTrue(eur in o)
        self.assertEqual(o.index(eur), 0)
        self.assertEqual(o.get(eur), eur)
        self.assertEqual(o.get('EUR'), eur)
class MyLO(LinkedObject):
    """LinkedObject fixture whose ``property`` attribute holds another link."""

    def __init__(self):
        super(MyLO, self).__init__()
        # Fresh LinkedObject so update_link() can be exercised on it.
        self.property = LinkedObject()
class YourLO(LinkedObject):
    """LinkedObject fixture carrying a plain string ``property`` attribute."""

    def __init__(self):
        super(YourLO, self).__init__()
        self.property = 'It is yours.'
class LinkedTest(TestCase):
    """Tests that LinkedObject attributes are re-linked via update_link().

    Fix: four asserts used ``len(...) is 1`` — identity against an int
    literal only works due to CPython's small-int cache and warns on modern
    interpreters; they now use ``== 1``.  Identity checks between actual
    linked objects are intentional and unchanged.
    """

    def test_object_link(self):
        # test FactoryObject
        # ------------
        m = MyLO()
        n = MyLO()
        self.assertTrue(m.property is not n.property)
        self.assertTrue(len(LinkedObject._get_links()) == 1)
        self.assertTrue(m.property.__class__.__name__ in LinkedObject._get_links())
        self.assertTrue(n.property.__class__.__name__ in LinkedObject._get_links())
        m.property = n.property
        self.assertTrue(m.property is n.property)
        self.assertTrue(len(LinkedObject._get_links()) == 1)
        self.assertTrue(m.property.__class__.__name__ in LinkedObject._get_links())
        self.assertTrue(n.property.__class__.__name__ in LinkedObject._get_links())
        z = YourLO()
        y = YourLO()
        m.property = n.property = y
        self.assertTrue(m.property is y)
        self.assertTrue(m.property is not z)
        self.assertTrue(m.property is n.property)
        self.assertTrue(len(LinkedObject._get_links()) == 1)
        self.assertTrue(m.property.__class__.__name__ in LinkedObject._get_links())
        self.assertTrue(n.property.__class__.__name__ in LinkedObject._get_links())
        # update_link() must transparently replace y by z wherever it was linked.
        z.update_link()
        self.assertTrue(m.property is not y)
        self.assertTrue(m.property is z)
        self.assertTrue(m.property is n.property)
        self.assertTrue(len(LinkedObject._get_links()) == 1)
        self.assertTrue(m.property.__class__.__name__ in LinkedObject._get_links())
        self.assertTrue(n.property.__class__.__name__ in LinkedObject._get_links())
class MyPO(PersistentObject):
    """PersistentObject fixture exposing a single private attribute."""

    def __init__(self):
        super(MyPO, self).__init__()
        # Surfaces as 'MyProperty' when serialized.
        self._my_property_ = 'It is mine.'
class YourPO(PersistentObject):
    """Second PersistentObject fixture with a differently named attribute."""

    def __init__(self):
        super(YourPO, self).__init__()
        # Surfaces as 'YourProperty' when serialized.
        self._your_property_ = 'It is yours.'
class PersistentTest(TestCase):
    """Tests PersistentObject serialization round trips and list/dict wrappers."""

    def test_obj(self):
        p = MyPO()
        self.assertTrue(type(p) is MyPO)
        self.assertTrue(hasattr(p, '_class_'))

    def test_visible(self):
        # A name is either already visible (stable under _to_visible) or
        # internal (stable under _from_visible).
        for a in ['_class_', 'ObjectName']:
            if MyPO._is_visible(a):
                self.assertEqual(a, MyPO._to_visible(a))
            else:
                self.assertEqual(a, MyPO._from_visible(a))
            # print a, MyPO._is_visible(a), MyPO._to_visible(a), MyPO._from_visible(a)

    def test_obj_to_dict(self):
        d = MyPO().to_serializable(all_properties_flag=True)
        self.assertTrue('Class' in d)
        self.assertTrue(d['MyProperty'] == 'It is mine.')

    def test_dict_to_obj(self):
        # 'Class'/'Module' entries steer which type gets instantiated.
        e = {'Class': 'PersistentObject'}
        self.assertTrue(type(MyPO.from_serializable(e)) is PersistentObject)
        e = {'Class': 'YourPO', 'Module': __name__}
        self.assertTrue(MyPO.from_serializable(e).to_serializable()['Class'] == 'YourPO')

    def test_modify_obj(self):
        q = MyPO()
        q.modify_object('Property', 'Hello World.')
        q.modify_object('MyProperty', 'Hello World.')
        self.assertTrue(q._my_property_ == 'Hello World.')
        d = q.to_serializable()
        self.assertTrue(d['MyProperty'] == 'Hello World.')
        # no direct circle assignment
        self.assertRaises(ValueError, q.modify_object, 'MyProperty', q)

    def test_modify_factory_obj(self):
        # Factory-typed attributes can be modified by name or by instance.
        class MyFactoryObject(FactoryObject):
            pass

        q = MyPO()
        fo = FactoryObject().register()
        fo.register('MyObj')
        mfo = MyFactoryObject().register()
        q._my_property_ = FactoryObject()
        self.assertTrue(q._my_property_ == fo)
        q.modify_object('MyProperty', 'MyFactoryObject')
        self.assertTrue(q._my_property_ == mfo)
        q.modify_object('MyProperty', 'MyObj')
        self.assertTrue(q._my_property_ == fo)
        q.modify_object('MyProperty', MyFactoryObject())
        self.assertTrue(q._my_property_ == mfo)

    def test_attribute_list(self):
        l = MyPO(), YourPO()
        a = AttributeList(l)
        self.assertTrue(a == a.__class__(l))
        self.assertTrue(a == a.__class__(a))

    def test_attribute_list2(self):
        # object_type restricts which element types are accepted.
        ol = (lambda x: AttributeList(x, object_type=MyPO))
        l = MyPO(), MyPO()
        self.assertTrue(len(ol(l)) == 2)
        l = YourPO(), MyPO()
        self.assertRaises(TypeError, ol, l)

    def test_attribute_list3(self):
        # All list mutators must keep membership consistent.
        l = YourPO(), MyPO()
        a = AttributeList(l)
        m = MyPO()
        a[0] = m
        self.assertTrue(m in a)
        a.pop(0)
        self.assertTrue(m not in a)
        a.insert(0, m)
        self.assertTrue(m in a)
        a.pop(0)
        self.assertTrue(m not in a)
        a.append(m)
        self.assertTrue(m in a)
        a.pop(-1)
        self.assertTrue(m not in a)
        a.extend([m])
        self.assertTrue(m in a)
        a.pop(-1)
        self.assertTrue(m not in a)
        # slices removed with migration to python 3
        # a[0:0] = [m]
        # self.assertTrue(m in a)
        # a.pop(0)
        # self.assertTrue(m not in a)
        b = a + [m]
        self.assertTrue(isinstance(b, AttributeList))
        self.assertTrue(m in b)
        b = [m] + a
        self.assertTrue(not isinstance(b, AttributeList))
        self.assertTrue(m in b)

    def test_persistentlist(self):
        # Elements with a to_serializable() method are serialized recursively.
        l = PersistentList(list(range(10)))
        l.append(MyPO())
        s = l.to_serializable()
        self.assertNotEqual(l, s)
        for k in range(len(s)):
            if hasattr(l[k], 'to_serializable'):
                self.assertEqual(s[k], l[k].to_serializable(1))
            else:
                self.assertEqual(s[k], l[k])

    def test_persistentdict(self):
        # Same recursive serialization for dict values.
        l = PersistentDict({'A': 1, 'B': MyPO(), 'C': 'ABC'})
        s = l.to_serializable()
        self.assertNotEqual(l, s)
        for k in s.keys():
            self.assertIn(k, l)
            if hasattr(l[k], 'to_serializable'):
                self.assertEqual(s[k], l[k].to_serializable(1))
            else:
                self.assertEqual(s[k], l[k])
class DataRangeTest(TestCase):
    """Tests the 2d DataRange container: keys, slicing, copy and (de)serialization.

    Fix: test_json asserted ``assertEqual(unicum_json_2, unicum_json_2)`` —
    comparing a value with itself, which always passed — instead of comparing
    the original encoding with the round-tripped one (mirroring the
    ``standard_json`` assertion on the next line).  A dead ``pass`` at the
    end of setUp was removed.
    """

    def setUp(self):
        # 3x4 table with row keys X/Y/Z and column keys A-D.
        h = '', 'A', 'B', 'C', 'D'
        x = 'X', 1, 2, 3, 4
        y = 'Y', 4, 5, 6, 7
        z = 'Z', 7, 8, 9, 10
        self.datarange = DataRange([h, x, y, z])

    def test_init(self):
        # Construction from the serialized form and from another DataRange
        # must both reproduce the same serialization.
        new_datarange = DataRange(self.datarange.to_serializable())
        self.assertEqual(self.datarange.to_serializable(), new_datarange.to_serializable())
        new_datarange = DataRange(self.datarange)
        self.assertEqual(self.datarange.to_serializable(), new_datarange.to_serializable())

    def test_keys(self):
        self.assertEqual(self.datarange.col_keys(), ['A', 'B', 'C', 'D'])
        self.assertEqual(self.datarange.row_keys(), ['X', 'Y', 'Z'])

    def test_get(self):
        self.assertEqual(self.datarange.get(('X', 'A')), 1)
        self.assertEqual(self.datarange.row('X'), [1, 2, 3, 4])
        self.assertEqual(self.datarange.col('D'), [4, 7, 10])
        # self.assertEqual(self.datarange[0][-1], 4)
        # self.assertEqual(self.datarange[0, -1], 4)
        # self.assertEqual(self.datarange['X']['C'], 3)
        self.assertEqual(self.datarange['X', 'C'], 3)

    def test_slice(self):
        self.assertEqual(self.datarange[0:2], self.datarange['X':'Y'])
        self.assertEqual(self.datarange[0:1], [[1, 2, 3, 4]])
        self.assertEqual(self.datarange[(0, 1):(2, 3)], [[2, 3], [5, 6]])
        # self.assertEqual([l[1:3] for l in self.datarange[0:2]], [[2, 3], [5, 6]])
        # self.assertEqual([l[1:3] for l in self.datarange['X':'Y']], [[2, 3], [5, 6]])
        # self.assertEqual([l['B':'C'] for l in self.datarange['X':'Y']], [[2, 3], [5, 6]])
        self.assertEqual(self.datarange[('X', 'B'):('Y', 'C')], [[2, 3], [5, 6]])
        # None in a key tuple means 'from the first' / 'to the last' key.
        self.assertEqual(self.datarange[(None, 'B'):('Y', 'C')], [[2, 3], [5, 6]])
        self.assertEqual(self.datarange[(None, 'B'):('Y', None)], [[2, 3, 4], [5, 6, 7]])

    def test_flatten(self):
        self.assertEqual(DataRange(self.datarange.to_serializable()), self.datarange)
        s = "DataRange([[None, 'A', 'B', 'C', 'D'], ['X', 1, 2, 3, 4], ['Y', 4, 5, 6, 7], ['Z', 7, 8, 9, 10]])"
        self.assertEqual(str(self.datarange), s)

    def test_set(self):
        # self.datarange['X']['C'] = 4
        # self.assertEqual(self.datarange['X']['C'], 4)
        self.datarange['X', 'C'] = 3
        # Assigning to an unknown row key creates the row.
        self.assertTrue('U' not in self.datarange.row_keys())
        self.datarange['U', 'C'] = 4
        self.assertTrue('U' in self.datarange.row_keys())

    def test_append(self):
        self.datarange.append('W', list(range(4)))
        # Appending an existing row key must fail.
        l = lambda: self.datarange.row_append('W', list(range(4)))
        self.assertRaises(KeyError, l)
        self.assertEqual(len(self.datarange), 4)
        self.assertEqual(self.datarange.row_keys()[-1], 'W')
        self.assertEqual(self.datarange.row('W'), list(range(4)))
        self.datarange.row_append('U', list(range(2, 6)))
        self.assertEqual(self.datarange.row('U'), list(range(2, 6)))
        self.datarange.col_append('T', list(range(5)))
        self.assertEqual(self.datarange.col('T'), list(range(5)))

    def test_copy(self):
        self.assertEqual(self.datarange, copy(self.datarange))
        self.assertEqual(type(self.datarange), type(copy(self.datarange)))
        self.assertEqual(self.datarange, deepcopy(self.datarange))
        self.assertEqual(type(self.datarange), type(deepcopy(self.datarange)))

    def test_transpose(self):
        # self.assertEqual(type(list(self.datarange)), list)
        l = [self.datarange.row(r) for r in self.datarange.row_keys()]
        self.assertEqual(self.datarange.item_list, l)
        self.assertEqual(type(self.datarange), type(self.datarange.transpose()))

    def test_pickle(self):
        # Only exercised when dill is available.
        try:
            import dill as pickle
        except ImportError:
            pass
        else:
            dr = DataRange()
            p = pickle.dumps(dr)
            d = pickle.loads(p)
            self.assertEqual(type(d), DataRange)
            self.assertEqual(dr.to_serializable(), d.to_serializable())
            self.assertEqual(type(self.datarange), DataRange)
            p = pickle.dumps(self.datarange)
            d = pickle.loads(p)
            self.assertEqual(self.datarange, d)

    def test_json(self):
        for i in [None, 0, 1, 2]:
            unicum_json = dumps(self.datarange, cls=UnicumJSONEncoder, indent=i)
            standard_json = dumps(self.datarange.to_serializable(), indent=i)
            unicum_json_2 = dumps(DataRange(loads(standard_json)), cls=UnicumJSONEncoder, indent=i)
            standard_json_2 = dumps(DataRange(loads(unicum_json)).to_serializable(), indent=i)
            # fix: previously compared unicum_json_2 with itself.
            self.assertEqual(unicum_json, unicum_json_2)
            self.assertEqual(standard_json, standard_json_2)
class MyVO(VisibleObject):
    """VisibleObject fixture exposing one attribute of every supported kind.

    Each private ``_xxx_prop_`` attribute surfaces as 'XxxProp' in the
    serialized form and is mirrored by a read-only property below.
    """

    def __init__(self, *args):
        super(MyVO, self).__init__(*args)
        self._none_prop_ = None
        self._str_prop_ = 'my str'
        self._int_prop_ = 100
        self._flt_prop_ = 99.01
        self._obj_prop_ = VisibleObject('YourVisibleObject')
        self._obj_list_prop_ = ObjectList()
        self._attr_list_prop_ = AttributeList()
        self._data_range_prop_ = DataRange()

    @property
    def none(self):
        return self._none_prop_

    @property
    def str(self):
        return self._str_prop_

    @property
    def int(self):
        return self._int_prop_

    @property
    def flt(self):
        return self._flt_prop_

    @property
    def obj(self):
        return self._obj_prop_

    @property
    def obj_list(self):
        return self._obj_list_prop_

    @property
    def attr_list(self):
        return self._attr_list_prop_

    @property
    def datarange(self):
        return self._data_range_prop_
class VisibleTest(TestCase):
    """Tests VisibleObject factory lookup, linking and JSON (de)serialization.

    Fixes in test_json: (a) the round-trip check asserted
    ``assertEqual(unicum_json_2, unicum_json_2)`` — a value compared with
    itself, always true — instead of comparing against ``unicum_json``;
    (b) an exactly duplicated encoder-equality assert was dropped.
    """

    def test_factory(self):
        obj = VisibleObject('MyHello')
        obj.register()
        obj.register('Hello')
        self.assertEqual(obj, VisibleObject('Hello'))
        o = MyVO._from_class('MyVO', __name__, 'MyTrueVO').register()
        self.assertEqual(o, VisibleObject('MyTrueVO'))
        # Renaming via the JSON form yields a distinct registered object.
        s = obj.to_json().replace('MyHello', 'NewHello')
        so = VisibleObject.from_json(s).register()
        self.assertEqual(so, VisibleObject('NewHello'))

    def test_link(self):
        obj = MyVO('My')
        first = VisibleObject('FirstProperty')
        obj.modify_object('ObjProp', first)
        second = VisibleObject('FirstProperty')
        self.assertEqual(obj.obj, first)
        self.assertNotEqual(obj.obj, second)
        # update_link() swaps the linked instance in place.
        second.update_link()
        self.assertNotEqual(obj.obj, first)
        self.assertEqual(obj.obj, second)

    def test_persistence(self):
        obj = MyVO('My')
        dic = obj.to_serializable(all_properties_flag=True)
        # test None
        self.assertEqual(dic['NoneProp'], obj.none)
        # test str
        self.assertEqual(dic['StrProp'], obj.str)
        # test int
        self.assertEqual(dic['IntProp'], obj.int)
        # test flt
        self.assertEqual(dic['FltProp'], obj.flt)
        # test FactoryObject
        self.assertEqual(dic['ObjProp'], obj.obj.to_serializable(1))
        # test ObjectList
        self.assertEqual(dic['ObjListProp'], obj.obj_list.to_serializable(1))
        # test AttributeList
        self.assertEqual(dic['AttrListProp'], obj.attr_list.to_serializable(1))
        # test DataRange
        self.assertEqual(dic['DataRangeProp'], obj.datarange.to_serializable(1))
        for k, v in list(obj.to_serializable().items()):
            # print k.ljust(16), str(type(v)).ljust(20), v
            self.assertTrue(isinstance(v, (int, float, str, type(None), list)))
        self.assertTrue(obj.to_json())

    def test_objlist(self):
        names = ['obj' + str(i) for i in range(10)]
        l = ObjectList([MyVO(n) for n in names], object_type=VisibleObject)
        self.assertEqual(l.to_serializable(1), names)
        o = l[0]
        self.assertTrue(o in l)

    def test_attrlist(self):
        names = ['obj' + str(i) for i in range(10)]
        l = [VisibleObject(n) for n in names]
        self.assertTrue(VisibleAttributeList(l))
        l = [object() for n in names]
        self.assertRaises(TypeError, VisibleAttributeList, l)
        l = [MyVO(n).modify_object('DataRangeProp', DataRange()) for n in names]
        self.assertRaises(TypeError, VisibleAttributeList, l)
        # accept Attributes with more than depth 2 -> adding list to value typese
        t = int, float, str, type(None), VisibleObject, list
        a = VisibleAttributeList([o.to_serializable() for o in l], value_types=t)
        for r in a:
            self.assertTrue(isinstance(r, VisibleObject))
        for aa in a:
            aa.register()
        self.assertEqual(a[0], VisibleObject(a[0].to_serializable(1)))
        for r in a.to_serializable():
            for c in r:
                self.assertTrue(dumps(c))  # JSON to_serializable
                self.assertTrue(isinstance(c, (int, float, str, type(None), list)))
        obj = MyVO('My')
        obj.modify_object('ObjListProp', l)
        obj.modify_object('AttrListProp', a)
        self.assertTrue(obj.to_json())

    def test_from_class(self):
        VisibleObject('ME').register()
        o = VisibleObject._from_class('VisibleObject', 'unicum', 'ME')
        self.assertEqual(o, VisibleObject('ME'))

    def test_json(self):
        objs = [1, 2, 3], [1e-1, 1e-1, 1e1, 1e3], 0.12345, 99, 'abc', None, int(12345), {'A': 3, 3: 4, 'a': 'B'}
        # UnicumJSONEncoder must agree with the stdlib encoder on plain data.
        # (fix: this assert was duplicated verbatim; one copy removed.)
        for i in [None, 0, 1, 2]:
            for o in objs:
                self.assertEqual(UnicumJSONEncoder(indent=i).encode(o), JSONEncoder(indent=i).encode(o))
        rng = DataRange([[' ', 'X'], [1, 2], [0, 1e10]])
        obj = MyVO().modify_object('DataRangeProp', rng)
        unicum_json = dumps(obj, cls=UnicumJSONEncoder, indent=2, key_order=("Module", "Name", "Class",))
        self.assertTrue(unicum_json.find("Module") < unicum_json.find("Name"))
        self.assertTrue(unicum_json.find("Name") < unicum_json.find("Class"))
        self.assertEqual(len(unicum_json.split("\n")), 2 + len(obj.to_serializable()) + len(rng) + 2)
        for i in [None, 0, 1, 2]:
            unicum_json = dumps(obj, cls=UnicumJSONEncoder, indent=i)
            standard_json = dumps(obj.to_serializable(), indent=i)
            unicum_json_2 = dumps(MyVO.from_json(standard_json), cls=UnicumJSONEncoder, indent=i)
            standard_json_2 = dumps(MyVO.from_json(unicum_json).to_serializable(), indent=i)
            # fix: previously compared unicum_json_2 with itself.
            self.assertEqual(unicum_json, unicum_json_2)
            self.assertEqual(standard_json, standard_json_2)

    def test_json_1(self):
        e = {'Class': 'VisibleObject', 'Module': 'unicum.visibleobject'}
        j = dumps(e)
        o = VisibleObject.from_json(j)
        self.assertEqual(type(o), VisibleObject)
        e = {'Name': 'my vo', 'Class': 'MyVO', 'Module': __name__, 'StrProp': 'It is mine.'}
        j = dumps(e, indent=2, sort_keys=True)
        o = VisibleObject.from_json(j)
        self.assertTrue(type(o) is MyVO)
        oj = o.to_json(indent=2, property_order=sorted(e.keys()))
        self.assertEqual(oj, j)
        self.assertEqual(o.to_serializable(), VisibleObject.from_serializable(e).to_serializable())
class TestVisibleObject(VisibleObject):
    # Minimal VisibleObject subclass used by SessionTest below; adds two
    # private attributes so instances carry some extra state.
    def __init__(self, *args, **kwargs):
        super(TestVisibleObject, self).__init__(*args, **kwargs)
        self._folder_ = ''  # string-valued private property
        self._float_ = 0.  # numeric private property
class SessionTest(TestCase):
    """Exercises the SessionHandler life cycle against TestVisibleObject."""
    def test_session(self):
        session_name = 'my session'
        object_name = 'my object'
        handler = SessionHandler('unittests', 'TestVisibleObject')
        # no session exists until start_session is called
        self.assertFalse(handler.validate_session(session_name))
        session_id = handler.start_session(session_name)
        self.assertEqual(session_id, session_name)
        self.assertTrue(handler.validate_session(session_id))
        created = handler.call_session(session_id, 'create', {'name': object_name, 'register_flag': True})
        self.assertTrue(created)
        rng = handler.call_session(session_id, 'to_range', {'self': object_name, 'all_properties_flag': True})
        self.assertEqual('Name', rng[0][0])
        self.assertEqual(object_name, rng[0][1])
        self.assertEqual('Class', rng[1][0])
        self.assertEqual('TestVisibleObject', rng[1][1])
        json = handler.call_session(session_id, 'to_json', {'self': object_name, 'all_properties_flag': True})
        self.assertTrue(isinstance(json, str))
        decoded = loads(json)
        self.assertEqual(decoded['Name'], object_name)
        self.assertEqual(decoded['Class'], 'TestVisibleObject')
        # session stays valid until explicitly stopped
        self.assertTrue(handler.validate_session(session_id))
        handler.stop_session(session_id)
if __name__ == '__main__':
    import sys
    # banner strings reused for the header and footer
    rule_eq = '======================================================================'
    rule_dash = '----------------------------------------------------------------------'
    start_time = datetime.now()
    print('')
    print(rule_eq)
    print('')
    print('run %s' % __file__)
    print('in %s' % getcwd())
    print('started at %s' % str(start_time))
    print('')
    print(rule_dash)
    print('')
    suite = TestLoader().loadTestsFromModule(__import__("__main__"))
    testrunner = TextTestRunner(stream=sys.stdout, descriptions=2, verbosity=2)
    testrunner.run(suite)
    print('')
    print(rule_eq)
    print('')
    print('ran %s' % __file__)
    print('in %s' % getcwd())
    print('started at %s' % str(start_time))
    print('finished at %s' % str(datetime.now()))
    print('')
    print(rule_dash)
    print('')
| |
from math import *
from legendre import legendre
twopi = 2.*pi  # full circle, radians
piover2 = .5*pi  # quarter circle, radians
ns_max = 8192  # maximum healpix nside resolution parameter used by the lookup tables
ee = exp(1.)  # Euler's number
def pix2ang_ring(nside, ipix):
    """Return (theta, phi) of the center of pixel ``ipix`` in RING ordering.

    Translated from the reference healpix C/Fortran routine.
    @param nside: healpix resolution parameter (power of two)
    @param ipix: pixel index in [0, 12*nside**2)
    @return: (theta, phi) colatitude and longitude in radians
    """
    npix = 12*nside*nside
    ipix1 = ipix + 1  # 1-based pixel index, as in the original Fortran
    nl2 = 2*nside
    nl4 = 4*nside
    ncap = 2*nside*(nside-1)  # points in each polar cap, =0 for nside=1
    fact1 = 1.5*nside
    fact2 = 3.0*nside*nside
    if ipix1 <= ncap:
        # North polar cap
        hip = ipix1/2.
        fihip = floor(hip)
        iring = int(floor(sqrt(hip - sqrt(fihip))) + 1)  # ring counted from North pole
        iphi = ipix1 - 2*iring*(iring - 1)
        theta = acos(1. - iring*iring / fact2)
        phi = (1.*iphi - 0.5) * pi/(2.*iring)
    elif ipix1 <= nl2*(5*nside+1):
        # Equatorial region
        ip = ipix1 - ncap - 1
        iring = int(floor(ip / nl4) + nside)  # ring counted from North pole
        iphi = int(fmod(ip, nl4) + 1)
        fodd = 0.5 * (1 + fmod(float(iring+nside), 2))  # 1 if iring+nside is odd, 1/2 otherwise
        theta = acos((nl2 - iring) / fact1)
        phi = (1.*iphi - fodd) * pi / (2.*nside)
    else:
        # South polar cap
        ip = npix - ipix1 + 1
        hip = ip/2.
        # floor instead of 1.* (historical bug fix kept from the C source)
        fihip = floor(hip)
        iring = int(floor(sqrt(hip - sqrt(fihip))) + 1)  # ring counted from South pole
        iphi = int((4.*iring + 1 - (ip - 2.*iring*(iring-1))))
        theta = acos(-1. + iring*iring / fact2)
        phi = (1.*iphi - 0.5) * pi/(2.*iring)
    return theta, phi
def ang2pix_ring(nside, theta, phi):
    """Return the RING-ordered pixel index containing direction (theta, phi).

    Translated from the reference healpix C/Fortran routine.
    @param nside: healpix resolution parameter (power of two)
    @param theta: colatitude in radians, in [0, pi]
    @param phi: longitude in radians
    @return: pixel index in [0, 12*nside**2)
    """
    z0 = 2.0/3.0
    z = cos(theta)
    za = fabs(z)
    # fold phi into [0, 2*pi)
    if phi >= 2.*pi:
        phi = phi - 2.*pi
    if phi < 0.:
        phi = phi + 2.*pi
    tt = phi / (.5*pi)  # in [0, 4)
    nl4 = 4*nside
    ncap = 2*nside*(nside-1)  # number of pixels in the north polar cap
    npix = 12*nside*nside
    if za <= z0:
        # Equatorial region
        jp = int(floor(nside*(0.5 + tt - z*0.75)))  # index of ascending edge line
        jm = int(floor(nside*(0.5 + tt + z*0.75)))  # index of descending edge line
        ir = nside + 1 + jp - jm  # in {1, 2n+1}, ring number counted from z=2/3
        kshift = 0
        if fmod(ir, 2) == 0.:
            kshift = 1  # kshift=1 if ir even, 0 otherwise
        ip = int(floor((jp + jm - nside + kshift + 1) / 2) + 1)  # in {1, 4n}
        if ip > nl4:
            ip = ip - nl4
        ipix1 = ncap + nl4*(ir-1) + ip
    else:
        # Polar caps
        tp = tt - floor(tt)  # MOD(tt, 1.0)
        tmp = sqrt(3.*(1. - za))
        jp = int(floor(nside * tp * tmp))  # increasing edge line index
        jm = int(floor(nside * (1. - tp) * tmp))  # decreasing edge line index
        ir = jp + jm + 1  # ring number counted from the closest pole
        ip = int(floor(tt * ir) + 1)  # in {1, 4*ir}
        if ip > 4*ir:
            ip = ip - 4*ir
        ipix1 = 2*ir*(ir-1) + ip
        if z <= 0.:
            ipix1 = npix - 2*ir*(ir+1) + ip
    return ipix1 - 1
def healRDfullsky(res):
    """Write RA/dec of all NESTED pixel centers to healRDfullsky<res>.dat.

    Useful e.g. for plotting purposes.
    @param res: healpix nside resolution parameter
    @return: True on completion
    """
    npix = 12*res*res
    h = healpix()
    # with-statement guarantees the file is closed even if a conversion fails
    with open('healRDfullsky'+str(res)+'.dat', 'w') as fo:
        for i in range(0, npix):
            th, phi = h.pix2ang_nest(res, i)
            ra, dec = thphi2radec(th, phi)
            fo.write(str(ra)+' '+str(dec)+'\n')
    return True
def pixl_up(file, reso, resn):
    """Re-bin a per-pixel map from resolution ``reso`` to ``resn`` (NESTED order).

    Reads one value per line from <file><reso>.dat, sums the values into the
    pixels of the new resolution, and writes <file><resn>.dat.
    @return: True on completion
    """
    h = healpix()
    # close the input promptly instead of leaking the file handle
    with open(file+str(reso)+'.dat') as f:
        ol = [float(line) for line in f]
    npix = 12*resn*resn
    nl = [0]*npix
    for i in range(0, len(ol)):
        th, phi = h.pix2ang_nest(reso, i)
        p = int(h.ang2pix_nest(resn, th, phi))
        nl[p] += ol[i]
    with open(file+str(resn)+'.dat', 'w') as fo:
        for i in range(0, npix):
            fo.write(str(nl[i])+'\n')
    return True
def ranpixl_up(file, reso, resn):
    """Re-bin a sparse 'ranHeal' pixel file from resolution ``reso`` to ``resn``.

    Input lines are '<pixel> <value>' pairs; the output keeps only pixels with
    a non-zero accumulated value.
    @return: True on completion
    """
    h = healpix()
    ol = [0]*(12*reso*reso)
    with open('ranHeal_pix'+str(reso)+file+'.dat') as f:
        for line in f:
            ln = line.split()
            ol[int(ln[0])] += float(ln[1])
    npix = 12*resn*resn
    nl = [0]*npix
    for i in range(0, len(ol)):
        th, phi = h.pix2ang_nest(reso, i)
        p = int(h.ang2pix_nest(resn, th, phi))
        nl[p] += ol[i]
    with open('ranHeal_pix'+str(resn)+file+'.dat', 'w') as fo:
        for i in range(0, npix):
            if nl[i] != 0:
                fo.write(str(i)+' '+str(nl[i])+'\n')
    return True
def mkhealpixl_simp(file, res=256, rc=0, dc=1, zc=2, md='csv'):
    """Histogram the objects listed in <file>.<md> onto a NESTED healpix map.

    @param file: input file base name (without extension)
    @param res: healpix nside resolution parameter
    @param rc: column index of RA (degrees)
    @param dc: column index of dec (degrees)
    @param zc: column index of redshift (currently unused; kept for interface)
    @param md: file extension; 'csv' splits on commas, anything else on whitespace
    @return: True on completion
    """
    h = healpix()
    npix = 12*res**2
    pixl = [0]*npix
    n = 0
    with open(file+'.'+md) as f:
        f.readline()  # skip the header line
        for line in f:
            if line[0] != '#':
                ln = line.split(',') if md == 'csv' else line.split()
                # skip malformed rows instead of silently swallowing every error
                try:
                    ra, dec = float(ln[rc]), float(ln[dc])
                except (ValueError, IndexError):
                    continue
                th, phi = radec2thphi(ra, dec)
                p = int(h.ang2pix_nest(res, th, phi))
                pixl[p] += 1.
                n += 1
    # BUG FIX: 'print n' was Python-2-only syntax
    print(n)
    with open(file+'hpixall'+str(res)+'.dat', 'w') as fo:
        # NOTE: the original also computed pix2ang_nest per pixel here and
        # discarded the result; that dead work is removed.
        for i in range(0, npix):
            fo.write(str(pixl[i])+'\n')
    return True
def thphi2le(theta, phi):
    """Convert sphere coordinates (theta, phi) in radians to SDSS survey
    coordinates (lambda, eta) in degrees."""
    deg2Rad = pi/180.0
    # survey coordinate system definition
    surveyCenterDEC = 32.5
    surveyCenterRA = 185.0
    etaPole = deg2Rad*surveyCenterDEC
    node = deg2Rad*(surveyCenterRA - 90.0)
    rarad = phi
    decrad = -(theta - .5*pi)
    # cartesian components relative to the survey node
    x = cos(rarad - node)*cos(decrad)
    y = sin(rarad - node)*cos(decrad)
    z = sin(decrad)
    lam = -1.0*asin(x)/deg2Rad
    eta = (atan2(z, y) - etaPole)/deg2Rad
    # wrap eta into (-180, 180]
    if eta < -180.0:
        eta += 360.0
    if eta > 180.0:
        eta -= 360.0
    return (lam, eta)
def le2thetaphi(lam, eta):
    """Convert SDSS survey coordinates (lambda, eta) in degrees back to sphere
    coordinates (theta, phi) in radians."""
    deg2Rad = pi/180.0
    surveyCenterDEC = 32.5
    surveyCenterRA = 185.0
    etaPole = deg2Rad*surveyCenterDEC
    node = deg2Rad*(surveyCenterRA - 90.0)
    # cartesian components in the survey frame
    x = -1.0*sin(lam*deg2Rad)
    y = cos(lam*deg2Rad)*cos(eta*deg2Rad + etaPole)
    z = cos(lam*deg2Rad)*sin(eta*deg2Rad + etaPole)
    ra = atan2(y, x) + node
    if ra < 0.0:
        ra += 2.*pi
    dec = asin(z)
    return -dec + .5*pi, ra
def radec2thphi(ra, dec):
    """Convert RA/dec in degrees to (theta, phi) in radians, theta = colatitude."""
    theta = (-dec + 90.)*pi/180.
    phi = ra*pi/180.
    return theta, phi
def thphi2radec(theta, phi):
    """Convert (theta, phi) in radians to (RA, dec) in degrees."""
    ra = 180./pi*phi
    dec = -(180./pi*theta - 90)
    return ra, dec
class healpix:
    """NESTED-ordering healpix pixelization utilities.

    Translated from C by Ashley J. Ross; no guarantees but everything has
    checked out so far. The bit-interleaving lookup tables between pixel
    number and in-face (x, y) coordinates are built once at construction.

    Fixes relative to the original translation:
    - ring2nest referenced undefined module globals x2pix/y2pix (NameError);
      it now uses the instance tables.
    - Python-2-only ``long`` and implicit integer division replaced with
      ``int`` and ``//`` (identical results for the non-negative integers
      involved), so the class also works under Python 3.
    """
    # ring/phi offsets of the lowest corner of each of the 12 base faces
    JRLL = [2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
    JPLL = [1, 3, 5, 7, 0, 2, 4, 6, 1, 3, 5, 7]
    NS_MAX = 8192  # maximum supported nside; size scale of the edge-line grid

    def __init__(self):
        # precompute both directions of the bit-interleaving tables
        self.pix2x, self.pix2y = self.mk_pix2xy()
        self.x2pix, self.y2pix = self.mk_xy2pix()

    def ang2pix_nest(self, nside, theta, phi):
        """Return the NESTED pixel index at ``nside`` containing direction
        (theta, phi) (radians, theta = colatitude)."""
        ns_max = self.NS_MAX
        z = cos(theta)
        za = fabs(z)
        z0 = 2./3.
        # fold phi into [0, 2*pi)
        if phi >= 2.*pi:
            phi = phi - 2.*pi
        if phi < 0.:
            phi = phi + 2.*pi
        tt = phi / (.5*pi)  # in [0, 4)
        if za <= z0:
            # equatorial region: edge line indices increase with phi
            jp = int(floor(ns_max*(0.5 + tt - z*0.75)))  # ascending edge line
            jm = int(floor(ns_max*(0.5 + tt + z*0.75)))  # descending edge line
            # find the face
            ifp = jp // ns_max  # in {0,4}
            ifm = jm // ns_max
            if ifp == ifm:
                face_num = int(fmod(ifp, 4)) + 4      # faces 4 to 7
            elif ifp < ifm:
                face_num = int(fmod(ifp, 4))          # (half-)faces 0 to 3
            else:
                face_num = int(fmod(ifm, 4)) + 8      # (half-)faces 8 to 11
            ix = int(fmod(jm, ns_max))
            iy = ns_max - int(fmod(jp, ns_max)) - 1
        else:
            # polar region, za > 2/3
            ntt = int(floor(tt))
            if ntt >= 4:
                ntt = 3
            tp = tt - ntt
            tmp = sqrt(3.*(1. - za))  # in ]0,1]
            # edge line indices measured from the closest pole:
            # jp goes toward the pole as phi increases, jm goes away from it
            jp = int(floor(ns_max * tp * tmp))
            jm = int(floor(ns_max * (1. - tp) * tmp))
            if jp >= ns_max:
                jp = ns_max - 1
            if jm >= ns_max:
                jm = ns_max - 1
            # find the face and the pixel's (x, y)
            if z >= 0:
                face_num = ntt        # in {0,3}
                ix = ns_max - jm - 1
                iy = ns_max - jp - 1
            else:
                face_num = ntt + 8    # in {8,11}
                ix = jp
                iy = jm
        ix_low = int(fmod(ix, 128))
        ix_hi = ix // 128
        iy_low = int(fmod(iy, 128))
        iy_hi = iy // 128
        # interleave the bits via the lookup tables, then degrade to nside
        ipf = (self.x2pix[ix_hi] + self.y2pix[iy_hi]) * (128*128) \
            + (self.x2pix[ix_low] + self.y2pix[iy_low])
        ipf = ipf // ((ns_max // nside)**2)  # in {0, nside**2 - 1}
        return ipf + face_num*nside*nside    # in {0, 12*nside**2 - 1}

    def mk_xy2pix(self):
        """Build the lookup tables x2pix/y2pix for x (or y) in {0,127}:
        if i = sum_p b_p*2^p then x2pix[i] = sum_p b_p*4^p and
        y2pix[i] = 2*x2pix[i]; their sum is the interleaved pixel number."""
        x2pix = [0]*128
        y2pix = [0]*128
        for I in range(1, 129):
            J = I - 1  # pixel number
            K = 0
            IP = 1
            while J != 0:
                ID = int(fmod(J, 2))
                J = J // 2
                K = IP*ID + K
                IP = IP*4
            x2pix[I-1] = K
            y2pix[I-1] = 2*K
        return x2pix, y2pix

    def mk_pix2xy(self):
        """Build the lookup tables pix2x/pix2y extracting the even and odd
        interleaved bits of a 10-bit pixel number."""
        pix2x = [0]*1024
        pix2y = [0]*1024
        for kpix in range(0, 1024):
            jpix = kpix
            IX = 0
            IY = 0
            IP = 1  # bit position (in x and y)
            while jpix != 0:  # go through all the bits
                ID = int(fmod(jpix, 2))  # bit value (in kpix), goes in ix
                jpix = jpix // 2
                IX = ID*IP + IX
                ID = int(fmod(jpix, 2))  # bit value (in kpix), goes in iy
                jpix = jpix // 2
                IY = ID*IP + IY
                IP = 2*IP  # next bit (in x and y)
            pix2x[kpix] = IX  # in 0,31
            pix2y[kpix] = IY  # in 0,31
        return pix2x, pix2y

    def pix2ang_nest(self, nside, ipix):
        """Return (theta, phi) of the center of NESTED pixel ``ipix``.

        Returns the string 'ipix out of range' for an invalid index (kept for
        backward compatibility with the original interface).
        """
        jrll = self.JRLL
        jpll = self.JPLL
        npix = 12*nside*nside
        if ipix < 0 or ipix > npix - 1:
            return 'ipix out of range'
        fn = 1.*nside
        fact1 = 1./(3.*fn*fn)
        fact2 = 2./(3.*fn)
        nl4 = 4*nside
        # face number and pixel number within the face
        npface = nside*nside
        face_num = ipix // npface                  # in {0,11}
        ipf = int(fmod(ipix, npface))              # in {0, npface-1}
        # (x, y) on the face, decoded 10 bits at a time
        ip_low = int(fmod(ipf, 1024))              # last 10 bits
        ip_trunc = ipf // 1024
        ip_med = int(fmod(ip_trunc, 1024))         # next 10 bits
        ip_hi = ip_trunc // 1024                   # high-weight 10 bits
        ix = 1024*self.pix2x[ip_hi] + 32*self.pix2x[ip_med] + self.pix2x[ip_low]
        iy = 1024*self.pix2y[ip_hi] + 32*self.pix2y[ip_med] + self.pix2y[ip_low]
        # (vertical, horizontal) coordinates
        jrt = ix + iy  # 'vertical' in {0, 2*(nside-1)}
        jpt = ix - iy  # 'horizontal' in {-nside+1, nside-1}
        # z coordinate on the sphere
        jr = jrll[face_num]*nside - jrt - 1  # ring number in {1, 4*nside-1}
        nr = nside  # equatorial region (the most frequent)
        z = (2*nside - jr)*fact2
        kshift = int(fmod(jr - nside, 2))
        if jr < nside:       # north polar cap
            nr = jr
            z = 1. - nr*nr*fact1
            kshift = 0
        elif jr > 3*nside:   # south polar cap
            nr = nl4 - jr
            z = -1. + nr*nr*fact1
            kshift = 0
        theta = acos(z)
        # phi coordinate on the sphere, in [0, 2*pi]
        jp = (jpll[face_num]*nr + jpt + 1 + kshift) // 2  # 'phi' number in ring, {1, 4*nr}
        if jp > nl4:
            jp = jp - nl4
        if jp < 1:
            jp = jp + nl4
        phi = (jp - (kshift + 1)*0.5) * ((.5*pi) / nr)
        return theta, phi

    def ring2nest(self, nside, p_ring):
        """Convert a RING-ordered pixel index ``p_ring`` to its NESTED index."""
        jrll = self.JRLL
        jpll = self.JPLL
        # rebuild the (x, y) -> pixel tables if they were somehow cleared
        # (the original referenced undefined globals x2pix/y2pix here)
        if self.x2pix[127] <= 0:
            self.x2pix, self.y2pix = self.mk_xy2pix()
        nl2 = 2*nside
        nl4 = 4*nside
        npix = 12*nside*nside      # total number of points
        ncap = 2*nside*(nside-1)   # points in each polar cap, =0 for nside=1
        ipring1 = p_ring + 1
        # find the ring number, the position in the ring, and the face number
        if ipring1 <= ncap:
            # north polar cap
            hip = ipring1/2.
            fihip = int(floor(hip))
            irn = int(floor(sqrt(hip - sqrt(fihip))) + 1)  # ring counted from North pole
            iphi = ipring1 - 2*irn*(irn - 1)
            kshift = 0
            nr = irn  # 1/4 of the number of points on the current ring
            face_num = (iphi - 1) // irn  # in {0,3}
        elif ipring1 <= nl2*(5*nside+1):
            # equatorial region
            ip = ipring1 - ncap - 1
            irn = int(floor(ip / nl4) + nside)  # ring counted from North pole
            iphi = int(fmod(ip, nl4) + 1)
            kshift = int(fmod(irn + nside, 2))  # 1 if irn+nside is odd, 0 otherwise
            nr = nside
            ire = irn - nside + 1  # in {1, 2*nside+1}
            irm = nl2 + 2 - ire
            ifm = (iphi - ire//2 + nside - 1) // nside  # face boundaries
            ifp = (iphi - irm//2 + nside - 1) // nside
            if ifp == ifm:            # faces 4 to 7
                face_num = int(fmod(ifp, 4) + 4)
            elif ifp + 1 == ifm:      # (half-)faces 0 to 3
                face_num = ifp
            elif ifp - 1 == ifm:      # (half-)faces 8 to 11
                face_num = ifp + 7
        else:
            # south polar cap
            ip = npix - ipring1 + 1
            hip = ip/2.
            fihip = floor(hip)
            irs = int(floor(sqrt(hip - sqrt(fihip))) + 1)  # ring counted from South pole
            iphi = 4*irs + 1 - (ip - 2*irs*(irs-1))
            kshift = 0
            nr = irs
            irn = nl4 - irs
            face_num = (iphi - 1) // irs + 8  # in {8,11}
        # (x, y) on the face
        irt = irn - jrll[face_num]*nside + 1          # in {-nside+1, 0}
        ipt = 2*iphi - jpll[face_num]*nr - kshift - 1  # in {-nside+1, nside-1}
        if ipt >= nl2:
            ipt = ipt - 8*nside  # for face #4
        ix = (ipt - irt) // 2
        iy = -(ipt + irt) // 2
        ix_low = int(fmod(ix, 128))
        ix_hi = ix // 128
        iy_low = int(fmod(iy, 128))
        iy_hi = iy // 128
        ipf = (self.x2pix[ix_hi] + self.y2pix[iy_hi]) * (128*128) \
            + (self.x2pix[ix_low] + self.y2pix[iy_low])
        return ipf + face_num*nside*nside  # in {0, 12*nside**2 - 1}
| |
from pycharm_generator_utils.util_methods import *
from pycharm_generator_utils.constants import *
import keyword, re
class emptylistdict(dict):
    """defaultdict not available before 2.5; simplest reimplementation using [] as default"""
    def __getitem__(self, item):
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            # first access: store and hand back a fresh list
            fresh = []
            self.__setitem__(item, fresh)
            return fresh
class Buf(object):
    """Buffers output fragments in a list; can later write them to a file.
    Indentation strings are provided externally by the ``indenter``."""
    def __init__(self, indenter):
        self.data = []
        self.indenter = indenter

    def put(self, data):
        # empty / None fragments are dropped
        if data:
            self.data.append(ensureUnicode(data))

    def out(self, indent, *what):
        """Output the arguments, indenting as needed, and adding an eol"""
        self.put(self.indenter.indent(indent))
        for item in what:
            self.put(item)
        self.put("\n")

    def flush_bytes(self, outfile):
        for chunk in self.data:
            outfile.write(chunk.encode(OUT_ENCODING, "replace"))

    def flush_str(self, outfile):
        for chunk in self.data:
            outfile.write(chunk)

    # choose the flush flavor matching the interpreter's string model
    if version[0] < 3:
        flush = flush_bytes
    else:
        flush = flush_str

    def isEmpty(self):
        return len(self.data) == 0
#noinspection PyUnresolvedReferences,PyBroadException
class ModuleRedeclarator(object):
    def __init__(self, module, outfile, mod_filename, indent_size=4, doing_builtins=False):
        """
        Create new instance.
        @param module module to restore.
        @param outfile output file, must be open and writable.
        @param mod_filename filename of binary module (the .dll or .so)
        @param indent_size amount of space characters per indent
        @param doing_builtins True when restoring the builtins module itself
        """
        self.module = module
        self.outfile = outfile # where we finally write
        self.mod_filename = mod_filename
        # we write things into buffers out-of-order
        self.header_buf = Buf(self)
        self.imports_buf = Buf(self)
        self.functions_buf = Buf(self)
        self.classes_buf = Buf(self)
        self.footer_buf = Buf(self)
        self.indent_size = indent_size
        self._indent_step = " " * self.indent_size
        #
        self.imported_modules = {"": the_builtins} # explicit module imports: {"name": module}
        self.hidden_imports = {} # {'real_mod_name': 'alias'}; we alias names with "__" since we don't want them exported
        # ^ used for things that we don't re-export but need to import, e.g. certain base classes in gnome.
        self._defined = {} # stores True for every name defined so far, to break circular refs in values
        self.doing_builtins = doing_builtins
        self.ret_type_cache = {}  # (module name, attr) -> rendered return-type reference
        self.used_imports = emptylistdict() # qual_mod_name -> [imported_names,..]: actually used imported names
def _initializeQApp(self):
try: # QtGui should be imported _before_ QtCore package.
# This is done for the QWidget references from QtCore (such as QSignalMapper). Known bug in PyQt 4.7+
# Causes "TypeError: C++ type 'QWidget*' is not supported as a native Qt signal type"
import PyQt4.QtGui
except ImportError:
pass
# manually instantiate and keep reference to singleton QCoreApplication (we don't want it to be deleted during the introspection)
# use QCoreApplication instead of QApplication to avoid blinking app in Dock on Mac OS
try:
from PyQt4.QtCore import QCoreApplication
self.app = QCoreApplication([])
return
except ImportError:
pass
try:
from PyQt5.QtCore import QCoreApplication
self.app = QCoreApplication([])
except ImportError:
pass
def indent(self, level):
"""Return indentation whitespace for given level."""
return self._indent_step * level
def flush(self):
for buf in (self.header_buf, self.imports_buf, self.functions_buf, self.classes_buf, self.footer_buf):
buf.flush(self.outfile)
    # Some builtin classes effectively change __init__ signature without overriding it.
    # This callable serves as a placeholder to be replaced via REDEFINED_BUILTIN_SIGS
    def fake_builtin_init(self):
        pass # just a callable, sig doesn't matter
    fake_builtin_init.__doc__ = object.__init__.__doc__ # this forces class's doc to be used instead
def find_imported_name(self, item):
"""
Finds out how the item is represented in imported modules.
@param item what to check
@return qualified name (like "sys.stdin") or None
"""
# TODO: return a pair, not a glued string
if not isinstance(item, SIMPLEST_TYPES):
for mname in self.imported_modules:
m = self.imported_modules[mname]
for inner_name in m.__dict__:
suspect = getattr(m, inner_name)
if suspect is item:
if mname:
mname += "."
elif self.module is the_builtins: # don't short-circuit builtins
return None
return mname + inner_name
return None
_initializers = (
(dict, "{}"),
(tuple, "()"),
(list, "[]"),
)
def invent_initializer(self, a_type):
"""
Returns an innocuous initializer expression for a_type, or "None"
"""
for initializer_type, r in self._initializers:
if initializer_type == a_type:
return r
# NOTE: here we could handle things like defaultdict, sets, etc if we wanted
return "None"
    def fmt_value(self, out, p_value, indent, prefix="", postfix="", as_name=None, seen_values=None):
        """
        Formats and outputs value (it occupies an entire line or several lines).
        Emits valid Python source where possible; self-references and values
        whose repr cannot be retrieved are replaced by marker strings.
        @param out function that does output (a Buf.out)
        @param p_value the value.
        @param indent indent level.
        @param prefix text to print before the value
        @param postfix text to print after the value
        @param as_name hints which name are we trying to print; helps with circular refs.
        @param seen_values a list of keys we've seen if we're processing a dict
        """
        # marker strings substituted for unprintable values
        SELF_VALUE = "<value is a self-reference, replaced by this string>"
        ERR_VALUE = "<failed to retrieve the value>"
        if isinstance(p_value, SIMPLEST_TYPES):
            out(indent, prefix, reliable_repr(p_value), postfix)
        else:
            # IronPython's find_imported_name is unreliable, so skip it there
            if sys.platform == "cli":
                imported_name = None
            else:
                imported_name = self.find_imported_name(p_value)
            if imported_name:
                # the value is visible in an imported module: refer to it by name
                out(indent, prefix, imported_name, postfix)
                # TODO: kind of self.used_imports[imported_name].append(p_value) but split imported_name
                # else we could potentially return smth we did not otherwise import. but not likely.
            else:
                if isinstance(p_value, (list, tuple)):
                    # sequences: recurse element by element, tracking self-refs
                    if not seen_values:
                        seen_values = [p_value]
                    if len(p_value) == 0:
                        out(indent, prefix, repr(p_value), postfix)
                    else:
                        if isinstance(p_value, list):
                            lpar, rpar = "[", "]"
                        else:
                            lpar, rpar = "(", ")"
                        out(indent, prefix, lpar)
                        for value in p_value:
                            if value in seen_values:
                                value = SELF_VALUE
                            elif not isinstance(value, SIMPLEST_TYPES):
                                seen_values.append(value)
                            self.fmt_value(out, value, indent + 1, postfix=",", seen_values=seen_values)
                        out(indent, rpar, postfix)
                elif isinstance(p_value, dict):
                    # mappings: emit sorted keys where sortable, recurse on both
                    # keys and values
                    if len(p_value) == 0:
                        out(indent, prefix, repr(p_value), postfix)
                    else:
                        if not seen_values:
                            seen_values = [p_value]
                        out(indent, prefix, "{")
                        keys = list(p_value.keys())
                        try:
                            keys.sort()
                        except TypeError:
                            pass # unsortable keys happen, e,g, in py3k _ctypes
                        for k in keys:
                            value = p_value[k]
                            try:
                                is_seen = value in seen_values
                            except:
                                is_seen = False
                                value = ERR_VALUE
                            if is_seen:
                                value = SELF_VALUE
                            elif not isinstance(value, SIMPLEST_TYPES):
                                seen_values.append(value)
                            if isinstance(k, SIMPLEST_TYPES):
                                self.fmt_value(out, value, indent + 1, prefix=repr(k) + ": ", postfix=",",
                                               seen_values=seen_values)
                            else:
                                # both key and value need fancy formatting
                                self.fmt_value(out, k, indent + 1, postfix=": ", seen_values=seen_values)
                                self.fmt_value(out, value, indent + 2, seen_values=seen_values)
                                out(indent + 1, ",")
                        out(indent, "}", postfix)
                else: # something else, maybe representable
                    # look up this value in the module.
                    if sys.platform == "cli":
                        out(indent, prefix, "None", postfix)
                        return
                    found_name = ""
                    for inner_name in self.module.__dict__:
                        if self.module.__dict__[inner_name] is p_value:
                            found_name = inner_name
                            break
                    if self._defined.get(found_name, False):
                        # already emitted under this name: refer to it
                        out(indent, prefix, found_name, postfix)
                    else:
                        # a forward / circular declaration happens
                        notice = ""
                        real_value = cleanup(repr(p_value))
                        if found_name:
                            if found_name == as_name:
                                notice = " # (!) real value is %r" % real_value
                                real_value = "None"
                            else:
                                notice = " # (!) forward: %s, real value is %r" % (found_name, real_value)
                        if SANE_REPR_RE.match(real_value):
                            out(indent, prefix, real_value, postfix, notice)
                        else:
                            if not found_name:
                                notice = " # (!) real value is %r" % real_value
                            out(indent, prefix, "None", postfix, notice)
    def get_ret_type(self, attr):
        """
        Returns a return type string as given by T_RETURN in tokens, or None.
        Looks up ``attr`` first in the known RET_TYPE table, then in the module
        being restored, then in all imported modules (memoized per module).
        """
        if attr:
            ret_type = RET_TYPE.get(attr, None)
            if ret_type:
                return ret_type
            thing = getattr(self.module, attr, None)
            if thing:
                if not isinstance(thing, type) and is_callable(thing): # a function
                    return None # TODO: maybe divinate a return type; see pygame.mixer.Channel
                return attr
            # adds no noticeable slowdown, I did measure. dch.
            for im_name, im_module in self.imported_modules.items():
                cache_key = (im_name, attr)
                cached = self.ret_type_cache.get(cache_key, None)
                if cached:
                    return cached
                ret_type = getattr(im_module, attr, None)
                if ret_type:
                    if isinstance(ret_type, type):
                        # detect a constructor
                        constr_args = detect_constructor(ret_type)
                        if constr_args is None:
                            constr_args = "*(), **{}" # a silly catch-all constructor
                        reference = "%s(%s)" % (attr, constr_args)
                    elif is_callable(ret_type): # a function, classes are ruled out above
                        return None
                    else:
                        reference = attr
                    if im_name:
                        result = "%s.%s" % (im_name, reference)
                    else: # built-in
                        result = reference
                    # memoize the rendered reference for this (module, attr) pair
                    self.ret_type_cache[cache_key] = result
                    return result
            # TODO: handle things like "[a, b,..] and (foo,..)"
        return None
    # notes attached to signatures reconstructed from docstrings
    SIG_DOC_NOTE = "restored from __doc__"
    SIG_DOC_UNRELIABLY = "NOTE: unreliably restored from __doc__ "
    def restore_by_docstring(self, signature_string, class_name, deco=None, ret_hint=None):
        """
        Reconstruct a parameter sequence from a docstring signature fragment.
        @param signature_string: parameter list extracted from the doc string.
        @param class_name: name of the containing class, or None
        @param deco: decorator to use
        @param ret_hint: return type hint, if available
        @return (reconstructed_spec, return_type, note) or (None, _, _) if failed.
        """
        action("restoring func %r of class %r", signature_string, class_name)
        # parse
        parsing_failed = False
        ret_type = None
        try:
            # strict parsing
            tokens = paramSeqAndRest.parseString(signature_string, True)
            ret_name = None
            if tokens:
                # the return spec, if any, is the trailing token
                ret_t = tokens[-1]
                if ret_t[0] is T_RETURN:
                    ret_name = ret_t[1]
                ret_type = self.get_ret_type(ret_name) or self.get_ret_type(ret_hint)
        except ParseException:
            # it did not parse completely; scavenge what we can
            parsing_failed = True
            tokens = []
            try:
                # most unrestrictive parsing
                tokens = paramSeq.parseString(signature_string, False)
            except ParseException:
                pass
        #
        seq = transform_seq(tokens)
        # add safe defaults for unparsed
        if parsing_failed:
            doc_node = self.SIG_DOC_UNRELIABLY
            starred = None
            double_starred = None
            for one in seq:
                if type(one) is str:
                    if one.startswith("**"):
                        double_starred = one
                    elif one.startswith("*"):
                        starred = one
            if not starred:
                seq.append("*args")
            if not double_starred:
                seq.append("**kwargs")
        else:
            doc_node = self.SIG_DOC_NOTE
        # add 'self' if needed YYY
        if class_name and (not seq or seq[0] != 'self'):
            first_param = propose_first_param(deco)
            if first_param:
                seq.insert(0, first_param)
        seq = make_names_unique(seq)
        return (seq, ret_type, doc_node)
def parse_func_doc(self, func_doc, func_id, func_name, class_name, deco=None, sip_generated=False):
"""
@param func_doc: __doc__ of the function.
@param func_id: name to look for as identifier of the function in docstring
@param func_name: name of the function.
@param class_name: name of the containing class, or None
@param deco: decorator to use
@return (reconstructed_spec, return_literal, note) or (None, _, _) if failed.
"""
if sip_generated:
overloads = []
for part in func_doc.split('\n'):
signature = func_id + '('
i = part.find(signature)
if i >= 0:
overloads.append(part[i + len(signature):])
if len(overloads) > 1:
docstring_results = [self.restore_by_docstring(overload, class_name, deco) for overload in overloads]
ret_types = []
for result in docstring_results:
rt = result[1]
if rt and rt not in ret_types:
ret_types.append(rt)
if ret_types:
ret_literal = " or ".join(ret_types)
else:
ret_literal = None
param_lists = [result[0] for result in docstring_results]
spec = build_signature(func_name, restore_parameters_for_overloads(param_lists))
return (spec, ret_literal, "restored from __doc__ with multiple overloads")
# find the first thing to look like a definition
prefix_re = re.compile("\s*(?:(\w+)[ \\t]+)?" + func_id + "\s*\(") # "foo(..." or "int foo(..."
match = prefix_re.search(func_doc) # Note: this and previous line may consume up to 35% of time
# parse the part that looks right
if match:
ret_hint = match.group(1)
params, ret_literal, doc_note = self.restore_by_docstring(func_doc[match.end():], class_name, deco, ret_hint)
spec = func_name + flatten(params)
return (spec, ret_literal, doc_note)
else:
return (None, None, None)
def is_predefined_builtin(self, module_name, class_name, func_name):
return self.doing_builtins and module_name == BUILTIN_MOD_NAME and (
class_name, func_name) in PREDEFINED_BUILTIN_SIGS
    def redo_function(self, out, p_func, p_name, indent, p_class=None, p_modname=None, classname=None, seen=None):
        """
        Restore function argument list as best we can.
        @param out output function of a Buf
        @param p_func function or method object
        @param p_name function name as known to owner
        @param indent indentation level
        @param p_class the class that contains this function as a method
        @param p_modname module name
        @param classname visible name of the containing class (may differ from
        p_class.__name__); computed from p_class when None
        @param seen {id(func): name} map of functions already seen in the same namespace;
        id() because *some* functions are unhashable (eg _elementtree.Comment in py2.7)
        """
        action("redoing func %r of class %r", p_name, p_class)
        # If the very same function object (with the same doc) was already emitted
        # in this namespace, emit an alias assignment instead of a duplicate body.
        if seen is not None:
            other_func = seen.get(id(p_func), None)
            if other_func and getattr(other_func, "__doc__", None) is getattr(p_func, "__doc__", None):
                # _bisect.bisect == _bisect.bisect_right in py31, but docs differ
                out(indent, p_name, " = ", seen[id(p_func)])
                out(indent, "")
                return
            else:
                seen[id(p_func)] = p_name
        # real work
        if classname is None:
            classname = p_class and p_class.__name__ or None
        # sip-generated wrappers are detected via their MRO; the truthy list doubles
        # as a flag passed down to parse_func_doc.
        if p_class and hasattr(p_class, '__mro__'):
            sip_generated = [base_t for base_t in p_class.__mro__ if 'sip.simplewrapper' in str(base_t)]
        else:
            sip_generated = False
        deco = None
        deco_comment = ""
        mod_class_method_tuple = (p_modname, classname, p_name)
        ret_literal = None
        is_init = False
        # any decorators?
        action("redoing decos of func %r of class %r", p_name, p_class)
        if self.doing_builtins and p_modname == BUILTIN_MOD_NAME:
            deco = KNOWN_DECORATORS.get((classname, p_name), None)
            if deco:
                deco_comment = " # known case"
        elif p_class and p_name in p_class.__dict__:
            # detect native methods declared with METH_CLASS flag
            descriptor = p_class.__dict__[p_name]
            if p_name != "__new__" and type(descriptor).__name__.startswith('classmethod'):
                # 'classmethod_descriptor' in Python 2.x and 3.x, 'classmethod' in Jython
                deco = "classmethod"
            elif type(p_func).__name__.startswith('staticmethod'):
                deco = "staticmethod"
        if p_name == "__new__":
            deco = "staticmethod"
            deco_comment = " # known case of __new__"
        action("redoing innards of func %r of class %r", p_name, p_class)
        if deco and HAS_DECORATORS:
            out(indent, "@", deco, deco_comment)
        # Restoration strategies, tried most-reliable first: live inspection,
        # hand-written builtin signatures, CLR reflection, known per-module
        # signatures, and finally docstring parsing with an allow-all fallback.
        if inspect and inspect.isfunction(p_func):
            out(indent, "def ", p_name, restore_by_inspect(p_func), ": # reliably restored by inspect", )
            out_doc_attr(out, p_func, indent + 1, p_class)
        elif self.is_predefined_builtin(*mod_class_method_tuple):
            spec, sig_note = restore_predefined_builtin(classname, p_name)
            out(indent, "def ", spec, ": # ", sig_note)
            out_doc_attr(out, p_func, indent + 1, p_class)
        elif sys.platform == 'cli' and is_clr_type(p_class):
            spec, sig_note = restore_clr(p_name, p_class)
            if not spec: return
            if sig_note:
                out(indent, "def ", spec, ": #", sig_note)
            else:
                out(indent, "def ", spec, ":")
            if not p_name in ['__gt__', '__ge__', '__lt__', '__le__', '__ne__', '__reduce_ex__', '__str__']:
                out_doc_attr(out, p_func, indent + 1, p_class)
        elif mod_class_method_tuple in PREDEFINED_MOD_CLASS_SIGS:
            sig, ret_literal = PREDEFINED_MOD_CLASS_SIGS[mod_class_method_tuple]
            if classname:
                ofwhat = "%s.%s.%s" % mod_class_method_tuple
            else:
                ofwhat = "%s.%s" % (p_modname, p_name)
            out(indent, "def ", p_name, sig, ": # known case of ", ofwhat)
            out_doc_attr(out, p_func, indent + 1, p_class)
        else:
            # __doc__ is our best source of arglist
            sig_note = "real signature unknown"
            spec = ""
            is_init = (p_name == "__init__" and p_class is not None)
            funcdoc = None
            if is_init and hasattr(p_class, "__doc__"):
                if hasattr(p_func, "__doc__"):
                    funcdoc = p_func.__doc__
                # object.__init__'s generic doc is useless; prefer the class doc.
                if funcdoc == object.__init__.__doc__:
                    funcdoc = p_class.__doc__
            elif hasattr(p_func, "__doc__"):
                funcdoc = p_func.__doc__
            sig_restored = False
            action("parsing doc of func %r of class %r", p_name, p_class)
            if isinstance(funcdoc, STR_TYPES):
                (spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, p_name, p_name, classname, deco,
                                                                      sip_generated)
                # Some docstrings describe __init__ under the class name instead.
                if spec is None and p_name == '__init__' and classname:
                    (spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, classname, p_name, classname, deco,
                                                                          sip_generated)
                sig_restored = spec is not None
                if more_notes:
                    if sig_note:
                        sig_note += "; "
                    sig_note += more_notes
            if not sig_restored:
                # use an allow-all declaration
                decl = []
                if p_class:
                    first_param = propose_first_param(deco)
                    if first_param:
                        decl.append(first_param)
                decl.append("*args")
                decl.append("**kwargs")
                spec = p_name + "(" + ", ".join(decl) + ")"
            out(indent, "def ", spec, ": # ", sig_note)
            # to reduce size of stubs, don't output same docstring twice for class and its __init__ method
            if not is_init or funcdoc != p_class.__doc__:
                out_docstring(out, funcdoc, indent + 1)
        # body
        if ret_literal and not is_init:
            out(indent + 1, "return ", ret_literal)
        else:
            out(indent + 1, "pass")
        # Pre-2.4 targets have no @decorator syntax; emit the equivalent call form.
        if deco and not HAS_DECORATORS:
            out(indent, p_name, " = ", deco, "(", p_name, ")", deco_comment)
        out(0, "") # empty line after each item
    def redo_class(self, out, p_class, p_name, indent, p_modname=None, seen=None, inspect_dir=False):
        """
        Restores a class definition.
        @param out output function of a relevant buf
        @param p_class the class object
        @param p_name class name as known to owner
        @param indent indentation level
        @param p_modname name of module
        @param seen {class: name} map of classes already seen in the same namespace
        @param inspect_dir when True, enumerate members via dir() instead of __dict__
        """
        action("redoing class %r of module %r", p_name, p_modname)
        # A class already emitted under another name becomes a simple alias.
        if seen is not None:
            if p_class in seen:
                out(indent, p_name, " = ", seen[p_class])
                out(indent, "")
                return
            else:
                seen[p_class] = p_name
        bases = get_bases(p_class)
        base_def = ""
        skipped_bases = []
        if bases:
            skip_qualifiers = [p_modname, BUILTIN_MOD_NAME, 'exceptions']
            skip_qualifiers.extend(KNOWN_FAKE_REEXPORTERS.get(p_modname, ()))
            bases_list = [] # what we'll render in the class decl
            for base in bases:
                if [1 for (cls, mdl) in KNOWN_FAKE_BASES if cls == base and mdl != self.module]:
                    # our base is a wrapper and our module is not its defining module
                    skipped_bases.append(str(base))
                    continue
                # somehow import every base class
                base_name = base.__name__
                qual_module_name = qualifier_of(base, skip_qualifiers)
                got_existing_import = False
                if qual_module_name:
                    if qual_module_name in self.used_imports:
                        import_list = self.used_imports[qual_module_name]
                        if base in import_list:
                            bases_list.append(base_name) # unqualified: already set to import
                            got_existing_import = True
                    if not got_existing_import:
                        mangled_qualifier = "__" + qual_module_name.replace('.', '_') # foo.bar -> __foo_bar
                        bases_list.append(mangled_qualifier + "." + base_name)
                        self.hidden_imports[qual_module_name] = mangled_qualifier
                else:
                    bases_list.append(base_name)
            base_def = "(" + ", ".join(bases_list) + ")"
        out(indent, "class ", p_name, base_def, ":",
            skipped_bases and " # skipped bases: " + ", ".join(skipped_bases) or "")
        out_doc_attr(out, p_class, indent + 1)
        # inner parts: sort members into methods / properties / plain values
        methods = {}
        properties = {}
        others = {}
        we_are_the_base_class = p_modname == BUILTIN_MOD_NAME and p_name == "object"
        field_source = {}
        try:
            if hasattr(p_class, "__dict__") and not inspect_dir:
                field_source = p_class.__dict__
                field_keys = field_source.keys() # Jython 2.5.1 _codecs fail here
            else:
                field_keys = dir(p_class) # this includes unwanted inherited methods, but no dict + inheritance is rare
        except:
            field_keys = ()
        for item_name in field_keys:
            if item_name in ("__doc__", "__module__"):
                if we_are_the_base_class:
                    item = "" # must be declared in base types
                else:
                    continue # in all other cases must be skipped
            elif keyword.iskeyword(item_name): # for example, PyQt4 contains definitions of methods named 'exec'
                continue
            else:
                try:
                    item = getattr(p_class, item_name) # let getters do the magic
                except AttributeError:
                    item = field_source[item_name] # have it raw
                except Exception:
                    continue
            if is_callable(item) and not isinstance(item, type):
                methods[item_name] = item
            elif is_property(item):
                properties[item_name] = item
            else:
                others[item_name] = item
        #
        if we_are_the_base_class:
            others["__dict__"] = {} # force-feed it, for __dict__ does not contain a reference to itself :)
        # add fake __init__s to have the right sig
        if p_class in FAKE_BUILTIN_INITS:
            methods["__init__"] = self.fake_builtin_init
            note("Faking init of %s", p_name)
        elif '__init__' not in methods:
            init_method = getattr(p_class, '__init__', None)
            if init_method:
                methods['__init__'] = init_method
        # emit methods (case-insensitive name order); failures degrade to a stub
        seen_funcs = {}
        for item_name in sorted_no_case(methods.keys()):
            item = methods[item_name]
            try:
                self.redo_function(out, item, item_name, indent + 1, p_class, p_modname, classname=p_name, seen=seen_funcs)
            except:
                handle_error_func(item_name, out)
        # emit properties, using hand-curated accessors/types when available
        known_props = KNOWN_PROPS.get(p_modname, {})
        a_setter = "lambda self, v: None"
        a_deleter = "lambda self: None"
        for item_name in sorted_no_case(properties.keys()):
            item = properties[item_name]
            prop_docstring = getattr(item, '__doc__', None)
            prop_key = (p_name, item_name)
            if prop_key in known_props:
                prop_descr = known_props.get(prop_key, None)
                if prop_descr is None:
                    continue # explicitly omitted
                acc_line, getter_and_type = prop_descr
                if getter_and_type:
                    getter, prop_type = getter_and_type
                else:
                    getter, prop_type = None, None
                out(indent + 1, item_name,
                    " = property(", format_accessors(acc_line, getter, a_setter, a_deleter), ")"
                )
                if prop_type:
                    if prop_docstring:
                        out(indent + 1, '"""', prop_docstring)
                        out(0, "")
                        out(indent + 1, ':type: ', prop_type)
                        out(indent + 1, '"""')
                    else:
                        out(indent + 1, '""":type: ', prop_type, '"""')
                    out(0, "")
            else:
                out(indent + 1, item_name, " = property(lambda self: object(), lambda self, v: None, lambda self: None) # default")
                if prop_docstring:
                    out(indent + 1, '"""', prop_docstring, '"""')
                out(0, "")
        if properties:
            out(0, "") # empty line after the block
        # emit remaining plain-value attributes
        for item_name in sorted_no_case(others.keys()):
            item = others[item_name]
            self.fmt_value(out, item, indent + 1, prefix=item_name + " = ")
        if p_name == "object":
            out(indent + 1, "__module__ = ''")
        if others:
            out(0, "") # empty line after the block
        #
        if not methods and not properties and not others:
            out(indent + 1, "pass")
    def redo_simple_header(self, p_name):
        """Puts boilerplate code on the top.

        Emits the fixed four-line header (encoding, module name, origin file,
        generator version) followed by the module docstring.
        @param p_name: name of the module being generated
        """
        out = self.header_buf.out # 1st class methods rule :)
        out(0, "# encoding: %s" % OUT_ENCODING) # line 1
        # NOTE: maybe encoding should be selectable
        if hasattr(self.module, "__name__"):
            self_name = self.module.__name__
            # A module reachable under a different name than its own gets a remark.
            if self_name != p_name:
                mod_name = " calls itself " + self_name
            else:
                mod_name = ""
        else:
            mod_name = " does not know its name"
        out(0, "# module ", p_name, mod_name) # line 2
        BUILT_IN_HEADER = "(built-in)"
        if self.mod_filename:
            filename = self.mod_filename
        elif p_name in sys.builtin_module_names:
            filename = BUILT_IN_HEADER
        else:
            filename = getattr(self.module, "__file__", BUILT_IN_HEADER)
        out(0, "# from %s" % filename) # line 3
        out(0, "# by generator %s" % VERSION) # line 4
        # Stubs for the py2.6+ builtin module need print() usable as a function.
        if p_name == BUILTIN_MOD_NAME and version[0] == 2 and version[1] >= 6:
            out(0, "from __future__ import print_function")
        out_doc_attr(out, self.module, 0)
    def redo_imports(self):
        """Emit an import line for every module object found in this module's dict.

        Each discovered submodule is also recorded in self.imported_modules so
        later passes can recognize names that percolated in via imports.
        """
        module_type = type(sys)
        for item_name in self.module.__dict__.keys():
            try:
                item = self.module.__dict__[item_name]
            except:
                continue
            if type(item) is module_type: # not isinstance, py2.7 + PyQt4.QtCore on windows have a bug here
                self.imported_modules[item_name] = item
                self.add_import_header_if_needed()
                ref_notice = getattr(item, "__file__", str(item))
                if hasattr(item, "__name__"):
                    self.imports_buf.out(0, "import ", item.__name__, " as ", item_name, " # ", ref_notice)
                else:
                    # A module without __name__ can't be imported; declare a placeholder.
                    self.imports_buf.out(0, item_name, " = None # ??? name unknown; ", ref_notice)
def add_import_header_if_needed(self):
if self.imports_buf.isEmpty():
self.imports_buf.out(0, "")
self.imports_buf.out(0, "# imports")
    def redo(self, p_name, inspect_dir):
        """
        Restores module declarations.
        Intended for built-in modules and thus does not handle import statements.
        @param p_name name of module
        @param inspect_dir when True, enumerate module members via dir() instead
        of the module's __dict__
        """
        action("redoing header of module %r %r", p_name, str(self.module))
        if "pyqt" in p_name.lower(): # qt specific patch
            self._initializeQApp()
        self.redo_simple_header(p_name)
        # find whatever other self.imported_modules the module knows; effectively these are imports
        action("redoing imports of module %r %r", p_name, str(self.module))
        try:
            self.redo_imports()
        except:
            pass
        action("redoing innards of module %r %r", p_name, str(self.module))
        module_type = type(sys)
        # group what we have into buckets
        vars_simple = {}
        vars_complex = {}
        funcs = {}
        classes = {}
        module_dict = self.module.__dict__
        if inspect_dir:
            module_dict = dir(self.module)
        for item_name in module_dict:
            note("looking at %s", item_name)
            if item_name in (
                    "__dict__", "__doc__", "__module__", "__file__", "__name__", "__builtins__", "__package__"):
                continue # handled otherwise
            try:
                item = getattr(self.module, item_name) # let getters do the magic
            except AttributeError:
                if not item_name in self.module.__dict__: continue
                item = self.module.__dict__[item_name] # have it raw
                # check if it has percolated from an imported module
            except NotImplementedError:
                if not item_name in self.module.__dict__: continue
                item = self.module.__dict__[item_name] # have it raw
            # unless we're adamantly positive that the name was imported, we assume it is defined here
            mod_name = None # module from which p_name might have been imported
            # IronPython has non-trivial reexports in System module, but not in others:
            skip_modname = sys.platform == "cli" and p_name != "System"
            surely_not_imported_mods = KNOWN_FAKE_REEXPORTERS.get(p_name, ())
            ## can't figure weirdness in some modules, assume no reexports:
            #skip_modname = skip_modname or p_name in self.KNOWN_FAKE_REEXPORTERS
            if not skip_modname:
                try:
                    mod_name = getattr(item, '__module__', None)
                except:
                    pass
            # we assume that module foo.bar never imports foo; foo may import foo.bar. (see pygame and pygame.rect)
            maybe_import_mod_name = mod_name or ""
            import_is_from_top = len(p_name) > len(maybe_import_mod_name) and p_name.startswith(maybe_import_mod_name)
            note("mod_name = %s, prospective = %s, from top = %s", mod_name, maybe_import_mod_name, import_is_from_top)
            want_to_import = False
            if (mod_name
                and mod_name != BUILTIN_MOD_NAME
                and mod_name != p_name
                and mod_name not in surely_not_imported_mods
                and not import_is_from_top
            ):
                # import looks valid, but maybe it's a .py file? we're certain not to import from .py
                # e.g. this rules out _collections import collections and builtins import site.
                try:
                    imported = __import__(mod_name) # ok to repeat, Python caches for us
                    if imported:
                        # walk down to the leaf submodule of a dotted name
                        qualifiers = mod_name.split(".")[1:]
                        for qual in qualifiers:
                            imported = getattr(imported, qual, None)
                            if not imported:
                                break
                        imported_path = (getattr(imported, '__file__', False) or "").lower()
                        want_to_import = not (imported_path.endswith('.py') or imported_path.endswith('.pyc'))
                        note("path of %r is %r, want? %s", mod_name, imported_path, want_to_import)
                except ImportError:
                    want_to_import = False
                # NOTE: if we fail to import, we define 'imported' names here lest we lose them at all
            if want_to_import:
                import_list = self.used_imports[mod_name]
                if item_name not in import_list:
                    import_list.append(item_name)
            if not want_to_import:
                if isinstance(item, type) or type(item).__name__ == 'classobj':
                    classes[item_name] = item
                elif is_callable(item): # some classes are callable, check them before functions
                    funcs[item_name] = item
                elif isinstance(item, module_type):
                    continue # self.imported_modules handled above already
                else:
                    if isinstance(item, SIMPLEST_TYPES):
                        vars_simple[item_name] = item
                    else:
                        vars_complex[item_name] = item
        # sort and output every bucket
        action("outputting innards of module %r %r", p_name, str(self.module))
        #
        omitted_names = OMIT_NAME_IN_MODULE.get(p_name, [])
        if vars_simple:
            out = self.functions_buf.out
            prefix = "" # try to group variables by common prefix
            PREFIX_LEN = 2 # default prefix length if we can't guess better
            out(0, "# Variables with simple values")
            for item_name in sorted_no_case(vars_simple.keys()):
                if item_name in omitted_names:
                    out(0, "# definition of " + item_name + " omitted")
                    continue
                item = vars_simple[item_name]
                # track the prefix
                if len(item_name) >= PREFIX_LEN:
                    prefix_pos = string.rfind(item_name, "_") # most prefixes end in an underscore
                    if prefix_pos < 1:
                        prefix_pos = PREFIX_LEN
                    beg = item_name[0:prefix_pos]
                    if prefix != beg:
                        out(0, "") # space out from other prefix
                        prefix = beg
                else:
                    prefix = ""
                # output
                replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
                if replacement is not None:
                    out(0, item_name, " = ", replacement, " # real value of type ", str(type(item)), " replaced")
                elif is_skipped_in_module(p_name, item_name):
                    t_item = type(item)
                    out(0, item_name, " = ", self.invent_initializer(t_item), " # real value of type ", str(t_item),
                        " skipped")
                else:
                    self.fmt_value(out, item, 0, prefix=item_name + " = ")
                self._defined[item_name] = True
            out(0, "") # empty line after vars
        #
        if funcs:
            out = self.functions_buf.out
            out(0, "# functions")
            out(0, "")
            seen_funcs = {}
            for item_name in sorted_no_case(funcs.keys()):
                if item_name in omitted_names:
                    out(0, "# definition of ", item_name, " omitted")
                    continue
                item = funcs[item_name]
                try:
                    self.redo_function(out, item, item_name, 0, p_modname=p_name, seen=seen_funcs)
                except:
                    handle_error_func(item_name, out)
        else:
            self.functions_buf.out(0, "# no functions")
        #
        if classes:
            out = self.functions_buf.out
            out(0, "# classes")
            out(0, "")
            seen_classes = {}
            # sort classes so that inheritance order is preserved
            cls_list = [] # items are (class_name, mro_tuple)
            for cls_name in sorted_no_case(classes.keys()):
                cls = classes[cls_name]
                ins_index = len(cls_list)
                for i in range(ins_index):
                    maybe_child_bases = cls_list[i][1]
                    if cls in maybe_child_bases:
                        ins_index = i # we could not go farther than current ins_index
                        break # ...and need not go fartehr than first known child
                cls_list.insert(ins_index, (cls_name, get_mro(cls)))
            for item_name in [cls_item[0] for cls_item in cls_list]:
                if item_name in omitted_names:
                    out(0, "# definition of ", item_name, " omitted")
                    continue
                item = classes[item_name]
                self.redo_class(out, item, item_name, 0, p_modname=p_name, seen=seen_classes, inspect_dir=inspect_dir)
                self._defined[item_name] = True
                out(0, "") # empty line after each item
            if self.doing_builtins and p_name == BUILTIN_MOD_NAME and version[0] < 3:
                # classobj still supported
                txt = classobj_txt
                self.classes_buf.out(0, txt)
            if self.doing_builtins and p_name == BUILTIN_MOD_NAME:
                txt = create_generator()
                self.classes_buf.out(0, txt)
                # Fake <type 'namedtuple'>
                if version[0] >= 3 or (version[0] == 2 and version[1] >= 6):
                    namedtuple_text = create_named_tuple()
                    self.classes_buf.out(0, namedtuple_text)
        else:
            self.classes_buf.out(0, "# no classes")
        #
        if vars_complex:
            out = self.footer_buf.out
            out(0, "# variables with complex values")
            out(0, "")
            for item_name in sorted_no_case(vars_complex.keys()):
                if item_name in omitted_names:
                    out(0, "# definition of " + item_name + " omitted")
                    continue
                item = vars_complex[item_name]
                if str(type(item)) == "<type 'namespace#'>":
                    continue # this is an IronPython submodule, we mustn't generate a reference for it in the base module
                replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
                if replacement is not None:
                    out(0, item_name + " = " + replacement + " # real value of type " + str(type(item)) + " replaced")
                elif is_skipped_in_module(p_name, item_name):
                    t_item = type(item)
                    out(0, item_name + " = " + self.invent_initializer(t_item) + " # real value of type " + str(
                        t_item) + " skipped")
                else:
                    self.fmt_value(out, item, 0, prefix=item_name + " = ", as_name=item_name)
                self._defined[item_name] = True
                out(0, "") # empty line after each item
        values_to_add = ADD_VALUE_IN_MODULE.get(p_name, None)
        if values_to_add:
            self.footer_buf.out(0, "# intermittent names")
            for value in values_to_add:
                self.footer_buf.out(0, value)
        # imports: last, because previous parts could alter used_imports or hidden_imports
        self.output_import_froms()
        if self.imports_buf.isEmpty():
            self.imports_buf.out(0, "# no imports")
        self.imports_buf.out(0, "") # empty line after imports
def output_import_froms(self):
"""Mention all imported names known within the module, wrapping as per PEP."""
out = self.imports_buf.out
if self.used_imports:
self.add_import_header_if_needed()
for mod_name in sorted_no_case(self.used_imports.keys()):
import_names = self.used_imports[mod_name]
if import_names:
self._defined[mod_name] = True
right_pos = 0 # tracks width of list to fold it at right margin
import_heading = "from % s import (" % mod_name
right_pos += len(import_heading)
names_pack = [import_heading]
indent_level = 0
import_names = list(import_names)
import_names.sort()
for n in import_names:
self._defined[n] = True
len_n = len(n)
if right_pos + len_n >= 78:
out(indent_level, *names_pack)
names_pack = [n, ", "]
if indent_level == 0:
indent_level = 1 # all but first line is indented
right_pos = self.indent_size + len_n + 2
else:
names_pack.append(n)
names_pack.append(", ")
right_pos += (len_n + 2)
# last line is...
if indent_level == 0: # one line
names_pack[0] = names_pack[0][:-1] # cut off lpar
names_pack[-1] = "" # cut last comma
else: # last line of multiline
names_pack[-1] = ")" # last comma -> rpar
out(indent_level, *names_pack)
out(0, "") # empty line after group
if self.hidden_imports:
self.add_import_header_if_needed()
for mod_name in sorted_no_case(self.hidden_imports.keys()):
out(0, 'import ', mod_name, ' as ', self.hidden_imports[mod_name])
out(0, "") # empty line after group
| |
# -*- coding: utf-8 -*-
import requests
from pydeform.auth import (
get_session_http_auth_header,
get_token_http_auth_header
)
from pydeform.resources import (
CollectionListResource,
CollectionOneResource,
CurrentProjectInfoResource,
DocumentListResource,
DocumentOneResource,
NonAuthUserResource,
ProjectListResource,
ProjectOneResource,
SessionUserResource
)
from pydeform.utils import get_base_uri
_DOCS_DATA = {
'requests_session_url': (
'http://docs.python-requests.org/en/master/user/advanced/#session-objects'
),
'requests_request_url': (
'http://docs.python-requests.org/en/master/api/#requests.request'
)
}
class Client(object):
    # The class docstring is assembled with %-interpolation so the external
    # documentation links live in one place (_DOCS_DATA).
    __doc__ = """Deform.io python client class.
    Parameters:
    * `host` - HTTP server host. E.g. `deform.io`.
    * `port` - HTTP server port. Default is `None`.
    * `secure` - if `True` client will make secure request via `https`.
      Default is `True`.
    * `requests_session` - python requests' [Session][requests-session]
      instance. Default is `None`.
    * `request_defaults` - python requests' [request][requests-request]
      defaults. Default is `None`.
    * `api_base_path` - HTTP server's api uri base path. Default is `/api/`.
    Example:
    ```python
    client = Client(host='deform.io')
    ```
    [requests-session]: %(requests_session_url)s
    [requests-request]: %(requests_request_url)s
    """ % {
        'requests_session_url': _DOCS_DATA['requests_session_url'],
        'requests_request_url': _DOCS_DATA['requests_request_url'],
    }
    def __init__(self,
                 host,
                 port=None,
                 secure=True,
                 requests_session=None,
                 request_defaults=None,
                 api_base_path='/api/'):
        # Connection settings; see the class docstring for parameter details.
        self.host = host
        self.port = port
        self.secure = secure
        # A single Session is shared by this client and everything it creates.
        self.requests_session = requests_session or requests.Session()
        self.request_defaults = request_defaults
        self.api_base_path = api_base_path
        # Unauthenticated user endpoints (auth_header=None), e.g. login.
        self.user = NonAuthUserResource(
            base_uri=get_base_uri(
                host=self.host,
                port=self.port,
                secure=self.secure,
                api_base_path=self.api_base_path
            ),
            auth_header=None,
            requests_session=self.requests_session,
            request_defaults=self.request_defaults
        )
    def auth(self, auth_type, auth_key, project_id=None):
        """Creates authenticated client.
        Parameters:
        * `auth_type` - Authentication type. Use `session` for auth
          by session key. Use `token` for auth by token.
        * `auth_key` - Authentication `session key` or `token`.
        * `project_id` - Project identifier. Must be provided for
          `token` authentication. Default is `None`.
        Returns:
        * Instance of [SessionAuthClient](#sessionauthclient) if
          `auth_type` is `session`.
        * Instance of [ProjectClient](#projectclient) if
          `auth_type` is `token`
        Raises:
        * ValueError: if `project_id` parameter was not provided
        Examples:
        For auth with `session` you should obtain session key by
        [Client.user.login](#clientuserlogin) providing
        your account's email and password:
        ```python
        client = Client(host='deform.io')
        session_client = client.auth(
            'session',
            client.user.login(
                email='email@example.com',
                password='password'
            ),
        )
        print session_client
        <pydeform.client.SessionAuthClient object at 0x10c585650>
        ```
        Authentication with `token` example:
        ```python
        client = Client(host='deform.io')
        token_client = client.auth(
            'token',
            auth_key='token-value',
            project_id='some-project',
        )
        print token_client
        <pydeform.client.ProjectClient object at 0x11c585650>
        ```
        """
        if auth_type == 'session':
            # Session auth is account-wide: no project in the base URI.
            return SessionAuthClient(
                auth_header=get_session_http_auth_header(auth_key),
                host=self.host,
                port=self.port,
                secure=self.secure,
                requests_session=self.requests_session,
                request_defaults=self.request_defaults,
                api_base_path=self.api_base_path,
            )
        elif auth_type == 'token':
            # Token auth is always scoped to a single project.
            if not project_id:
                msg = 'You should provide project_id for token authentication'
                raise ValueError(msg)
            return ProjectClient(
                base_uri=get_base_uri(
                    project=project_id,
                    host=self.host,
                    port=self.port,
                    secure=self.secure,
                    api_base_path=self.api_base_path
                ),
                auth_header=get_token_http_auth_header(auth_key),
                requests_session=self.requests_session,
                request_defaults=self.request_defaults,
            )
class SessionAuthClient(object):
    """Client authenticated with a session key.

    Do not construct this class directly -- obtain an instance through
    [Client.auth](#clientauth) with ``session`` authentication.
    """
    def __init__(self,
                 auth_header,
                 host,
                 port,
                 secure,
                 requests_session,
                 request_defaults,
                 api_base_path):
        self.auth_header = auth_header
        self.host = host
        self.port = port
        self.secure = secure
        self.requests_session = requests_session
        self.request_defaults = request_defaults
        self.api_base_path = api_base_path
        # Session auth is account-wide, so the base URI carries no project.
        self.base_uri = get_base_uri(
            host=host,
            port=port,
            secure=secure,
            api_base_path=api_base_path
        )
        common = dict(
            base_uri=self.base_uri,
            auth_header=auth_header,
            requests_session=requests_session,
            request_defaults=request_defaults
        )
        self.user = SessionUserResource(**common)
        self.projects = ProjectListResource(**common)
        self.project = ProjectOneResource(**common)
    def use_project(self, project_id):
        """Return a [ProjectClient](#projectclient) bound to ``project_id``,
        reusing this client's session authentication.
        Parameters:
        * `project_id` - project identifier.
        Example:
        ```python
        client = Client('deform.io')
        session_client = client.auth(
            'session',
            client.user.login('email@example.com', 'password')
        )
        session_client.use_project('some-project-id')
        ```
        """
        project_base_uri = get_base_uri(
            project=project_id,
            host=self.host,
            port=self.port,
            secure=self.secure,
            api_base_path=self.api_base_path
        )
        return ProjectClient(
            base_uri=project_base_uri,
            auth_header=self.auth_header,
            requests_session=self.requests_session,
            request_defaults=self.request_defaults,
        )
class ProjectClient(object):
    """Project-scoped client.

    You should not initialize this client manually.
    Use [Client.auth](#clientauth) method with ``token`` authentication or
    [SessionAuthClient.use_project](#sessionauthclientuse_project) method.
    """
    def __init__(self,
                 base_uri,
                 auth_header,
                 requests_session,
                 request_defaults):
        resource_kwargs = {
            'base_uri': base_uri,
            'auth_header': auth_header,
            'requests_session': requests_session,
            'request_defaults': request_defaults
        }
        self.base_uri = base_uri
        self.auth_header = auth_header
        # Fix: expose the session under the same attribute name used by Client
        # and SessionAuthClient (`requests_session`).  The original misspelled
        # `request_session` attribute is kept as an alias so existing callers
        # that referenced it keep working.
        self.requests_session = requests_session
        self.request_session = requests_session
        self.request_defaults = request_defaults
        # Project-scoped resources.
        self.info = CurrentProjectInfoResource(**resource_kwargs)
        self.collections = CollectionListResource(**resource_kwargs)
        self.collection = CollectionOneResource(**resource_kwargs)
        self.documents = DocumentListResource(**resource_kwargs)
        self.document = DocumentOneResource(**resource_kwargs)
| |
#!/usr/bin/env python
# coding: utf-8
import sys
import os
from textwrap import dedent
from traitlets import default
from traitlets.config.application import catch_config_error
from jupyter_core.application import NoStart
import nbgrader
from .baseapp import nbgrader_aliases, nbgrader_flags
from . import (
NbGrader,
AssignApp,
GenerateAssignmentApp,
AutogradeApp,
FormgradeApp,
FeedbackApp,
GenerateFeedbackApp,
ReleaseFeedbackApp,
ValidateApp,
ReleaseApp,
ReleaseAssignmentApp,
CollectApp,
FetchApp,
FetchAssignmentApp,
FetchFeedbackApp,
SubmitApp,
ListApp,
ExtensionApp,
QuickStartApp,
ExportApp,
DbApp,
UpdateApp,
ZipCollectApp,
GenerateConfigApp,
GenerateSolutionApp
)
from traitlets.traitlets import MetaHasTraits
from typing import List
# Command-line aliases/flags: start from the shared nbgrader defaults.
# App-specific additions (currently none) would be merged in here.
aliases = dict(nbgrader_aliases)
flags = dict(nbgrader_flags)
class NbGraderApp(NbGrader):
name = u'nbgrader'
description = u'A system for assigning and grading notebooks'
version = nbgrader.__version__
aliases = aliases
flags = flags
examples = """
The nbgrader application is a system for assigning and grading notebooks.
Each subcommand of this program corresponds to a different step in the
grading process. In order to facilitate the grading pipeline, nbgrader
places some constraints on how the assignments must be structured. By
default, the directory structure for the assignments must look like this:
{nbgrader_step}/{student_id}/{assignment_id}/{notebook_id}.ipynb
where 'nbgrader_step' is the step in the nbgrader pipeline, 'student_id'
is the ID of the student, 'assignment_id' is the name of the assignment,
and 'notebook_id' is the name of the notebook (excluding the extension).
For example, when running `nbgrader autograde "Problem Set 1"`, the
autograder will first look for all notebooks for all students in the
following directories:
submitted/*/Problem Set 1/*.ipynb
and it will write the autograded notebooks to the corresponding directory
and filename for each notebook and each student:
autograded/{student_id}/Problem Set 1/{notebook_id}.ipynb
These variables, as well as the overall directory structure, can be
configured through the `NbGrader` class (run `nbgrader --help-all`
to see these options).
For more details on how each of the subcommands work, please see the help
for that command (e.g. `nbgrader generate_assignment --help-all`).
"""
subcommands = dict(
assign=(
AssignApp,
dedent(
"""
DEPRECATED, please use generate_assignment instead.
"""
).strip()
),
generate_assignment=(
GenerateAssignmentApp,
dedent(
"""
Create the student version of an assignment. Intended for use by
instructors only.
"""
).strip()
),
autograde=(
AutogradeApp,
dedent(
"""
Autograde submitted assignments. Intended for use by instructors
only.
"""
).strip()
),
formgrade=(
FormgradeApp,
dedent(
"""
Manually grade assignments (after autograding). Intended for use
by instructors only.
"""
).strip()
),
feedback=(
FeedbackApp,
dedent(
"""
DEPRECATED: use generate_feedback instead.
"""
).strip()
),
generate_feedback=(
GenerateFeedbackApp,
dedent(
"""
Generate feedback (after autograding and manual grading).
Intended for use by instructors only.
"""
).strip()
),
validate=(
ValidateApp,
dedent(
"""
Validate a notebook in an assignment. Intended for use by
instructors and students.
"""
).strip()
),
release=(
ReleaseApp,
dedent(
"""
DEPRECATED: use release_assignment instead.
"""
).strip()
),
release_assignment=(
ReleaseAssignmentApp,
dedent(
"""
Release an assignment to students through the nbgrader exchange.
Intended for use by instructors only.
"""
).strip()
),
release_feedback=(
ReleaseFeedbackApp,
dedent(
"""
Release assignment feedback to students through the nbgrader exchange.
Intended for use by instructors only.
"""
).strip()
),
collect=(
CollectApp,
dedent(
"""
Collect an assignment from students through the nbgrader exchange.
Intended for use by instructors only.
"""
).strip()
),
zip_collect=(
ZipCollectApp,
dedent(
"""
Collect assignment submissions from files and/or archives (zip
files) manually downloaded from a LMS.
Intended for use by instructors only.
"""
).strip()
),
fetch=(
FetchApp,
dedent(
"""
DEPRECATED: use fetch_assignment instead.
"""
).strip()
),
fetch_assignment=(
FetchAssignmentApp,
dedent(
"""
Fetch an assignment from an instructor through the nbgrader exchange.
Intended for use by students only.
"""
).strip()
),
fetch_feedback=(
FetchFeedbackApp,
dedent(
"""
Fetch feedback for an assignment from an instructor through the nbgrader exchange.
Intended for use by students only.
"""
).strip()
),
submit=(
SubmitApp,
dedent(
"""
Submit an assignment to an instructor through the nbgrader exchange.
Intended for use by students only.
"""
).strip()
),
list=(
ListApp,
dedent(
"""
List inbound or outbound assignments in the nbgrader exchange.
Intended for use by instructors and students.
"""
).strip()
),
extension=(
ExtensionApp,
dedent(
"""
Install and activate the "Create Assignment" notebook extension.
"""
).strip()
),
quickstart=(
QuickStartApp,
dedent(
"""
Create an example class files directory with an example
config file and assignment.
"""
).strip()
),
export=(
ExportApp,
dedent(
"""
Export grades from the database to another format.
"""
).strip()
),
db=(
DbApp,
dedent(
"""
Perform operations on the nbgrader database, such as adding,
removing, importing, and listing assignments or students.
"""
).strip()
),
update=(
UpdateApp,
dedent(
"""
Update nbgrader cell metadata to the most recent version.
"""
).strip()
),
generate_config=(
GenerateConfigApp,
dedent(
"""
Generates a default nbgrader_config.py file.
"""
).strip()
),
generate_solution=(
GenerateSolutionApp,
dedent(
"""
Generates the solution for the given assignment.
"""
).strip()
),
)
@default("classes")
def _classes_default(self) -> List[MetaHasTraits]:
    """Default for the ``classes`` trait: all configurable classes this app knows."""
    return self.all_configurable_classes()
@catch_config_error
def initialize(self, argv: List[str] = None) -> None:
    """Initialize the application from command-line arguments.

    ``argv=None`` is passed through to the superclass (which presumably
    falls back to ``sys.argv`` -- standard traitlets behavior; confirm).
    """
    super(NbGraderApp, self).initialize(argv)
def start(self) -> None:
    """Start the app: print the subcommand list when none was given, then
    delegate to the superclass, which starts the chosen subapp (if any)."""
    # check: is there a subapp given?
    if self.subapp is None:
        print("No command given (run with --help for options). List of subcommands:\n")
        self.print_subcommands()
    # This starts subapps
    super(NbGraderApp, self).start()
def print_version(self):
    """Print the Python interpreter version and the nbgrader version to stdout."""
    print("Python version {}".format(sys.version))
    print("nbgrader version {}".format(nbgrader.__version__))
def main():
    """Script entry point: launch the NbGraderApp singleton."""
    NbGraderApp.launch_instance()
| |
import datetime
import os
from tic.web.cdp import deps
# Mapping from JavaScript toolkit name to the template used to render a
# command class for that toolkit.
TEMPLATES = {
    'dojo': 'templates/command_dojo.jst',
    'closure': 'templates/command_closure.jst'
}

# Python types accepted as item types for container properties.
# NOTE: Python 2 only -- basestring/unicode/long do not exist on Python 3.
_ALLOWED_PROPERTY_TYPES = set([
    basestring,
    str,
    unicode,
    bool,
    int,
    long,
    float,
    dict,  # TODO: experimental
    datetime.datetime,
    datetime.date,
    datetime.time
])
class Error(Exception):
    """
    Base class for all errors raised by this module.
    """
class BadValueError(Error):
    """
    Raised when a property is set to a value of an invalid type.
    """
class DuplicatePropertyError(Error):
    """
    Raised when the same property name is defined (or inherited) twice.
    """
class Property(object):
    """Base class for all command properties.

    A Property is a data descriptor: subclasses set ``data_type`` and
    implement ``to_js``/``from_js`` to convert values to and from their
    JavaScript representation.

    NOTE(review): the value is stored on the descriptor itself
    (``self.value``), not on the owning model instance, so every instance
    of a model shares a single value per property -- confirm intentional.
    """

    # Expected Python type of values; subclasses override this.
    data_type = str

    def __init__(self):
        """Initialize the property with no value."""
        self.value = None

    def __get__(self, instance, owner):
        """Descriptor read access: return the stored value."""
        return self.value

    def __set__(self, instance, value):
        """Descriptor write access: validate, then store the value."""
        self.value = self.validate(value)

    def validate(self, value):
        """Return ``value`` if it is None or an instance of ``data_type``.

        Raises:
            BadValueError: if the value has the wrong type.
        """
        if value is not None and not isinstance(value, self.data_type):
            # BUG FIX: the original interpolated the literal string
            # "self.name" into the message instead of the property's actual
            # name.  Fall back to a placeholder when the property has not
            # been configured via __property_config__ yet.
            prop_name = getattr(self, 'name', '<unnamed>')
            raise BadValueError(
                'Property %s must be %s instance, not a %s'
                % (prop_name, self.data_type.__name__, type(value).__name__))
        return value

    def __property_config__(self, model_class, property_name):
        """Configure property, connecting it to its model.

        Configure the property so that it knows its property name and what
        class it belongs to.

        Args:
            model_class: Model class which Property will belong to.
            property_name: Name of property within Model instance to store
                property values in. By default this will be the property name
                preceded by an underscore, but may change for different
                subclasses.
        """
        self.model_class = model_class
        self.name = property_name

    def to_js(self):
        """Render the value as JavaScript source (subclass hook)."""
        raise NotImplementedError

    def from_js(self, value):
        """Set the value from a JavaScript/JSON value (subclass hook)."""
        raise NotImplementedError
class PropertiedClass(type):
    """Meta-class for initializing Model classes properties.

    Used for initializing Properties defined in the context of a model.
    By using a meta-class much of the configuration of a Property
    descriptor becomes implicit. By using this meta-class, descriptors
    that are of class Model are notified about which class they
    belong to and what attribute they are associated with and can
    do appropriate initialization via __property_config__.

    Duplicate properties are not permitted.
    """

    def __init__(cls, name, bases, dct):
        """Initializes a class that might have property definitions.

        This method is called when a class is created with the
        PropertiedClass meta-class.

        Loads all properties for this model and its base classes in to a
        dictionary for easy reflection via the 'properties' method.
        Configures each property defined in the new class.

        Duplicate properties, either defined in the new class or defined
        separately in two base classes are not permitted.

        Properties may not assigned to names which are in the list of
        _RESERVED_WORDS. It is still possible to store a property using a
        reserved word in the datastore by using the 'name' keyword argument
        to the Property constructor.

        Args:
            cls: Class being initialized.
            name: Name of new class.
            bases: Base classes of new class.
            dct: Dictionary of new definitions for class.

        Raises:
            DuplicatePropertyError when a property is duplicated either in
                the new class or separately in two base classes.
            ReservedWordError when a property is given a name that is in the
                list of reserved words, attributes of Model and names of the
                form '__.*__'.
        """
        super(PropertiedClass, cls).__init__(name, bases, dct)
        # Delegate the property bookkeeping to a module-level helper.
        _initialize_properties(cls, name, bases, dct)
def _initialize_properties(model_class, name, bases, dct):
    """Initialize Property attributes for Model-class.

    Args:
        model_class: Model class to initialize properties for.
        name: Name of the new class.
        bases: Base classes of the new class.
        dct: Class body dictionary of the new class.

    Raises:
        DuplicatePropertyError: if a property is duplicated in the new class
            or inherited ambiguously from two different base classes.
    """
    model_class._properties = {}
    # Maps property name -> the base class it was first inherited from.
    property_source = {}

    def get_attr_source(name, cls):
        # Walk the MRO to find the class that actually defines ``name``.
        # NOTE(review): implicitly returns None when no class defines it.
        for src_cls in cls.mro():
            if name in src_cls.__dict__:
                return src_cls

    defined = set()
    for base in bases:
        if hasattr(base, '_properties'):
            property_keys = set(base._properties.keys())
            duplicate_property_keys = defined & property_keys
            for dupe_prop_name in duplicate_property_keys:
                # Resolve both candidate defining classes; only a genuine
                # difference (diamond inheritance aside) is an error.
                old_source = property_source[dupe_prop_name] = get_attr_source(
                    dupe_prop_name, property_source[dupe_prop_name])
                new_source = get_attr_source(dupe_prop_name, base)
                if old_source != new_source:
                    raise DuplicatePropertyError(
                        'Duplicate property, %s, is inherited from both %s and %s.' %
                        (dupe_prop_name, old_source.__name__, new_source.__name__))
            property_keys -= duplicate_property_keys
            if property_keys:
                defined |= property_keys
                property_source.update(dict.fromkeys(property_keys, base))
                model_class._properties.update(base._properties)
    # Register (and configure) the properties defined directly on the class.
    for attr_name in dct.keys():
        attr = dct[attr_name]
        if isinstance(attr, Property):
            #check_reserved_word(attr_name)
            if attr_name in defined:
                raise DuplicatePropertyError('Duplicate property: %s' % attr_name)
            defined.add(attr_name)
            model_class._properties[attr_name] = attr
            attr.__property_config__(model_class, attr_name)
class Command(object):
    """Command is the superclass of all commands.

    The programming model is to declare Python subclasses of the Model
    class, declaring datastore properties as class members of that class.
    So if you want to publish a story with title, body, and created date,
    you would do it like this:

        class LoginCommand(Command):
            title = StringProperty()
            created = db.DateTimeProperty()
    """

    # Python 2 metaclass declaration (ignored on Python 3).
    __metaclass__ = PropertiedClass

    def __init__(self, javascript_toolkit='dojo'):
        """Initialize the command for the given JS toolkit ('dojo'/'closure')."""
        self.javascript_toolkit = javascript_toolkit

    @classmethod
    def properties(cls):
        """Returns a dictionary of all the properties defined for this model."""
        return dict(cls._properties)

    def to_js(self):
        """Render this command as a JS class via the toolkit's template.

        NOTE(review): the ``properties`` string accumulated below is never
        used -- the template receives the property objects instead.  Confirm
        whether the dead accumulation can be removed.
        """
        properties = ""
        length = len(self.properties().values())
        for index, prop in enumerate(self.properties().values()):
            properties += '%s:%s' % (prop.name, prop.to_js())
            if index != length - 1:
                properties += ","
        vars = {
            'properties': self.properties().values(),
            'class_name': "%s.%s" % (self.__class__.__module__, self.__class__.__name__),
            'types': self.js_types()
        }
        path = os.path.join(os.path.dirname(__file__), TEMPLATES[self.javascript_toolkit])
        return deps.template.render(path, vars)

    def from_js(self, json):
        """Populate property values from a JSON string or an already-parsed dict."""
        if isinstance(json, dict):
            json_dict = json
        else:
            json_dict = deps.loads(json)
        for key, prop in self.properties().items():
            value = json_dict[key]
            prop.from_js(value)

    def to_json(self):
        """Render the properties as a brace-wrapped object literal.

        NOTE(review): keys are unquoted and values come from ``to_js``, so
        the output is a JS object literal, not strict JSON -- confirm.
        """
        properties = ""
        length = len(self.properties().values())
        for index, prop in enumerate(self.properties().values()):
            properties += '%s:%s' % (prop.name, prop.to_js())
            if index != length - 1:
                properties += ","
        return "{%s}" % properties

    def js_types(self):
        """Return the set of ``closure_type`` values declared by properties
        that define one (used for goog.require-style imports).
        """
        types = set()
        for key, prop in self.properties().items():
            if hasattr(prop, 'closure_type'):
                types.add(prop.closure_type)
        return types
Result = Command
class StringProperty(Property):
    # Accepts both str and unicode (Python 2 ``basestring``).
    data_type = basestring

    def to_js(self):
        """Render the value as a quoted, escaped JavaScript string literal.

        NOTE: normalizes a missing value by mutating self.value to "".
        """
        from tic.utils.simplejson.encoder import encode_basestring
        if self.value is None:
            self.value = ""
        return encode_basestring(self.value)

    def from_js(self, value):
        """Set the value from JS, mapping null/None to the empty string."""
        self.value = "" if value is None else value
class IntegerProperty(Property):
    data_type = int

    def to_js(self):
        """Render the integer value, using the string "undefined" when unset.

        NOTE(review): the encode_basestring import below is unused, and
        storing the string sentinel "undefined" in self.value means a later
        validate() of that value would raise BadValueError -- confirm.
        """
        from tic.utils.simplejson.encoder import encode_basestring
        if self.value is None:
            self.value = "undefined"
        return self.value

    def from_js(self, value):
        """Set the value from JS; None becomes the "undefined" sentinel."""
        self.value = "undefined" if value is None else value
class LongProperty(IntegerProperty):
    # Python 2 ``long``; otherwise behaves exactly like IntegerProperty.
    data_type = long
class DateTimeProperty(Property):
    data_type = datetime.datetime
    # Closure type emitted in require lists (see Command.js_types).
    closure_type = 'goog.date.DateTime'

    def to_js(self):
        """Render as ``new Date(ms)`` (epoch milliseconds), or ``null`` if unset."""
        return "null" if self.value is None else "new Date(%i)" % self._get_unix_epoch()

    def from_js(self, value):
        """Set from a JS timestamp in epoch milliseconds (or None)."""
        # value / 1000 converts milliseconds to seconds for fromtimestamp.
        v = None if value is None else datetime.datetime.fromtimestamp(value / 1000)
        self.value = self.validate(v)

    def _get_unix_epoch(self):
        """
        Return the value as a unix epoch timestamp in milliseconds
        (microseconds folded in before rounding).
        """
        from time import mktime
        return int((mktime(self.value.timetuple()) + 1e-6 * self.value.microsecond) * 1000)
class ListProperty(Property):
    data_type = list

    def __init__(self, item_type):
        """Construct ListProperty.

        Args:
            item_type: Type for the list items; must be one of the allowed
                property types.
            verbose_name: Optional verbose name.
            default: Optional default value; if omitted, an empty list is used.
            **kwds: Optional additional keyword arguments, passed to base
                class. Note that the only permissible value for 'required'
                is True.

        Raises:
            TypeError: if item_type is not a type object.
            ValueError: if item_type is not in _ALLOWED_PROPERTY_TYPES.
        """
        # Widen plain str to basestring so both str and unicode items pass.
        if item_type is str:
            item_type = basestring
        if not isinstance(item_type, type):
            raise TypeError('Item type should be a type object')
        if item_type not in _ALLOWED_PROPERTY_TYPES:
            raise ValueError('Item type %s is not acceptable' % item_type.__name__)
        self.item_type = item_type
        super(ListProperty, self).__init__()

    def to_js(self):
        """
        Render the list as JSON. For now it only allows str items.
        """
        return deps.dumps(self.value)

    def from_js(self, value):
        # Parse the JSON text and validate the resulting list.
        return self.validate(deps.loads(value))

    def validate(self, value):
        """Validate list.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not a list whose items are instances
            of the item_type given to the constructor.
        """
        value = super(ListProperty, self).validate(value)
        value = self.validate_list_contents(value)
        return value

    def validate_list_contents(self, value):
        """Validates that all items in the list are of the correct type.

        Returns:
            The validated list.

        Raises:
            BadValueError if the list has items are not instances of the
            item_type given to the constructor.
        """
        # int and long are interchangeable as integer item types (Python 2).
        if self.item_type in (int, long):
            item_type = (int, long)
        else:
            item_type = self.item_type
        for item in value:
            if not isinstance(item, item_type):
                if item_type == (int, long):
                    raise BadValueError('Items in the %s list must all be integers.' %
                                        self.name)
                else:
                    raise BadValueError(
                        'Items in the %s list must all be %s instances' %
                        (self.name, self.item_type.__name__))
        return value
class DictProperty(ListProperty):
    data_type = dict

    def __init__(self, item_type):
        """Construct DictProperty.

        Args:
            item_type: Type for the dict values; must be one of the allowed
                property types.
            verbose_name: Optional verbose name.
            default: Optional default value; if omitted, an empty list is used.
            **kwds: Optional additional keyword arguments, passed to base
                class. Note that the only permissible value for 'required'
                is True.
        """
        super(DictProperty, self).__init__(item_type)

    def validate(self, value):
        """Validate dict.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not a dict whose values are
            instances of the item_type given to the constructor.
        """
        value = super(DictProperty, self).validate(value)
        value = self.validate_dict_contents(value)
        return value

    def validate_dict_contents(self, value):
        """Validates that all values in the dict are of the correct type.

        NOTE: only the values are checked; keys are not validated.

        Returns:
            The validated dict.

        Raises:
            BadValueError if the dict has values that are not instances of
            the item_type given to the constructor.
        """
        self.validate_list_contents(value.values())
        return value
| |
from subprocess import call
import os
import json
import sys
# Directory containing this builder script.
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
# Project root and per-asset output folders, all relative to the root.
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
def main():
    """Run the full ionicons build pipeline in order: fonts, glyph renames,
    stylesheet partials, cheatsheet, and package manifests."""
    generate_font_files()
    data = get_build_data()
    rename_svg_glyph_names(data)
    generate_scss(data)
    generate_less(data)
    generate_cheatsheet(data)
    generate_component_json(data)
    generate_composer_json(data)
    generate_bower_json(data)
def generate_font_files():
    """Shell out to FontForge to regenerate the font binaries (Python 2 script)."""
    print "Generate Fonts"
    cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
    call(cmd, shell=True)
def rename_svg_glyph_names(data):
    """Rewrite 'uniXXXX' glyph names in the SVG font to 'ion-<name>'.

    NOTE(review): the file is rewritten in place via seek(0) without
    truncate(); if the replaced text ever becomes shorter, stale bytes
    would remain at the end of the file -- confirm.
    """
    # hacky and slow (but safe) way to rename glyph-name attributes
    svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
    svg_file = open(svg_path, 'r+')
    svg_text = svg_file.read()
    svg_file.seek(0)
    # HACK: force a default encoding so the replace/write below accepts
    # non-ASCII content (Python 2 only).
    reload(sys)
    sys.setdefaultencoding("utf8")
    for ionicon in data['icons']:
        # uniF2CA
        org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
        ion_name = 'ion-%s' % (ionicon['name'])
        svg_text = svg_text.replace(org_name, ion_name)
    svg_file.write(svg_text)
    svg_file.close()
def generate_less(data):
    """Write the LESS variables and icon-class partials from the build data."""
    print "Generate LESS"
    font_name = data['name']
    font_version = data['version']
    css_prefix = data['prefix']
    variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
    icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')

    # Variables partial: banner, font settings, one @ionicon-var-* per icon.
    d = []
    d.append('/*!');
    d.append('Ionicons, v%s' % (font_version) );
    d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/');
    d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework');
    d.append('MIT License: https://github.com/driftyco/ionicons');
    d.append('*/');
    d.append('// Ionicons Variables')
    d.append('// --------------------------\n')
    d.append('@ionicons-font-path: "../fonts";')
    d.append('@ionicons-font-family: "%s";' % (font_name) )
    d.append('@ionicons-version: "%s";' % (font_version) )
    d.append('@ionicons-prefix: %s;' % (css_prefix) )
    d.append('')
    for ionicon in data['icons']:
        # '0xF2CA' -> '\F2CA' (CSS escape form).
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
    f = open(variables_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    # Icons partial: one grouped selector extending .ion, then one
    # :before rule per icon.
    d = []
    d.append('// Ionicons Icons')
    d.append('// --------------------------\n')
    group = [ '.%s' % (data['name'].lower()) ]
    for ionicon in data['icons']:
        group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
    d.append( ',\n'.join(group) )
    d.append('{')
    d.append('  &:extend(.ion);')
    d.append('}')
    for ionicon in data['icons']:
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
    f = open(icons_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()
def generate_scss(data):
    """Write the SCSS variables and icon-class partials, then compile CSS."""
    print "Generate SCSS"
    font_name = data['name']
    font_version = data['version']
    css_prefix = data['prefix']
    variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
    icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')

    # Variables partial: font settings plus one $ionicon-var-* per icon.
    d = []
    d.append('// Ionicons Variables')
    d.append('// --------------------------\n')
    d.append('$ionicons-font-path: "../fonts" !default;')
    d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
    d.append('$ionicons-version: "%s" !default;' % (font_version) )
    d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
    d.append('')
    for ionicon in data['icons']:
        # '0xF2CA' -> '\F2CA' (CSS escape form).
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
    f = open(variables_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    # Icons partial: grouped selector extending .ion, then per-icon rules.
    d = []
    d.append('// Ionicons Icons')
    d.append('// --------------------------\n')
    group = [ '.%s' % (data['name'].lower()) ]
    for ionicon in data['icons']:
        group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
    d.append( ',\n'.join(group) )
    d.append('{')
    d.append('  @extend .ion;')
    d.append('}')
    for ionicon in data['icons']:
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
    f = open(icons_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    # The CSS build consumes the SCSS written above.
    generate_css_from_scss(data)
def generate_css_from_scss(data):
    """Compile the SCSS entry point into regular and minified CSS via sass."""
    print "Generate CSS From SCSS"
    scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
    css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
    css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
    cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
    call(cmd, shell=True)
    print "Generate Minified CSS From SCSS"
    cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
    call(cmd, shell=True)
def generate_cheatsheet(data):
    """Render cheatsheet.html from the template plus one row per icon."""
    print "Generate Cheatsheet"
    cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
    template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
    icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
    f = open(template_path, 'r')
    template_html = f.read()
    f.close()
    f = open(icon_row_path, 'r')
    icon_row_template = f.read()
    f.close()
    content = []
    for ionicon in data['icons']:
        css_code = ionicon['code'].replace('0x', '\\')
        # NOTE(review): escaped_html_code and html_code are computed
        # identically; the escaped variant was presumably meant to be
        # HTML-escaped so it displays literally -- confirm against template.
        escaped_html_code = ionicon['code'].replace('0x', '&#x') + ';'
        html_code = ionicon['code'].replace('0x', '&#x') + ';'
        item_row = icon_row_template
        item_row = item_row.replace('{{name}}', ionicon['name'])
        item_row = item_row.replace('{{prefix}}', data['prefix'])
        item_row = item_row.replace('{{css_code}}', css_code)
        item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
        item_row = item_row.replace('{{html_code}}', html_code)
        content.append(item_row)
    template_html = template_html.replace("{{font_name}}", data["name"])
    template_html = template_html.replace("{{font_version}}", data["version"])
    template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
    template_html = template_html.replace("{{content}}", '\n'.join(content) )
    f = open(cheatsheet_file_path, 'w')
    f.write(template_html)
    f.close()
def generate_component_json(data):
    """Write component.json (Component package manifest) from the build data."""
    print "Generate component.json"
    d = {
        "name": data['name'],
        "repo": "driftyco/ionicons",
        "description": "The premium icon font for Ionic Framework.",
        "version": data['version'],
        "keywords": [],
        "dependencies": {},
        "development": {},
        "license": "MIT",
        "styles": [
            "css/%s.css" % (data['name'].lower())
        ],
        "fonts": [
            "fonts/%s.eot" % (data['name'].lower()),
            "fonts/%s.svg" % (data['name'].lower()),
            "fonts/%s.ttf" % (data['name'].lower()),
            "fonts/%s.woff" % (data['name'].lower())
        ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))
    component_file_path = os.path.join(ROOT_PATH, 'component.json')
    f = open(component_file_path, 'w')
    f.write(txt)
    f.close()
def generate_composer_json(data):
    """Write composer.json (PHP Composer manifest).

    ``data`` is currently unused here; all fields are static.
    """
    print "Generate composer.json"
    d = {
        "name": "driftyco/ionicons",
        "description": "The premium icon font for Ionic Framework.",
        "keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
        "homepage": "http://ionicons.com/",
        "authors": [
            {
                "name": "Ben Sperry",
                "email": "ben@drifty.com",
                "role": "Designer",
                "homepage": "https://twitter.com/benjsperry"
            },
            {
                "name": "Adam Bradley",
                "email": "adam@drifty.com",
                "role": "Developer",
                "homepage": "https://twitter.com/adamdbradley"
            },
            {
                "name": "Max Lynch",
                "email": "max@drifty.com",
                "role": "Developer",
                "homepage": "https://twitter.com/maxlynch"
            }
        ],
        "extra": {},
        "license": [ "MIT" ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))
    composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
    f = open(composer_file_path, 'w')
    f.write(txt)
    f.close()
def generate_bower_json(data):
    """Write bower.json (Bower package manifest) from the build data."""
    print "Generate bower.json"
    d = {
        "name": data['name'],
        "version": data['version'],
        "homepage": "https://github.com/driftyco/ionicons",
        "authors": [
            "Ben Sperry <ben@drifty.com>",
            "Adam Bradley <adam@drifty.com>",
            "Max Lynch <max@drifty.com>"
        ],
        "description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
        "main": [
            "css/%s.css" % (data['name'].lower()),
            "fonts/*"
        ],
        "keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
        "license": "MIT",
        "ignore": [
            "**/.*",
            "builder",
            "node_modules",
            "bower_components",
            "test",
            "tests"
        ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))
    bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
    f = open(bower_file_path, 'w')
    f.write(txt)
    f.close()
def get_build_data():
    """Read and parse builder/build_data.json.

    Returns:
        dict: the parsed build metadata (font name, version, icon list, ...).
    """
    build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
    # Use a context manager so the handle is closed even if parsing fails
    # (the original leaked the file object on a json error).
    with open(build_data_path, 'r') as f:
        return json.load(f)
# Run the full build when executed as a script.
if __name__ == "__main__":
    main()
| |
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Catalog service."""
import abc
import six
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
CONF = config.CONF  # global keystone configuration object
LOG = log.getLogger(__name__)  # module-level logger
def format_url(url, data):
    """Safely string formats a user-defined URL with the given data.

    ``$(key)s`` placeholders in *url* are rewritten to printf-style
    ``%(key)s`` placeholders and substituted from *data*.

    Args:
        url: Endpoint URL template. A non-string (e.g. None) yields None.
        data: Mapping of substitution values.

    Returns:
        The formatted URL string, or None when *url* is not a string.

    Raises:
        keystone.exception.MalformedEndpoint: if the template references an
            unknown key or is otherwise malformed.
    """
    try:
        result = url.replace('$(', '%(') % data
    except AttributeError:
        # url is None (or not a string): treat as "no URL configured".
        return None
    except KeyError as e:
        LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except TypeError as e:
        # BUG FIX: added the missing space before the parenthetical, which
        # previously rendered as "...%(keyerror)s(are you missing ...".
        LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s "
                    "(are you missing brackets ?)"),
                  {"url": url,
                   "keyerror": e})
        raise exception.MalformedEndpoint(endpoint=url)
    except ValueError:
        # The exception object was never used here; don't bind it.
        LOG.error(_("Malformed endpoint %s - incomplete format "
                    "(are you missing a type notifier ?)"), url)
        raise exception.MalformedEndpoint(endpoint=url)
    return result
@dependency.provider('catalog_api')
class Manager(manager.Manager):
    """Default pivot point for the Catalog backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    The methods below wrap the driver calls, translating the driver's
    generic ``NotFound`` into resource-specific exceptions.
    """

    def __init__(self):
        # Load the configured catalog driver.
        super(Manager, self).__init__(CONF.catalog.driver)

    def create_region(self, region_ref):
        # Check duplicate ID
        try:
            self.get_region(region_ref['id'])
        except exception.RegionNotFound:
            # No region with this ID yet -- safe to create.
            pass
        else:
            msg = _('Duplicate ID, %s.') % region_ref['id']
            raise exception.Conflict(type='region', details=msg)
        try:
            return self.driver.create_region(region_ref)
        except exception.NotFound:
            # The driver signals NotFound when the parent region is invalid.
            parent_region_id = region_ref.get('parent_region_id')
            raise exception.RegionNotFound(region_id=parent_region_id)

    def get_region(self, region_id):
        try:
            return self.driver.get_region(region_id)
        except exception.NotFound:
            raise exception.RegionNotFound(region_id=region_id)

    def delete_region(self, region_id):
        try:
            return self.driver.delete_region(region_id)
        except exception.NotFound:
            raise exception.RegionNotFound(region_id=region_id)

    def create_service(self, service_id, service_ref):
        # New services are enabled unless explicitly disabled.
        service_ref.setdefault('enabled', True)
        return self.driver.create_service(service_id, service_ref)

    def get_service(self, service_id):
        try:
            return self.driver.get_service(service_id)
        except exception.NotFound:
            raise exception.ServiceNotFound(service_id=service_id)

    def delete_service(self, service_id):
        try:
            return self.driver.delete_service(service_id)
        except exception.NotFound:
            raise exception.ServiceNotFound(service_id=service_id)

    @manager.response_truncated
    def list_services(self, hints=None):
        return self.driver.list_services(hints or driver_hints.Hints())

    def create_endpoint(self, endpoint_id, endpoint_ref):
        try:
            return self.driver.create_endpoint(endpoint_id, endpoint_ref)
        except exception.NotFound:
            # The referenced service does not exist.
            service_id = endpoint_ref.get('service_id')
            raise exception.ServiceNotFound(service_id=service_id)

    def delete_endpoint(self, endpoint_id):
        try:
            return self.driver.delete_endpoint(endpoint_id)
        except exception.NotFound:
            raise exception.EndpointNotFound(endpoint_id=endpoint_id)

    def get_endpoint(self, endpoint_id):
        try:
            return self.driver.get_endpoint(endpoint_id)
        except exception.NotFound:
            raise exception.EndpointNotFound(endpoint_id=endpoint_id)

    @manager.response_truncated
    def list_endpoints(self, hints=None):
        return self.driver.list_endpoints(hints or driver_hints.Hints())

    def get_catalog(self, user_id, tenant_id, metadata=None):
        try:
            return self.driver.get_catalog(user_id, tenant_id, metadata)
        except exception.NotFound:
            raise exception.NotFound('Catalog not found for user and tenant')
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for a Catalog driver.

    Concrete backends subclass this and implement every abstract method;
    each method raises ``exception.NotImplemented`` here by default.
    """

    def _get_list_limit(self):
        # Per-catalog limit takes precedence over the global list limit.
        return CONF.catalog.list_limit or CONF.list_limit

    @abc.abstractmethod
    def create_region(self, region_ref):
        """Creates a new region.

        :raises: keystone.exception.Conflict
        :raises: keystone.exception.RegionNotFound (if parent region invalid)
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def list_regions(self):
        """List all regions.

        :returns: list of region_refs or an empty list.
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_region(self, region_id):
        """Get region by id.

        :returns: region_ref dict
        :raises: keystone.exception.RegionNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def update_region(self, region_id):
        """Update region by id.

        :returns: region_ref dict
        :raises: keystone.exception.RegionNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_region(self, region_id):
        """Deletes an existing region.

        :raises: keystone.exception.RegionNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def create_service(self, service_id, service_ref):
        """Creates a new service.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def list_services(self):
        """List all services.

        :returns: list of service_refs or an empty list.
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_service(self, service_id):
        """Get service by id.

        :returns: service_ref dict
        :raises: keystone.exception.ServiceNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def update_service(self, service_id):
        """Update service by id.

        :returns: service_ref dict
        :raises: keystone.exception.ServiceNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_service(self, service_id):
        """Deletes an existing service.

        :raises: keystone.exception.ServiceNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def create_endpoint(self, endpoint_id, endpoint_ref):
        """Creates a new endpoint for a service.

        :raises: keystone.exception.Conflict,
                 keystone.exception.ServiceNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_endpoint(self, endpoint_id):
        """Get endpoint by id.

        :returns: endpoint_ref dict
        :raises: keystone.exception.EndpointNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def list_endpoints(self):
        """List all endpoints.

        :returns: list of endpoint_refs or an empty list.
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def update_endpoint(self, endpoint_id, endpoint_ref):
        """Update endpoint by id.

        :returns: endpoint_ref dict
        :raises: keystone.exception.EndpointNotFound
                 keystone.exception.ServiceNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_endpoint(self, endpoint_id):
        """Deletes an endpoint for a service.

        :raises: keystone.exception.EndpointNotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_catalog(self, user_id, tenant_id, metadata=None):
        """Retrieve and format the current service catalog.

        Example::

            { 'RegionOne':
                {'compute': {
                    'adminURL': u'http://host:8774/v1.1/tenantid',
                    'internalURL': u'http://host:8774/v1.1/tenant_id',
                    'name': 'Compute Service',
                    'publicURL': u'http://host:8774/v1.1/tenantid'},
                 'ec2': {
                    'adminURL': 'http://host:8773/services/Admin',
                    'internalURL': 'http://host:8773/services/Cloud',
                    'name': 'EC2 Service',
                    'publicURL': 'http://host:8773/services/Cloud'}}

        :returns: A nested dict representing the service catalog or an
                  empty dict.
        :raises: keystone.exception.NotFound
        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_v3_catalog(self, user_id, tenant_id, metadata=None):
        """Retrieve and format the current V3 service catalog.

        Example::

            [
                {
                    "endpoints": [
                    {
                        "interface": "public",
                        "id": "--endpoint-id--",
                        "region": "RegionOne",
                        "url": "http://external:8776/v1/--project-id--"
                    },
                    {
                        "interface": "internal",
                        "id": "--endpoint-id--",
                        "region": "RegionOne",
                        "url": "http://internal:8776/v1/--project-id--"
                    }],
                    "id": "--service-id--",
                    "type": "volume"
                }]

        :returns: A list representing the service catalog or an empty list
        :raises: keystone.exception.NotFound
        """
        raise exception.NotImplemented()
| |
"""
This module abstracts all the bytes<-->string conversions so that the python 2
and 3 code everywhere else is similar. This also has a few simple functions
that deal with the fact that bytes are different between the 2 versions even
when using from __future__ import unicode_literals. For example:
Python 2:
b'mybytes'[0] --> b'm' (type str)
Python 3:
b'mybytes'[0] --> 109 (type int)
So in some places we get an index of a bytes object and we have to make sure
the behaviour is the same in both versions of python so functions here take
care of that.
"""
import sys
# Interpreter major-version flags.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
    # NOTE(review): there is no stdlib module named ``cmp`` (cmp was a
    # *builtin* on Python 2), so this import is expected to always fail
    # and the fallback definition below is what actually gets used.
    import cmp
except ImportError:
    #No cmp function available, probably Python 3
    def cmp(a, b):
        # Standard three-way comparison: -1, 0 or 1.
        return (a > b) - (a < b)
try:
    # ``callable`` was removed in Python 3.0 and restored in 3.2;
    # keep the builtin when it exists.
    callable = callable
except NameError:
    def callable(obj):
        # An object is callable iff any class in its MRO defines __call__.
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def string_to_bytestr(string):
    """
    UTF-8 encode ``string`` into a bytes object.

    Unlike a raw byte-for-byte conversion, encoding may change both the
    ordinal of each element and the total length (a single character can
    become one, two or three bytes).
    """
    encoded = string.encode('utf-8')
    return encoded
if sys.version_info >= (3,):
    # Names that are builtins on Python 2 only; alias them for Python 3.
    unicode = str
    long = int
    range = range
    unichr = chr

    def b(s):
        # Latin-1 round-trips code points 0-255 unchanged.
        return s.encode("latin-1")

    def u(s):
        # Already unicode on Python 3.
        return s

    def iteritems(d):
        return d.items()

    from io import BytesIO as SimIO

    def string_to_bytes(text):
        """
        Convert a string to a bytes object. This is a raw conversion
        so that ord() on each element remains unchanged.
        Input type: string
        Output type: bytes
        """
        return bytes([ord(c) for c in text])

    def bytes_to_string(byte_array):
        """
        Inverse of string_to_bytes.
        """
        return u('').join([chr(b) for b in byte_array])

    def string_to_bytestr(string):
        """
        Convert a string to a bytes object. This encodes the object as well
        which will typically change ord on each element & change the length
        (i.e. 1 char could become 1/2/3 bytes)
        """
        return string.encode('utf-8')

    def bytestr_to_string(bytestr):
        """
        Inverse of string_to_bytestr.
        """
        return bytes([c for c in bytestr]).decode('utf-8')

    def byte_chr(bytes_str):
        """
        This converts a *single* input byte to a bytes object. Usually used
        in conjunction with b'mybytes'[i]. See module description.
        Input: 2: string/pseudo-bytes 3: int
        Output: bytes
        """
        return bytes([bytes_str])

    def bytestr(val):
        """
        Convert a *single* integer to a bytes object. Usually used like
        bytestr(int).
        Input: int
        Output: bytes
        """
        return bytes([val])

    def bytes_as_num(val):
        """
        Python 2:
            b'mybytes'[0] --> b'm' (type str)
        Python 3:
            b'mybytes'[0] --> 109 (type int)
        Given a number, returns it. For Python2, we use ord for the conversion.
        """
        return val

    def num_as_byte(val):
        """
        Converts an element of a byte string to a byte, effectively an
        inverse of bytes_as_num
        """
        return bytes([val])
else:
#some constants that are python2 only
range = xrange
unicode = unicode
long = long
unichr = unichr
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
def iteritems(d):
return d.iteritems()
try:
from cStringIO import StringIO as SimIO
except:
from StringIO import StringIO as SimIO
def string_to_bytes(text):
    """
    Python 2 variant: characters of a str are already single-byte strings,
    so a plain copy preserves every ordinal.
    """
    return "".join(text)
def bytes_to_string(byte_array):
    """
    Python 2 variant: elements of a byte string are 1-char strings already,
    so joining them reconstructs the original string unchanged.
    """
    return ''.join(byte_array)
def bytestr_to_string(bytestr):
    """
    Inverse of string_to_bytestr (Python 2 variant): decode the UTF-8 byte
    string *bytestr* to a unicode object. ``unicode`` here is the Py2
    builtin re-exported by this module's compatibility aliases.
    """
    return unicode(bytestr, 'utf-8')
def byte_chr(bytes_str):
    """
    Python 2 variant: indexing a byte string already yields a 1-char str
    (the bytes representation), so return the element untouched.
    """
    return bytes_str
def bytestr(val):
    """
    Python 2 variant: chr() already produces the 1-char byte string for a
    single integer.
    """
    return chr(val)
def bytes_as_num(val):
    """
    Python 2: b'mybytes'[0] --> b'm' (type str)
    Python 3: b'mybytes'[0] --> 109 (type int)
    Python 2 variant: turn the 1-char string element into its ordinal
    with ord().
    """
    return ord(val)
def num_as_byte(val):
    """
    Python 2 variant: byte-string elements are already 1-char strings, so
    the inverse of bytes_as_num's identity direction is the value itself.
    """
    return val
# ``bytes`` exists as a real builtin only from Python 2.6 onward; on older
# interpreters fall back to ``str`` (the same type on Py2 in practice).
bytes = bytes if sys.version_info >= (2, 6) else str
# Obtain a ``print`` function without tripping Python 2's ``print``
# statement syntax: fetch the builtin where one exists, otherwise define a
# pure-Python port for Python 2.4/2.5.
if sys.version_info >= (3, 0):
    import builtins
    # Python 3: print is an ordinary builtin function.
    print_ = getattr(builtins, 'print', None)
elif sys.version_info >= (2, 6):
    import __builtin__
    # Python 2.6/2.7: the print *function* lives in __builtin__; getattr is
    # used because ``print`` is still a keyword at parse time.
    print_ = getattr(__builtin__, 'print', None)
else:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        # Mirrors the builtin contract: file/sep/end keywords, and a no-op
        # when file is None.
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            # Coerce non-strings to str first.
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        # Validate sep/end: must be None, str, or unicode.
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # Promote the separators to unicode if any positional argument is
        # unicode, so the writes stay in one string domain.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        # Emit: arg0 sep arg1 sep ... argN end
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
# (stray table-border characters removed -- a second, unrelated script follows)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import datetime
import pdb
from libs.config_libs import *
from libs.verify_libs import *
from libs.utils_libs import *
def main():
    """Run the NSP functional testcases, report results, and exit."""
    suite = test_gbp_nsp_func()
    # Each testcase returns 0 on failure; cleanup(tc_name=...) logs FAILED
    # and wipes any leftover groups/NSPs before the next testcase.
    cases = [
        (suite.test_gbp_nsp_func_1, 'TESTCASE_GBP_NSP_FUNC_1'),
        (suite.test_gbp_nsp_func_2, 'TESTCASE_GBP_NSP_FUNC_2'),
        (suite.test_gbp_nsp_func_3, 'TESTCASE_GBP_NSP_FUNC_3'),
    ]
    for run_case, case_name in cases:
        if run_case() == 0:
            suite.cleanup(tc_name=case_name)
    suite.cleanup()
    report_results('test_gbp_nsp_func', 'test_results.txt')
    sys.exit(1)
class test_gbp_nsp_func(object):
    """Functional test-suite for GBP Network Service Policy (NSP) objects.

    Each testcase returns 1 on PASS and 0 on FAIL; main() calls cleanup()
    with the testcase name when a testcase reports failure.
    """
    # NOTE: everything below runs once, at class-definition time -- it wires
    # up console + file logging and removes any stale log from a prior run.
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s - %(message)s', level=logging.WARNING)
    _log = logging.getLogger(__name__)
    cmd = 'rm /tmp/test_gbp_nsp_func.log'
    getoutput(cmd)  # best-effort removal; getoutput swallows errors
    hdlr = logging.FileHandler('/tmp/test_gbp_nsp_func.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    _log.addHandler(hdlr)
    # DEBUG is the effective level; the former redundant setLevel(INFO)
    # call, which was immediately overridden, has been dropped.
    _log.setLevel(logging.DEBUG)

    def __init__(self):
        """Create the config/verify helpers shared by every testcase."""
        self._log.info("\n## START OF GBP NETWORK_SERVICE_POLICY FUNCTIONALITY TESTSUITE\n")
        self.gbpcfg = Gbp_Config()
        self.gbpverify = Gbp_Verify()
        self.nsp_name = 'demo_nsp'

    def cleanup(self, tc_name=''):
        """Delete all groups/NSPs; log a FAILED line when tc_name is given."""
        if tc_name != '':
            self._log.info('Testcase %s: FAILED' % (tc_name))
        for obj in ['group', 'nsp']:
            self.gbpcfg.gbp_del_all_anyobj(obj)

    def test_gbp_nsp_func_1(self):
        """Create/refer/delete/verify NSPs referenced from PTGs."""
        self._log.info("\n###############################################################\n"
                       "TESTCASE_GBP_NSP_FUNC_1: TO CREATE/REFER/DELETE/VERIFY NTK-SVC-POLICY in PTG\n"
                       "TEST_STEPS::\n"
                       "Create two NSPs one with type:ip-pool & ip-single, value:self_subnet and self_subnet\n"
                       "Verify the attributes & values\n"
                       "Create two PTGs and reference each one of the above NSP in one of the PTG\n"
                       "Verify the NSP reference in the PTGs\n"
                       "Delete the PTG and the NSP\n"
                       "Verify that NSP got deleted\n"
                       "###############################################################\n")
        # Step 1: create two NSPs, both type=ip_single / value=self_subnet
        self._log.info('\n## Step 1: Create NSPolicy with type=ip_single & name:self_subnet ##\n')
        nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'nsp', 'demo_nsp_1', network_service_params="type=ip_single,name=vip_ip1,value=self_subnet")
        if nsp1_uuid == 0:
            self._log.info("\n## Step 1A: Create NSPolicy with type=ip_single & name:self_subnet == Failed")
            return 0
        nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'nsp', 'demo_nsp_2', network_service_params="type=ip_single,name=vip_ip2,value=self_subnet")
        if nsp2_uuid == 0:
            self._log.info("\n## Step 1B: Create NSPolicy with type=ip_single & name:self_subnet == Failed")
            return 0
        # Step 2: verify both NSPs exist with the expected attributes
        self._log.info("\n## Step 2: Verify NSPolicies are successfully created")
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, name='demo_nsp_1',
                                                  network_service_params='{"type": "ip_single", "name": "vip_ip1", "value": "self_subnet"}') == 0:
            self._log.info("\n## Step 2A: Verify NSPolicy demo_nsp_1 with valued attributes, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp2_uuid, name='demo_nsp_2',
                                                  network_service_params='{"type": "ip_single", "name": "vip_ip2", "value": "self_subnet"}') == 0:
            # label fixed: second verification (was a duplicated "Step 2A")
            self._log.info("\n## Step 2B: Verify NSPolicy demo_nsp_2 with valued attributes, Failed")
            return 0
        # Step 3: create two PTGs, each referencing one of the NSPs
        self._log.info("\n## Step 3: Create and Verify two PTGs each referencing one of the two NSPs")
        created = self.gbpcfg.gbp_policy_cfg_all(1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
        if created == 0:
            self._log.info("\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
            return 0
        ptg1_uuid = created[0]
        created = self.gbpcfg.gbp_policy_cfg_all(1, 'group', 'demo_ptg_2', network_service_policy=nsp2_uuid)
        if created == 0:
            self._log.info("\n## Step 3B: Create PTG using NSP demo_nsp_2,Failed")
            return 0
        ptg2_uuid = created[0]
        # Verify the cross-references both ways: PTG listed under the NSP,
        # and the NSP id recorded on the PTG.
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
            self._log.info("\n## Step 3C: Verify PTG demo_ptg_1 seen in NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp2_uuid, policy_target_groups=ptg2_uuid) == 0:
            # label fixed: was a duplicated "Step 3C"
            self._log.info("\n## Step 3D: Verify PTG demo_ptg_2 seen in NSP demo_nsp_2, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg1_uuid, network_service_policy_id=nsp1_uuid) == 0:
            self._log.info("\n## Step 3E: Verify PTG demo_ptg_1 references NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg2_uuid, network_service_policy_id=nsp2_uuid) == 0:
            # label fixed: was a duplicated "Step 3D"
            self._log.info("\n## Step 3F: Verify PTG demo_ptg_2 references NSP demo_nsp_2, Failed")
            return 0
        # Step 4: delete each PTG then its NSP (same interleaved order as
        # before), then verify the NSPs are gone.
        self._log.info("\n## Step 4: Delete and Verify two PTGs each referencing one of the two NSPs")
        nsp_list = [nsp1_uuid, nsp2_uuid]
        for ptg_uuid, nsp_uuid in zip([ptg1_uuid, ptg2_uuid], nsp_list):
            if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg_uuid) == 0:
                self._log.info("\n## Step 4A: Deletion of PTG %s, Failed" % (ptg_uuid))
                return 0
            if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_uuid) == 0:
                self._log.info("\n## Step 4B: Deletion of NSP %s, Failed" % (nsp_uuid))
                return 0
        for nsp_uuid in nsp_list:
            if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp_uuid) != 0:
                self._log.info("\n## Step 4C: Verify deletion of NSP, Failed")
                return 0
        self._log.info("\n## TESTCASE_GBP_NSP_FUNC_1: PASSED")
        return 1

    def test_gbp_nsp_func_2(self):
        """Update a PTG between two NSPs and verify references both ways."""
        self._log.info("\n###############################################################\n"
                       "TESTCASE_GBP_NSP_FUNC_2: TO CREATE/UPDATE/DELETE/VERIFY a PTG with NTK-SVC-POLICY with MULTIPLE PTGs\n"
                       "TEST_STEPS::\n"
                       "Create two NSPolicy Object with non-default params\n"
                       "Create PTG using one of the two NSPs\n"
                       "Verify the PTG and NSP are reflecting in each other in the DB\n"
                       "Update the PTG to use the second NSP\n"
                       "Verify the PTG and NSP are reflecting in each other in the DB\n"
                       "Update/Revert the PTG so that it refers to the initial NSP\n"
                       "Delete all PTG, NSP\n"
                       "Verify that PTG and NSPs got deleted\n"
                       "###############################################################\n")
        # Step 1: create the two NSPs
        self._log.info('\n## Step 1: Create two NSPolicy ##\n')
        nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'nsp', 'demo_nsp_1', network_service_params="type=ip_single,name=vip_ip1,value=self_subnet")
        if nsp1_uuid == 0:
            self._log.info("\n## Step 1A: Create NSPolicy with type=ip_single & name:self_subnet == Failed")
            return 0
        nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'nsp', 'demo_nsp_2', network_service_params="type=ip_single,name=vip_ip2,value=self_subnet")
        if nsp2_uuid == 0:
            self._log.info("\n## Step 1B: Create NSPolicy with type=ip_single & name:self_subnet == Failed")
            return 0
        # Create a PTG referencing the first NSP (original log text numbers
        # this "Step 3"; the numbering is preserved for log compatibility).
        self._log.info("\n## Step 3: Create and Verify PTG referencing one of the two NSPs")
        created = self.gbpcfg.gbp_policy_cfg_all(1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
        if created == 0:
            self._log.info("\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
            return 0
        ptg1_uuid = created[0]
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
            self._log.info("\n## Step 3B: Verify PTG demo_ptg_1 seen in NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg1_uuid, network_service_policy_id=nsp1_uuid) == 0:
            self._log.info("\n## Step 3C: Verify PTG demo_ptg_1 references NSP demo_nsp_1, Failed")
            return 0
        # Step 4: repoint the PTG at the second NSP and verify the move
        self._log.info("\n## Step 4: Update and Verify the PTG with the second NSP")
        if self.gbpcfg.gbp_policy_cfg_all(2, 'group', ptg1_uuid, network_service_policy=nsp2_uuid) == 0:
            self._log.info("\n## Step 4A: Updating NSP attribute of PTG, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) != 0:
            self._log.info("\n## Step 4B: Verify PTG demo_ptg_1 is NOT seen in NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) == 0:
            self._log.info("\n## Step 4C: Verify PTG demo_ptg_1 is seen in NSP demo_nsp_2, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg1_uuid, network_service_policy_id=nsp2_uuid) == 0:
            self._log.info("\n## Step 4D: Verify PTG demo_ptg_1 references NSP demo_nsp_2, Failed")
            return 0
        # Step 5: revert the PTG to the first NSP and verify again
        self._log.info("\n## Step 5: Update/Revert the NSP attr of PTG and Verify")
        if self.gbpcfg.gbp_policy_cfg_all(2, 'group', ptg1_uuid, network_service_policy=nsp1_uuid) == 0:
            self._log.info("\n## Step 5A: Reverting the NSP attribute of PTG by update action, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) != 0:
            self._log.info("\n## Step 5B: Verify PTG demo_ptg_1 is NOT seen in NSP demo_nsp_2, Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
            self._log.info("\n## Step 5C: Verify PTG demo_ptg_1 is seen in NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg1_uuid, network_service_policy_id=nsp1_uuid) == 0:
            self._log.info("\n## Step 5D: Verify PTG demo_ptg_1 references NSP demo_nsp_1, Failed")
            return 0
        # Step 6: delete the PTG and both NSPs, verify the NSPs are gone
        self._log.info("\n## Step 6: Delete and Verify two PTGs each referencing one of the two NSPs")
        if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg1_uuid) == 0:
            self._log.info("\n## Step 6A: Deletion of PTG,Failed")
            return 0
        nsp_list = [nsp1_uuid, nsp2_uuid]
        for nsp_uuid in nsp_list:
            if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_uuid) == 0:
                self._log.info("\n## Step 6B: Deletion of NSP %s, Failed" % (nsp_uuid))
                return 0
        for nsp_uuid in nsp_list:
            if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp_uuid) != 0:
                self._log.info("\n## Step 6C: Verify deletion of NSP, Failed")
                return 0
        self._log.info("\n## TESTCASE_GBP_NSP_FUNC_2: PASSED")
        return 1

    def test_gbp_nsp_func_3(self):
        """Verify deleting an NSP fails while a PTG still references it."""
        self._log.info("\n###############################################################\n"
                       "TESTCASE_GBP_NSP_FUNC_3: TO CREATE/DELETE/VERIFY NTK-SVC-POLICY while REFERENCED IN PTG\n"
                       "TEST_STEPS::\n"
                       "Create NSPolicy Object with non-default params\n"
                       "Create PTG referencing the NSP\n"
                       "Verify the PTG and NSP are reflecting in each other in the DB\n"
                       "Delete and Verify the deletion of referenced NSP fails\n"
                       "Delete PTG & NSP, Verify that PTG and NSPs got deleted\n"
                       "###############################################################\n")
        # Step 1: create the NSP
        self._log.info('\n## Step 1: Create NSPolicy with non-default params ##\n')
        nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'nsp', 'demo_nsp_1', network_service_params="type=ip_single,name=vip_ip1,value=self_subnet")
        if nsp1_uuid == 0:
            self._log.info("\n## Step 1A: Create NSPolicy with type=ip_single & name:self_subnet == Failed")
            return 0
        # Step 2: create a PTG referencing it and verify both directions
        self._log.info("\n## Step 2: Create and Verify PTG referencing the NSP")
        created = self.gbpcfg.gbp_policy_cfg_all(1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
        if created == 0:
            self._log.info("\n## Step 2A: Create PTG using NSP demo_nsp_1,Failed")
            return 0
        ptg1_uuid = created[0]
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
            self._log.info("\n## Step 2B: Verify PTG demo_ptg_1 seen in NSP demo_nsp_1, Failed")
            return 0
        if self.gbpverify.gbp_policy_verify_all(1, 'group', ptg1_uuid, network_service_policy_id=nsp1_uuid) == 0:
            self._log.info("\n## Step 2C: Verify PTG demo_ptg_1 references NSP demo_nsp_1, Failed")
            return 0
        # Step 3: deleting the still-referenced NSP must fail
        self._log.info("\n## Step 3: Delete the NSP while it is still referenced in a PTG")
        if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp1_uuid) != 0:
            self._log.info("\n## Step 3A: Deletion of Referenced NSP DID NOT fail")
            return 0
        # Step 4: delete the PTG first, then the NSP, then verify it is gone
        self._log.info("\n## Step 4: Delete PTG followed by NSP and Verify")
        if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg1_uuid) == 0:
            self._log.info("\n## Step 4A: Deletion of PTG,Failed")
            return 0
        if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp1_uuid) == 0:
            self._log.info("\n## Step 4B: Deletion of NSP,Failed")
            return 0
        if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid) != 0:
            self._log.info("\n## Step 4C: Verify deletion of NSP, Failed")
            return 0
        self._log.info("\n## TESTCASE_GBP_NSP_FUNC_3: PASSED")
        return 1
# Script entry point: run the suite only when executed directly.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.