# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts Contrack text format into jsonfiles format for experiments."""
import json
import os
import re
from typing import List, Text
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from contrack import encoding
flags.DEFINE_string('input_file', '/tmp/input.txt', 'input file path')
flags.DEFINE_string('output_dir', '/tmp/output', 'output directory path')
FLAGS = flags.FLAGS
ENVIRONMENT = 'env'
GENDER_FLAGS = {'f': 'female', 'm': 'male', 'n': 'neuter', 'u': 'unknown'}
def _parse_enrefs(encodings, entities: List[Text],
                  utterance: Text, sender: Text,
                  declarations: List[Text]):
"""Parses the enref declarations."""
enrefs = []
participants = entities[:2]
for decl in declarations:
if not decl:
continue
is_new = False
if decl[-1] != ']':
raise Exception('Missing bracket in enref declaration %s' % decl)
decl = decl[:-1]
elements = decl.split(' ')
if len(elements) != 3:
raise Exception('Invalid enref declaration %s' % decl)
entity_name = elements[0]
domain = 'people'
if entity_name.startswith('person:') or entity_name.startswith('p:'):
domain = 'people'
entity_name = re.sub(r'^.*?:', '', entity_name)
if entity_name.startswith('location:') or entity_name.startswith('l:'):
domain = 'locations'
entity_name = re.sub(r'^.*?:', '', entity_name)
if entity_name not in entities:
entities.append(entity_name)
is_new = True
span = [int(k.strip()) for k in elements[2].split('-')]
if len(span) != 2:
raise Exception('Invalid span in enref declaration %s' % decl)
span_words = utterance.split(' ')[span[0]:(span[1] + 1)]
span_text = ' '.join(span_words)
enref = encodings.new_enref_encoding()
enref.populate(entity_name, (span[0], span[1] + 1), span_text)
enref.enref_meta.set_is_enref(True)
enref.enref_meta.set_is_new(is_new)
enref.enref_meta.set_is_new_continued(False)
enref.enref_id.set(entities.index(entity_name))
enref.enref_properties.set_domain(domain)
if elements[1].startswith('g'):
members_decl = re.search(r'\((.*?)\)', elements[1])
if members_decl is None:
raise Exception('Cannot parse group declaration: %s' % elements[1])
members = members_decl.group(1).split(':')
if members == ['']:
members = []
member_ids = [entities.index(m) for m in members]
enref.enref_properties.set_is_group(True)
enref.enref_membership.set(member_ids, members)
else:
enref.enref_properties.set_is_group(False)
if domain == 'people':
gender = GENDER_FLAGS[elements[1][0]]
enref.enref_properties.set_gender(gender)
is_sender = entity_name == sender
is_recipient = not is_sender and entity_name in participants
enref.enref_context.set_is_sender(is_sender)
enref.enref_context.set_is_recipient(is_recipient)
enref.enref_context.set_message_offset(0)
enref.signals.set([])
logging.info('enref: %s', str(enref))
enrefs.append(enref)
return enrefs
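# Hedged illustration (an assumption inferred from the parser above, not taken
# from the original documentation): each input line has three '|'-separated
# sections -- sender, utterance, and enref declarations of the form
# "[<entity> <flags> <start>-<end>]", e.g.
#   John | i will meet Mary tomorrow | [p:John m 0-0] [p:Mary f 3-3]
# where 'p:'/'l:' select the people/locations domain, 'f'/'m'/'n'/'u' set the
# gender, 'g(a:b)' would declare a group with members a and b, and the span
# indexes words in the utterance.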
def convert(input_path, output_path):
"""Converts a file with conversations into a jsonfiles file."""
encodings = encoding.Encodings()
logging.info('Converting data from %s', input_path)
input_file_name = os.path.basename(input_path)
input_file_name = os.path.splitext(input_file_name)[0]
entities = []
conversations = []
conversation = {}
conversation_id = 0
word_count = 0
scenario_id = None
with tf.io.gfile.GFile(input_path, 'r') as input_file:
for line in input_file:
if not line.strip() and conversation:
conversation['entities'] = list(entities)
conversations.append(conversation)
entities = []
conversation = {}
scenario_id = None
conversation_id += 1
word_count = 0
continue
logging.info('read line %s', line)
# Extract line sections
sections = line.strip().split('|')
sender = sections[0].strip()
utterance = sections[1].strip()
enrefs_section = sections[2].strip()
if sender.startswith('conv:'):
scenario_id = sender[5:]
sender = ENVIRONMENT
conversation = {'conv_id': conversation_id,
'scenario_id': scenario_id,
'turns': []}
# Parse (enrefs)
enref_decls = enrefs_section.split('[')
enrefs = _parse_enrefs(encodings, entities, utterance, sender,
enref_decls)
for enref in enrefs:
enref.word_span = (enref.word_span[0] + word_count,
enref.word_span[1] - 1 + word_count)
# Parse words in utterance
words = utterance.lower().split(' ')
logging.info(words)
if sender != ENVIRONMENT:
turn = {'sender': sender, 'words': words, 'enrefs': enrefs}
word_count += len(words)
conversation['turns'].append(turn)
# Create output directory
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
output_file_name = (
os.path.splitext(os.path.basename(input_path))[0] + '.jsonlines')
data_file_path = os.path.join(output_path, output_file_name)
logging.info('Writing to %s', data_file_path)
with tf.io.gfile.GFile(data_file_path, 'w') as output_file:
for conversation in conversations:
jsonline = {
'doc_key': 'tc/' + conversation['scenario_id'],
'sentences': [],
'speakers': []
}
enrefs = []
for turn in conversation['turns']:
jsonline['sentences'].append(turn['words'])
jsonline['speakers'].append([turn['sender']] * len(turn['words']))
enrefs += turn['enrefs']
clusters = []
logging.info(enrefs)
for e_id, _ in enumerate(conversation['entities']):
cluster = []
for e in enrefs:
if e.enref_id.get() == e_id and e.enref_properties.is_group() <= 0:
cluster.append(list(e.word_span))
if cluster:
clusters.append(cluster)
jsonline['clusters'] = clusters
output_file.write(json.dumps(jsonline) + '\n')
def main(argv):
del argv
convert(FLAGS.input_file, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
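# Hedged usage sketch (not part of the original module): assuming the file is
# saved as convert_data.py, the converter is run as a script, e.g.
#   python convert_data.py --input_file=/tmp/input.txt --output_dir=/tmp/output
# which reads the Contrack text format and writes <input basename>.jsonlines
# into the output directory.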
# Function to scan for pseudolandmarks along the x-axis
import cv2
import os
import numpy as np
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import fatal_error
def x_axis_pseudolandmarks(img, obj, mask, label="default"):
"""Divide up object contour into 20 equidistance segments and generate landmarks for each
Inputs:
img = This is a copy of the original plant image generated using np.copy if debug is true it will be drawn on
obj = a contour of the plant object (this should be output from the object_composition.py fxn)
mask = this is a binary image. The object should be white and the background should be black
label = optional label parameter, modifies the variable name of observations recorded
Returns:
top = List of landmark points within 'top' portion
bottom = List of landmark points within the 'bottom' portion
center_v = List of landmark points within the middle portion
:param img: numpy.ndarray
:param obj: list
:param mask: numpy.ndarray
:param label: str
:return top: list
:return bottom: list
:return center_v: list
"""
# Lets get some landmarks scanning along the x-axis
params.device += 1
if not np.any(obj):
return ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA')
x, y, width, height = cv2.boundingRect(obj)
extent = width
# Outputs
top = []
bottom = []
center_v = []
top_list = []
bottom_list = []
center_v_list = []
    # If the width is at least 21 pixels, make 20 increments (5% intervals)
if extent >= 21:
inc = int(extent / 21)
# Define variable for max points and min points
pts_max = []
pts_min = []
# Get max and min points for each of the intervals
for i in range(1, 21):
if i == 1:
pt_max = x + (inc * i)
pt_min = x
else:
pt_max = x + (inc * i)
pt_min = x + (inc * (i - 1))
# Put these in an array
pts_max.append(pt_max)
pts_min.append(pt_min)
# Combine max and min into a set of tuples
point_range = list(zip(pts_min, pts_max))
# define some list variables to fill
col_median = []
col_ave = []
max_height = []
top_points = []
bottom_points = []
x_vals = []
x_centroids = []
y_centroids = []
# For each of the 20 intervals
for pt in point_range:
# Get the left and right bounds
left_point, right_point = pt
# Get all cols within these two points
cols = []
ups = []
bps = []
            # Get a continuous list of the values between the left and right of the interval, saved as vals
vals = list(range(left_point, right_point))
# For each col... get all coordinates from object contour that match col
for v in vals:
# Value is all entries that match the col
value = obj[v == obj[:, 0, 0]]
if len(value) > 0:
                    # There could be more than two contour points in each pixel column
# Grab largest y coordinate (row)
largest = value[:, 0, 1].max()
# Grab smallest y coordinate (row)
smallest = value[:, 0, 1].min()
# Take the difference between the two (this is how far across the object is on this plane)
col_width = largest - smallest
# Append this value to a list
cols.append(col_width)
ups.append(smallest)
bps.append(largest)
if len(value) == 0:
col_width = 1
cols.append(col_width)
ups.append(1)
bps.append(1)
# For each of the points find the median and average width
col_median.append(np.median(np.array(cols)))
col_ave.append(np.mean(np.array(cols)))
max_height.append(np.max(np.array(cols)))
            # Mean of the top-most and bottom-most y coordinates across the interval
            top_points.append(np.mean(ups))
            bottom_points.append(np.mean(bps))
xval = int((left_point + right_point) / 2)
x_vals.append(xval)
# Make a copy of the mask; we want to get landmark points from this
window = np.copy(mask)
window[:, :left_point] = 0
window[:, right_point:] = 0
s = cv2.moments(window)
# Centroid (center of mass x, center of mass y)
if largest - smallest > 3:
if s['m00'] > 0.001:
smx, smy = (s['m10'] / s['m00'], s['m01'] / s['m00'])
x_centroids.append(int(smx))
y_centroids.append(int(smy))
if s['m00'] < 0.001:
smx, smy = (s['m10'] / 0.001, s['m01'] / 0.001)
x_centroids.append(int(smx))
y_centroids.append(int(smy))
else:
                smx = xval
                smy = (largest + smallest) / 2
x_centroids.append(int(smx))
y_centroids.append(int(smy))
        # Get the index of the largest median/average y-axis value (if there is a tie it takes the largest index)
# indice_median = col_median.index(max(col_median))
# indice_ave = col_ave.index(max(col_ave))
# median_value = col_median[indice_median]
# ave_value = col_ave[indice_ave]
# max_value = max_width[indice_ave]
top = list(zip(x_vals, top_points))
top = np.array(top)
top.shape = (20, 1, 2)
bottom = list(zip(x_vals, bottom_points))
bottom = np.array(bottom)
bottom.shape = (20, 1, 2)
center_v = list(zip(x_centroids, y_centroids))
center_v = np.array(center_v)
center_v.shape = (20, 1, 2)
img2 = np.copy(img)
for i in top:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
for i in bottom:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
for i in center_v:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)
if params.debug == 'plot':
plot_image(img2)
elif params.debug == 'print':
print_image(img2,
os.path.join(params.debug_outdir, (str(params.device) + '_x_axis_pseudolandmarks.png')))
elif extent < 21:
        # If the width of the object is less than 21 pixels, just make the object a 20-pixel rectangle
x, y, width, height = cv2.boundingRect(obj)
x_coords = list(range(x, x + 20))
u_points = [y] * 20
top = list(zip(x_coords, u_points))
top = np.array(top)
top.shape = (20, 1, 2)
        b_points = [y + height] * 20
bottom = list(zip(x_coords, b_points))
bottom = np.array(bottom)
bottom.shape = (20, 1, 2)
m = cv2.moments(mask, binaryImage=True)
if m['m00'] == 0:
fatal_error('Check input parameters, first moment=0')
else:
# Centroid (center of mass x, center of mass y)
cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
c_points = [cmy] * 20
center_v = list(zip(x_coords, c_points))
center_v = np.array(center_v)
center_v.shape = (20, 1, 2)
img2 = np.copy(img)
for i in top:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
for i in bottom:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
for i in center_v:
x = i[0, 0]
y = i[0, 1]
cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)
if params.debug == 'plot':
plot_image(img2)
elif params.debug == 'print':
print_image(img2, os.path.join(params.debug_outdir, (str(params.device) + '_x_axis_pseudolandmarks.png')))
# Store into global measurements
for pt in top:
top_list.append(pt[0].tolist())
for pt in bottom:
bottom_list.append(pt[0].tolist())
for pt in center_v:
center_v_list.append(pt[0].tolist())
outputs.add_observation(sample=label, variable='top_lmk', trait='top landmark coordinates',
method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=tuple,
value=tuple(top_list), label='none')
outputs.add_observation(sample=label, variable='bottom_lmk', trait='bottom landmark coordinates',
method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=tuple,
value=tuple(bottom_list), label='none')
outputs.add_observation(sample=label, variable='center_v_lmk', trait='center vertical landmark coordinates',
method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=tuple,
value=tuple(center_v_list), label='none')
return top, bottom, center_v
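# Hedged usage sketch (illustrative, not part of PlantCV): `rgb_img`, `plant_obj`
# and `plant_mask` are assumed to come from earlier PlantCV segmentation steps.
#   top, bottom, center_v = x_axis_pseudolandmarks(img=rgb_img, obj=plant_obj,
#                                                  mask=plant_mask, label="plant1")
# Each returned array has shape (20, 1, 2) and holds (x, y) pseudolandmarks for
# the top edge, bottom edge, and vertical center of the object, respectively.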
# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from concurrent import futures
import contextlib
import functools
import os
import random
import re
import select
import shlex
import signal
import subprocess
import fixtures
import netaddr
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.db import db_base_plugin_common
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests import tools
UNDEFINED = object()
NS_PREFIX = 'test-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
PATCH_PREFIX = 'patch'
MACVTAP_PREFIX = 'macvtap'
# The port name should be shorter than DEVICE_NAME_MAX_LEN because, if this
# port is used to provide a VLAN connection between two linuxbridge agents,
# space for the VLAN ID is also required. The VLAN ID can take up to 4 digits,
# and the device name contains an additional ".", which together add up to
# DEVICE_NAME_MAX_LEN = 15 chars.
LB_DEVICE_NAME_MAX_LEN = 10
SS_SOURCE_PORT_PATTERN = re.compile(
r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[^\s]+:.*')
READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5)
CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20)
CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5)
TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP)
def increment_ip_cidr(ip_cidr, offset=1):
"""Increment ip_cidr offset times.
example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
"""
net0 = netaddr.IPNetwork(ip_cidr)
net = netaddr.IPNetwork(ip_cidr)
net.value += offset
if not net0.network < net.ip < net0[-1]:
tools.fail(
'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
'outside ip_cidr' % (ip_cidr, offset))
return str(net)
def set_namespace_gateway(port_dev, gateway_ip):
"""Set gateway for the namespace associated to the port."""
if not port_dev.namespace:
tools.fail('tests should not change test machine gateway')
port_dev.route.add_gateway(gateway_ip)
def assert_ping(src_namespace, dst_ip, timeout=1, count=1):
ipversion = netaddr.IPAddress(dst_ip).version
ping_command = 'ping' if ipversion == 4 else 'ping6'
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout,
dst_ip])
@contextlib.contextmanager
def async_ping(namespace, ips):
with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor:
fs = [executor.submit(assert_ping, namespace, ip, count=10)
for ip in ips]
yield lambda: all(f.done() for f in fs)
futures.wait(fs)
for f in fs:
f.result()
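# Hedged usage sketch (illustrative; namespace and addresses are assumptions):
# ping several addresses in the background while exercising the data plane.
#
#   with async_ping('test-ns', ['10.0.0.2', '10.0.0.3']) as pings_done:
#       pass  # e.g. restart an agent, flip a port, etc.
#   # leaving the block waits for all pings; f.result() re-raises any failure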
def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1):
try:
assert_ping(src_namespace, dst_ip, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to ping from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
"""Send arp request using arping executable.
NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery
Protocol instead.
"""
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
arping_cmd = ['arping', '-c', count, '-w', timeout]
if source:
arping_cmd.extend(['-s', source])
arping_cmd.append(dst_ip)
ns_ip_wrapper.netns.execute(arping_cmd)
def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
try:
assert_arping(src_namespace, dst_ip, source, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to arp from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def _get_source_ports_from_ss_output(output):
ports = set()
for line in output.splitlines():
match = SS_SOURCE_PORT_PATTERN.match(line)
if match:
ports.add(int(match.group('port')))
return ports
def get_unused_port(used, start=1024, end=65535):
candidates = set(range(start, end + 1))
return random.choice(list(candidates - used))
def get_free_namespace_port(protocol, namespace=None):
"""Return an unused port from given namespace
WARNING: This function returns a port that is free at the execution time of
this function. If this port is used later for binding then there
is a potential danger that port will be no longer free. It's up to
the programmer to handle error if port is already in use.
:param protocol: Return free port for given protocol. Supported protocols
are 'tcp' and 'udp'.
"""
if protocol == n_const.PROTO_NAME_TCP:
param = '-tna'
elif protocol == n_const.PROTO_NAME_UDP:
param = '-una'
else:
raise ValueError("Unsupported procotol %s" % protocol)
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
output = ip_wrapper.netns.execute(['ss', param])
used_ports = _get_source_ports_from_ss_output(output)
return get_unused_port(used_ports)
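# Hedged example (illustrative): pick a TCP port that is currently unused in
# the root namespace.
#   port = get_free_namespace_port(n_const.PROTO_NAME_TCP)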
def create_patch_ports(source, destination):
"""Hook up two OVS bridges.
The result is two patch ports, each end connected to a bridge.
The two patch port names will start with 'patch-', followed by identical
four characters. For example patch-xyzw-fedora, and patch-xyzw-ubuntu,
where fedora and ubuntu are random strings.
:param source: Instance of OVSBridge
:param destination: Instance of OVSBridge
"""
common = tests_base.get_rand_name(max_length=4, prefix='')
prefix = '%s-%s-' % (PATCH_PREFIX, common)
source_name = tests_base.get_rand_device_name(prefix=prefix)
destination_name = tests_base.get_rand_device_name(prefix=prefix)
source.add_patch_port(source_name, destination_name)
destination.add_patch_port(destination_name, source_name)
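# Hedged usage sketch (illustrative): given two OVSBridge instances, e.g. the
# `.bridge` attribute of two OVSBridgeFixture objects set up in a test, wire
# them together with a patch port pair:
#   create_patch_ports(source=br_int, destination=br_ex)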
class RootHelperProcess(subprocess.Popen):
def __init__(self, cmd, *args, **kwargs):
for arg in ('stdin', 'stdout', 'stderr'):
kwargs.setdefault(arg, subprocess.PIPE)
self.namespace = kwargs.pop('namespace', None)
self.cmd = cmd
if self.namespace is not None:
cmd = ['ip', 'netns', 'exec', self.namespace] + cmd
root_helper = config.get_root_helper(utils.cfg.CONF)
cmd = shlex.split(root_helper) + cmd
self.child_pid = None
super(RootHelperProcess, self).__init__(cmd, *args, **kwargs)
self._wait_for_child_process()
def kill(self, sig=signal.SIGKILL):
pid = self.child_pid or str(self.pid)
utils.execute(['kill', '-%d' % sig, pid], run_as_root=True)
def read_stdout(self, timeout=None):
return self._read_stream(self.stdout, timeout)
@staticmethod
def _read_stream(stream, timeout):
if timeout:
poller = select.poll()
poller.register(stream.fileno())
poll_predicate = functools.partial(poller.poll, 1)
utils.wait_until_true(poll_predicate, timeout, 0.1,
RuntimeError(
'No output in %.2f seconds' % timeout))
return stream.readline()
def writeline(self, data):
self.stdin.write(data + os.linesep)
self.stdin.flush()
def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT,
sleep=CHILD_PROCESS_SLEEP):
def child_is_running():
child_pid = utils.get_root_helper_child_pid(
self.pid, self.cmd, run_as_root=True)
if utils.pid_invoked_with_cmdline(child_pid, self.cmd):
return True
utils.wait_until_true(
child_is_running,
timeout,
exception=RuntimeError("Process %s hasn't been spawned "
"in %d seconds" % (self.cmd, timeout)))
self.child_pid = utils.get_root_helper_child_pid(
self.pid, self.cmd, run_as_root=True)
@property
def is_running(self):
return self.poll() is None
class Pinger(object):
"""Class for sending ICMP packets asynchronously
    The aim is to keep sending ICMP packets in the background while other code
    executes. Once the background 'ping' command is stopped, statistics are
    available.
    The difference from the assert_(no_)ping() functions in this module is that
    those send a given count of ICMP packets and wait for the exit code of the
    'ping' command.
    >>> pinger = Pinger('pinger_test', '192.168.0.2')
    >>> pinger.start(); time.sleep(5); pinger.stop()
    >>> pinger.sent, pinger.received
    (7, 7)
"""
stats_pattern = re.compile(
r'^(?P<trans>\d+) packets transmitted,.*(?P<recv>\d+) received.*$')
unreachable_pattern = re.compile(
r'.* Destination .* Unreachable')
TIMEOUT = 15
def __init__(self, namespace, address, count=None, timeout=1):
self.proc = None
self.namespace = namespace
self.address = address
self.count = count
self.timeout = timeout
self.destination_unreachable = False
self.sent = 0
self.received = 0
def _wait_for_death(self):
is_dead = lambda: self.proc.poll() is not None
utils.wait_until_true(
is_dead, timeout=self.TIMEOUT, exception=RuntimeError(
"Ping command hasn't ended after %d seconds." % self.TIMEOUT))
def _parse_stats(self):
for line in self.proc.stdout:
if (not self.destination_unreachable and
self.unreachable_pattern.match(line)):
self.destination_unreachable = True
continue
result = self.stats_pattern.match(line)
if result:
self.sent = int(result.group('trans'))
self.received = int(result.group('recv'))
break
else:
raise RuntimeError("Didn't find ping statistics.")
def start(self):
if self.proc and self.proc.is_running:
raise RuntimeError("This pinger has already a running process")
ip_version = ip_lib.get_ip_version(self.address)
ping_exec = 'ping' if ip_version == 4 else 'ping6'
cmd = [ping_exec, self.address, '-W', str(self.timeout)]
if self.count:
cmd.extend(['-c', str(self.count)])
self.proc = RootHelperProcess(cmd, namespace=self.namespace)
def stop(self):
if self.proc and self.proc.is_running:
self.proc.kill(signal.SIGINT)
self._wait_for_death()
self._parse_stats()
def wait(self):
if self.count:
self._wait_for_death()
self._parse_stats()
else:
raise RuntimeError("Pinger is running infinitelly, use stop() "
"first")
class NetcatTester(object):
TCP = n_const.PROTO_NAME_TCP
UDP = n_const.PROTO_NAME_UDP
VERSION_TO_ALL_ADDRESS = {
4: '0.0.0.0',
6: '::',
}
def __init__(self, client_namespace, server_namespace, address,
dst_port, protocol, server_address=None, src_port=None):
"""
        Tool for testing connectivity at the transport layer using the netcat
        executable.
The processes are spawned lazily.
:param client_namespace: Namespace in which netcat process that
connects to other netcat will be spawned
:param server_namespace: Namespace in which listening netcat process
will be spawned
:param address: Server address from client point of view
:param dst_port: Port on which netcat listens
:param protocol: Transport protocol, either 'tcp' or 'udp'
:param server_address: Address in server namespace on which netcat
should listen
        :param src_port: Source port of the netcat process spawned in the
                         client namespace - packets will carry this value as
                         the source port in their TCP/UDP headers
"""
self.client_namespace = client_namespace
self.server_namespace = server_namespace
self._client_process = None
self._server_process = None
self.address = address
self.dst_port = str(dst_port)
self.src_port = str(src_port) if src_port else None
if protocol not in TRANSPORT_PROTOCOLS:
raise ValueError("Unsupported protocol %s" % protocol)
self.protocol = protocol
ip_version = netaddr.IPAddress(address).version
self.server_address = (
server_address or self.VERSION_TO_ALL_ADDRESS[ip_version])
@property
def client_process(self):
if not self._client_process:
self.establish_connection()
return self._client_process
@property
def server_process(self):
if not self._server_process:
self._spawn_server_process()
return self._server_process
def _spawn_server_process(self):
self._server_process = self._spawn_nc_in_namespace(
self.server_namespace,
address=self.server_address,
listen=True)
@property
def is_established(self):
return bool(self._client_process and not self._client_process.poll())
def establish_connection(self):
if self.is_established:
raise RuntimeError('%(proto)s connection to %(ip_addr)s is already'
' established' %
{'proto': self.protocol,
'ip_addr': self.address})
if not self._server_process:
self._spawn_server_process()
self._client_process = self._spawn_nc_in_namespace(
self.client_namespace,
address=self.address)
if self.protocol == self.UDP:
            # Create an ASSURED entry in the conntrack table for UDP packets,
            # which requires a 3-way exchange:
# 1st transmission creates UNREPLIED
# 2nd transmission removes UNREPLIED
# 3rd transmission creates ASSURED
data = 'foo'
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
self.server_process.writeline(data)
self.client_process.read_stdout(READ_TIMEOUT)
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
def test_connectivity(self, respawn=False):
testing_string = uuidutils.generate_uuid()
if respawn:
self.stop_processes()
self.client_process.writeline(testing_string)
message = self.server_process.read_stdout(READ_TIMEOUT).strip()
self.server_process.writeline(message)
message = self.client_process.read_stdout(READ_TIMEOUT).strip()
return message == testing_string
def _spawn_nc_in_namespace(self, namespace, address, listen=False):
cmd = ['nc', address, self.dst_port]
if self.protocol == self.UDP:
cmd.append('-u')
if listen:
cmd.append('-l')
if self.protocol == self.TCP:
cmd.append('-k')
else:
cmd.extend(['-w', '20'])
if self.src_port:
cmd.extend(['-p', self.src_port])
proc = RootHelperProcess(cmd, namespace=namespace)
return proc
def stop_processes(self):
for proc_attr in ('_client_process', '_server_process'):
proc = getattr(self, proc_attr)
if proc:
if proc.poll() is None:
proc.kill()
proc.wait()
setattr(self, proc_attr, None)
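# Hedged usage sketch (illustrative; namespaces and addresses are assumptions):
# verify a TCP round trip between two namespaces and then clean up.
#
#   nc = NetcatTester(client_namespace='test-ns-client',
#                     server_namespace='test-ns-server',
#                     address='10.0.0.2', dst_port=3333,
#                     protocol=NetcatTester.TCP)
#   assert nc.test_connectivity()
#   nc.stop_processes()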
class NamespaceFixture(fixtures.Fixture):
"""Create a namespace.
:ivar ip_wrapper: created namespace
:type ip_wrapper: IPWrapper
:ivar name: created namespace name
:type name: str
"""
def __init__(self, prefix=NS_PREFIX):
super(NamespaceFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ip = ip_lib.IPWrapper()
self.name = self.prefix + uuidutils.generate_uuid()
self.addCleanup(self.destroy)
self.ip_wrapper = ip.ensure_namespace(self.name)
def destroy(self):
if self.ip_wrapper.netns.exists(self.name):
self.ip_wrapper.netns.delete(self.name)
class VethFixture(fixtures.Fixture):
"""Create a veth.
:ivar ports: created veth ports
:type ports: tuple of 2 IPDevice
"""
def _setUp(self):
ip_wrapper = ip_lib.IPWrapper()
self.ports = common_base.create_resource(
VETH0_PREFIX,
lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name)))
self.addCleanup(self.destroy)
def destroy(self):
for port in self.ports:
ip_wrapper = ip_lib.IPWrapper(port.namespace)
if (ip_wrapper.netns.exists(port.namespace) or
port.namespace is None):
try:
ip_wrapper.del_veth(port.name)
break
except RuntimeError:
# NOTE(cbrandily): It seems a veth is automagically deleted
# when a namespace owning a veth endpoint is deleted.
pass
@staticmethod
def get_peer_name(name):
if name.startswith(VETH0_PREFIX):
return name.replace(VETH0_PREFIX, VETH1_PREFIX)
elif name.startswith(VETH1_PREFIX):
return name.replace(VETH1_PREFIX, VETH0_PREFIX)
else:
tools.fail('%s is not a valid VethFixture veth endpoint' % name)
class NamedVethFixture(VethFixture):
"""Create a veth with at least one specified name of a device
:ivar ports: created veth ports
:type ports: tuple of 2 IPDevice
"""
def __init__(self, veth0_prefix=VETH0_PREFIX, veth1_prefix=VETH1_PREFIX):
super(NamedVethFixture, self).__init__()
self.veth0_name = self.get_veth_name(veth0_prefix)
self.veth1_name = self.get_veth_name(veth1_prefix)
def _setUp(self):
ip_wrapper = ip_lib.IPWrapper()
self.ports = ip_wrapper.add_veth(self.veth0_name, self.veth1_name)
self.addCleanup(self.destroy)
@staticmethod
def get_veth_name(name):
if name.startswith(VETH0_PREFIX):
return tests_base.get_rand_device_name(VETH0_PREFIX)
if name.startswith(VETH1_PREFIX):
return tests_base.get_rand_device_name(VETH1_PREFIX)
return name
class MacvtapFixture(fixtures.Fixture):
"""Create a macvtap.
:param src_dev: source device for macvtap
:type src_dev: IPDevice
:param mode: mode of macvtap
:type mode: string
:ivar ip_dev: created macvtap
:type ip_dev: IPDevice
"""
def __init__(self, src_dev=None, mode=None, prefix=MACVTAP_PREFIX):
super(MacvtapFixture, self).__init__()
self.src_dev = src_dev
self.mode = mode
self.prefix = prefix
def _setUp(self):
ip_wrapper = ip_lib.IPWrapper()
self.ip_dev = common_base.create_resource(
self.prefix,
ip_wrapper.add_macvtap,
self.src_dev, mode=self.mode)
self.addCleanup(self.destroy)
def destroy(self):
ip_wrapper = ip_lib.IPWrapper(self.ip_dev.namespace)
if (ip_wrapper.netns.exists(self.ip_dev.namespace) or
self.ip_dev.namespace is None):
try:
self.ip_dev.link.delete()
except RuntimeError:
pass
@six.add_metaclass(abc.ABCMeta)
class PortFixture(fixtures.Fixture):
"""Create a port.
:ivar port: created port
:type port: IPDevice
:ivar bridge: port bridge
"""
def __init__(self, bridge=None, namespace=None, mac=None, port_id=None):
super(PortFixture, self).__init__()
self.bridge = bridge
self.namespace = namespace
self.mac = (
mac or db_base_plugin_common.DbBasePluginCommon._generate_mac())
self.port_id = port_id or uuidutils.generate_uuid()
@abc.abstractmethod
def _create_bridge_fixture(self):
pass
@abc.abstractmethod
def _setUp(self):
super(PortFixture, self)._setUp()
if not self.bridge:
self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
@classmethod
def get(cls, bridge, namespace=None, mac=None, port_id=None):
"""Deduce PortFixture class from bridge type and instantiate it."""
if isinstance(bridge, ovs_lib.OVSBridge):
return OVSPortFixture(bridge, namespace, mac, port_id)
if isinstance(bridge, bridge_lib.BridgeDevice):
return LinuxBridgePortFixture(bridge, namespace, mac, port_id)
if isinstance(bridge, VethBridge):
return VethPortFixture(bridge, namespace)
tools.fail('Unexpected bridge type: %s' % type(bridge))
class OVSBridgeFixture(fixtures.Fixture):
"""Create an OVS bridge.
:ivar prefix: bridge name prefix
:type prefix: str
:ivar bridge: created bridge
:type bridge: OVSBridge
"""
def __init__(self, prefix=BR_PREFIX):
super(OVSBridgeFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ovs = ovs_lib.BaseOVS()
self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge)
self.addCleanup(self.bridge.destroy)
class OVSPortFixture(PortFixture):
def _create_bridge_fixture(self):
return OVSBridgeFixture()
def _setUp(self):
super(OVSPortFixture, self)._setUp()
interface_config = cfg.ConfigOpts()
interface_config.register_opts(interface.OPTS)
ovs_interface = interface.OVSInterfaceDriver(interface_config)
        # Because in some tests this port can be used to provide a connection
        # between linuxbridge agents, and a vlan_id can also be added to the
        # device name, the name must be at most LB_DEVICE_NAME_MAX_LEN long.
port_name = tests_base.get_rand_name(
LB_DEVICE_NAME_MAX_LEN,
PORT_PREFIX
)
ovs_interface.plug_new(
None,
self.port_id,
port_name,
self.mac,
bridge=self.bridge.br_name,
namespace=self.namespace)
self.addCleanup(self.bridge.delete_port, port_name)
self.port = ip_lib.IPDevice(port_name, self.namespace)
class LinuxBridgeFixture(fixtures.Fixture):
"""Create a linux bridge.
:ivar bridge: created bridge
:type bridge: BridgeDevice
:ivar namespace: created bridge namespace
:type namespace: str
"""
def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED,
prefix_is_full_name=False):
super(LinuxBridgeFixture, self).__init__()
self.prefix = prefix
self.prefix_is_full_name = prefix_is_full_name
self.namespace = namespace
def _setUp(self):
if self.namespace is UNDEFINED:
self.namespace = self.useFixture(NamespaceFixture()).name
self.bridge = self._create_bridge()
self.addCleanup(self.safe_delete)
self.bridge.link.set_up()
self.addCleanup(self.safe_set_down)
def safe_set_down(self):
try:
self.bridge.link.set_down()
except RuntimeError:
pass
def safe_delete(self):
try:
self.bridge.delbr()
except RuntimeError:
pass
def _create_bridge(self):
if self.prefix_is_full_name:
return bridge_lib.BridgeDevice.addbr(
name=self.prefix,
namespace=self.namespace
)
else:
return common_base.create_resource(
self.prefix,
bridge_lib.BridgeDevice.addbr,
namespace=self.namespace)
class LinuxBridgePortFixture(PortFixture):
"""Create a linux bridge port.
:ivar port: created port
:type port: IPDevice
:ivar br_port: bridge side veth peer port
:type br_port: IPDevice
"""
def __init__(self, bridge, namespace=None, mac=None, port_id=None):
super(LinuxBridgePortFixture, self).__init__(
bridge, namespace, mac, port_id)
        # We need to override the port_id value here because PortFixture always
        # generates a random one. In LinuxBridgePortFixture it has to stay empty
        # if it was not given, because only then is a proper veth pair created
        # (for example in some functional tests).
self.port_id = port_id
def _create_bridge_fixture(self):
return LinuxBridgeFixture()
def _setUp(self):
super(LinuxBridgePortFixture, self)._setUp()
br_port_name = self._get_port_name()
if br_port_name:
self.br_port, self.port = self.useFixture(
NamedVethFixture(veth0_prefix=br_port_name)).ports
else:
self.br_port, self.port = self.useFixture(VethFixture()).ports
if self.mac:
self.port.link.set_address(self.mac)
# bridge side
br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
br_ip_wrapper.add_device_to_namespace(self.br_port)
self.bridge.addif(self.br_port)
self.br_port.link.set_up()
# port side
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
def _get_port_name(self):
if self.port_id:
return linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
self.port_id)
return None
class VethBridge(object):
def __init__(self, ports):
self.ports = ports
self.unallocated_ports = set(self.ports)
def allocate_port(self):
try:
return self.unallocated_ports.pop()
except KeyError:
tools.fail('All FakeBridge ports (%s) are already allocated.' %
len(self.ports))
class VethBridgeFixture(fixtures.Fixture):
"""Simulate a bridge with a veth.
:ivar bridge: created bridge
:type bridge: FakeBridge
"""
def _setUp(self):
ports = self.useFixture(VethFixture()).ports
self.bridge = VethBridge(ports)
class VethPortFixture(PortFixture):
"""Create a veth bridge port.
:ivar port: created port
:type port: IPDevice
"""
def _create_bridge_fixture(self):
return VethBridgeFixture()
def _setUp(self):
super(VethPortFixture, self)._setUp()
self.port = self.bridge.allocate_port()
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
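# Hedged usage sketch (illustrative): inside a functional test case these
# fixtures are typically composed through useFixture, letting PortFixture.get()
# pick the right port fixture for the bridge type.
#
#   bridge = self.useFixture(OVSBridgeFixture()).bridge
#   port_fixture = self.useFixture(PortFixture.get(bridge))
#   device = port_fixture.port   # IPDevice plugged into the bridge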
"""Utility meter from sensors providing raw data."""
from datetime import date, timedelta
from decimal import Decimal, DecimalException
import logging
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import (
async_track_state_change,
async_track_time_change,
)
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
from .const import (
CONF_METER,
CONF_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
DAILY,
DATA_UTILITY,
HOURLY,
MONTHLY,
QUARTERLY,
SIGNAL_RESET_METER,
WEEKLY,
YEARLY,
)
_LOGGER = logging.getLogger(__name__)
ATTR_SOURCE_ID = "source"
ATTR_STATUS = "status"
ATTR_PERIOD = "meter_period"
ATTR_LAST_PERIOD = "last_period"
ATTR_LAST_RESET = "last_reset"
ATTR_TARIFF = "tariff"
ICON = "mdi:counter"
PRECISION = 3
PAUSED = "paused"
COLLECTING = "collecting"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the utility meter sensor."""
if discovery_info is None:
_LOGGER.error("This platform is only available through discovery")
return
meters = []
for conf in discovery_info:
meter = conf[CONF_METER]
conf_meter_source = hass.data[DATA_UTILITY][meter][CONF_SOURCE_SENSOR]
conf_meter_type = hass.data[DATA_UTILITY][meter].get(CONF_METER_TYPE)
conf_meter_offset = hass.data[DATA_UTILITY][meter][CONF_METER_OFFSET]
conf_meter_net_consumption = hass.data[DATA_UTILITY][meter][
CONF_METER_NET_CONSUMPTION
]
conf_meter_tariff_entity = hass.data[DATA_UTILITY][meter].get(
CONF_TARIFF_ENTITY
)
meters.append(
UtilityMeterSensor(
conf_meter_source,
conf.get(CONF_NAME),
conf_meter_type,
conf_meter_offset,
conf_meter_net_consumption,
conf.get(CONF_TARIFF),
conf_meter_tariff_entity,
)
)
async_add_entities(meters)
class UtilityMeterSensor(RestoreEntity):
"""Representation of an utility meter sensor."""
def __init__(
self,
source_entity,
name,
meter_type,
meter_offset,
net_consumption,
tariff=None,
tariff_entity=None,
):
"""Initialize the Utility Meter sensor."""
self._sensor_source_id = source_entity
self._state = 0
self._last_period = 0
self._last_reset = dt_util.now()
self._collecting = None
if name:
self._name = name
else:
self._name = f"{source_entity} meter"
self._unit_of_measurement = None
self._period = meter_type
self._period_offset = meter_offset
self._sensor_net_consumption = net_consumption
self._tariff = tariff
self._tariff_entity = tariff_entity
@callback
def async_reading(self, entity, old_state, new_state):
"""Handle the sensor state changes."""
if (
old_state is None
or new_state is None
or old_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
or new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
):
return
if (
self._unit_of_measurement is None
and new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is not None
):
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
try:
diff = Decimal(new_state.state) - Decimal(old_state.state)
if (not self._sensor_net_consumption) and diff < 0:
                # Source sensor just rolled over for unknown reasons; ignore the decrease
return
self._state += diff
except ValueError as err:
_LOGGER.warning("While processing state changes: %s", err)
except DecimalException as err:
_LOGGER.warning(
"Invalid state (%s > %s): %s", old_state.state, new_state.state, err
)
self.async_schedule_update_ha_state()
@callback
def async_tariff_change(self, entity, old_state, new_state):
"""Handle tariff changes."""
if self._tariff == new_state.state:
self._collecting = async_track_state_change(
self.hass, self._sensor_source_id, self.async_reading
)
else:
if self._collecting:
self._collecting()
self._collecting = None
_LOGGER.debug(
"%s - %s - source <%s>",
self._name,
COLLECTING if self._collecting is not None else PAUSED,
self._sensor_source_id,
)
self.async_schedule_update_ha_state()
async def _async_reset_meter(self, event):
"""Determine cycle - Helper function for larger than daily cycles."""
now = dt_util.now().date()
if (
self._period == WEEKLY
and now != now - timedelta(days=now.weekday()) + self._period_offset
):
return
if (
self._period == MONTHLY
and now != date(now.year, now.month, 1) + self._period_offset
):
return
if (
self._period == QUARTERLY
and now
!= date(now.year, (((now.month - 1) // 3) * 3 + 1), 1) + self._period_offset
):
return
if self._period == YEARLY and now != date(now.year, 1, 1) + self._period_offset:
return
await self.async_reset_meter(self._tariff_entity)
async def async_reset_meter(self, entity_id):
"""Reset meter."""
if self._tariff_entity != entity_id:
return
_LOGGER.debug("Reset utility meter <%s>", self.entity_id)
self._last_reset = dt_util.now()
self._last_period = str(self._state)
self._state = 0
await self.async_update_ha_state()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
if self._period == HOURLY:
async_track_time_change(
self.hass,
self._async_reset_meter,
minute=self._period_offset.seconds // 60,
second=self._period_offset.seconds % 60,
)
elif self._period in [DAILY, WEEKLY, MONTHLY, QUARTERLY, YEARLY]:
async_track_time_change(
self.hass,
self._async_reset_meter,
hour=self._period_offset.seconds // 3600,
minute=self._period_offset.seconds % 3600 // 60,
second=self._period_offset.seconds % 3600 % 60,
)
async_dispatcher_connect(self.hass, SIGNAL_RESET_METER, self.async_reset_meter)
state = await self.async_get_last_state()
if state:
self._state = Decimal(state.state)
self._unit_of_measurement = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._last_period = state.attributes.get(ATTR_LAST_PERIOD)
self._last_reset = state.attributes.get(ATTR_LAST_RESET)
await self.async_update_ha_state()
if state.attributes.get(ATTR_STATUS) == PAUSED:
                # Fake cancellation function to initialize the meter in a paused state
self._collecting = lambda: None
@callback
def async_source_tracking(event):
"""Wait for source to be ready, then start meter."""
if self._tariff_entity is not None:
_LOGGER.debug("Track %s", self._tariff_entity)
async_track_state_change(
self.hass, self._tariff_entity, self.async_tariff_change
)
tariff_entity_state = self.hass.states.get(self._tariff_entity)
if self._tariff != tariff_entity_state.state:
return
_LOGGER.debug("tracking source: %s", self._sensor_source_id)
self._collecting = async_track_state_change(
self.hass, self._sensor_source_id, self.async_reading
)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_source_tracking
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
state_attr = {
ATTR_SOURCE_ID: self._sensor_source_id,
ATTR_STATUS: PAUSED if self._collecting is None else COLLECTING,
ATTR_LAST_PERIOD: self._last_period,
ATTR_LAST_RESET: self._last_reset,
}
if self._period is not None:
state_attr[ATTR_PERIOD] = self._period
if self._tariff is not None:
state_attr[ATTR_TARIFF] = self._tariff
return state_attr
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
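# Hedged configuration sketch (illustrative; based on the utility_meter
# integration's YAML schema, with assumed entity names). The discovery_info
# consumed by async_setup_platform above is produced from configuration like:
#
#   utility_meter:
#     daily_energy:
#       source: sensor.energy_consumption
#       cycle: daily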
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import os
import sys
from contextlib import contextmanager
from distutils import sysconfig
from site import USER_SITE
import pkg_resources
from pkg_resources import EntryPoint, WorkingSet, find_distributions
from .common import die
from .compatibility import exec_function
from .environment import PEXEnvironment
from .executor import Executor
from .finders import get_entry_point_from_console_script, get_script_from_distributions
from .interpreter import PythonInterpreter
from .orderedset import OrderedSet
from .pex_info import PexInfo
from .tracer import TRACER
from .util import iter_pth_paths, merge_split
from .variables import ENV
class DevNull(object):
def __init__(self):
pass
def write(self, *args, **kw):
pass
def flush(self):
pass
class PEX(object): # noqa: T000
"""PEX, n. A self-contained python environment."""
class Error(Exception): pass
class NotFound(Error): pass
@classmethod
def clean_environment(cls):
try:
del os.environ['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
# Cannot change dictionary size during __iter__
filter_keys = [key for key in os.environ if key.startswith('PEX_')]
for key in filter_keys:
del os.environ[key]
def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV):
self._pex = pex
self._interpreter = interpreter or PythonInterpreter.get()
self._pex_info = PexInfo.from_pex(self._pex)
self._pex_info_overrides = PexInfo.from_env(env=env)
self._vars = env
self._envs = []
self._working_set = None
def _activate(self):
if not self._working_set:
working_set = WorkingSet([])
# set up the local .pex environment
pex_info = self._pex_info.copy()
pex_info.update(self._pex_info_overrides)
pex_info.merge_pex_path(self._vars.PEX_PATH)
self._envs.append(PEXEnvironment(self._pex, pex_info))
# N.B. by this point, `pex_info.pex_path` will contain a single pex path
# merged from pex_path in `PEX-INFO` and `PEX_PATH` set in the environment.
# `PEX_PATH` entries written into `PEX-INFO` take precedence over those set
# in the environment.
if pex_info.pex_path:
# set up other environments as specified in pex_path
for pex_path in filter(None, pex_info.pex_path.split(os.pathsep)):
pex_info = PexInfo.from_pex(pex_path)
pex_info.update(self._pex_info_overrides)
self._envs.append(PEXEnvironment(pex_path, pex_info))
# activate all of them
for env in self._envs:
for dist in env.activate():
working_set.add(dist)
self._working_set = working_set
return self._working_set
@classmethod
def _extras_paths(cls):
standard_lib = sysconfig.get_python_lib(standard_lib=True)
try:
makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())
except (AttributeError, IOError):
# This is not available by default in PyPy's distutils.sysconfig or it simply is
# no longer available on the system (IOError ENOENT)
makefile = {}
extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))
for path in extras_paths:
yield os.path.join(standard_lib, path)
# Handle .pth injected paths as extras.
sitedirs = cls._get_site_packages()
for pth_path in cls._scan_pth_files(sitedirs):
TRACER.log('Found .pth file: %s' % pth_path, V=3)
for extras_path in iter_pth_paths(pth_path):
yield extras_path
@staticmethod
def _scan_pth_files(dir_paths):
"""Given an iterable of directory paths, yield paths to all .pth files within."""
for dir_path in dir_paths:
if not os.path.exists(dir_path):
continue
pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))
for pth_filename in pth_filenames:
yield os.path.join(dir_path, pth_filename)
@staticmethod
def _get_site_packages():
try:
from site import getsitepackages
return set(getsitepackages())
except ImportError:
return set()
@classmethod
def site_libs(cls):
site_libs = cls._get_site_packages()
site_libs.update([sysconfig.get_python_lib(plat_specific=False),
sysconfig.get_python_lib(plat_specific=True)])
# On windows getsitepackages() returns the python stdlib too.
if sys.prefix in site_libs:
site_libs.remove(sys.prefix)
real_site_libs = set(os.path.realpath(path) for path in site_libs)
return site_libs | real_site_libs
@classmethod
def _tainted_path(cls, path, site_libs):
paths = frozenset([path, os.path.realpath(path)])
return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)
@classmethod
def minimum_sys_modules(cls, site_libs, modules=None):
"""Given a set of site-packages paths, return a "clean" sys.modules.
When importing site, modules within sys.modules have their __path__'s populated with
additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution
metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace
packages to leak into imports despite being scrubbed from sys.path.
    NOTE: This method mutates modules' __path__ attributes in sys.modules, so this is currently an
irreversible operation.
"""
modules = modules or sys.modules
new_modules = {}
for module_name, module in modules.items():
# builtins can stay
if not hasattr(module, '__path__'):
new_modules[module_name] = module
continue
# Unexpected objects, e.g. namespace packages, should just be dropped:
if not isinstance(module.__path__, list):
TRACER.log('Dropping %s' % (module_name,), V=3)
continue
# Pop off site-impacting __path__ elements in-place.
for k in reversed(range(len(module.__path__))):
if cls._tainted_path(module.__path__[k], site_libs):
TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)
module.__path__.pop(k)
# It still contains path elements not in site packages, so it can stay in sys.modules
if module.__path__:
new_modules[module_name] = module
return new_modules
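  # Hedged illustration (not part of the original code): if importing site added
  # '/usr/lib/python3/site-packages/foo' to foo.__path__ via a *-nspkg.pth file,
  # minimum_sys_modules() pops that element while keeping any __path__ entries
  # that live inside the PEX, so the namespace package no longer leaks
  # site-packages modules into imports.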
@classmethod
def minimum_sys_path(cls, site_libs, inherit_path):
scrub_paths = OrderedSet()
site_distributions = OrderedSet()
user_site_distributions = OrderedSet()
def all_distribution_paths(path):
locations = set(dist.location for dist in find_distributions(path))
return set([path]) | locations | set(os.path.realpath(path) for path in locations)
for path_element in sys.path:
if cls._tainted_path(path_element, site_libs):
TRACER.log('Tainted path element: %s' % path_element)
site_distributions.update(all_distribution_paths(path_element))
else:
TRACER.log('Not a tainted path element: %s' % path_element, V=2)
user_site_distributions.update(all_distribution_paths(USER_SITE))
if inherit_path == 'false':
scrub_paths = site_distributions | user_site_distributions
for path in user_site_distributions:
TRACER.log('Scrubbing from user site: %s' % path)
for path in site_distributions:
TRACER.log('Scrubbing from site-packages: %s' % path)
scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)
    # Materialize the filter so the result can be iterated more than once
    # (filter() returns a one-shot iterator on Python 3).
    scrub_from_importer_cache = list(filter(
        lambda key: any(key.startswith(path) for path in scrub_paths),
        sys.path_importer_cache.keys()))
scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()
if key not in scrub_from_importer_cache)
for importer_cache_entry in scrub_from_importer_cache:
TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)
return scrubbed_sys_path, scrubbed_importer_cache
@classmethod
def minimum_sys(cls, inherit_path):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls.site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules
@classmethod
@contextmanager
def patch_pkg_resources(cls, working_set):
"""Patch pkg_resources given a new working set."""
def patch(working_set):
pkg_resources.working_set = working_set
pkg_resources.require = working_set.require
pkg_resources.iter_entry_points = working_set.iter_entry_points
pkg_resources.run_script = pkg_resources.run_main = working_set.run_script
pkg_resources.add_activation_listener = working_set.subscribe
old_working_set = pkg_resources.working_set
patch(working_set)
try:
yield
finally:
patch(old_working_set)
# Thar be dragons -- when this contextmanager exits, the interpreter is
# potentially in a wonky state since the patches here (minimum_sys_modules
# for example) actually mutate global state. This should not be
# considered a reversible operation despite being a contextmanager.
@contextmanager
def patch_sys(self, inherit_path):
"""Patch sys with all site scrubbed."""
def patch_dict(old_value, new_value):
old_value.clear()
old_value.update(new_value)
def patch_all(path, path_importer_cache, modules):
sys.path[:] = path
patch_dict(sys.path_importer_cache, path_importer_cache)
patch_dict(sys.modules, modules)
old_sys_path, old_sys_path_importer_cache, old_sys_modules = (
sys.path[:], sys.path_importer_cache.copy(), sys.modules.copy())
new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)
new_sys_path.extend(merge_split(self._pex_info.pex_path, self._vars.PEX_PATH))
patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
yield
def _wrap_coverage(self, runner, *args):
if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:
runner(*args)
return
try:
import coverage
except ImportError:
die('Could not bootstrap coverage module, aborting.')
pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME
if pex_coverage_filename is not None:
cov = coverage.coverage(data_file=pex_coverage_filename)
else:
cov = coverage.coverage(data_suffix=True)
TRACER.log('Starting coverage.')
cov.start()
try:
runner(*args)
finally:
TRACER.log('Stopping coverage')
cov.stop()
# TODO(wickman) Post-process coverage to elide $PEX_ROOT and make
# the report more useful/less noisy. #89
if pex_coverage_filename:
cov.save()
else:
cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)
def _wrap_profiling(self, runner, *args):
if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:
runner(*args)
return
pex_profile_filename = self._vars.PEX_PROFILE_FILENAME
pex_profile_sort = self._vars.PEX_PROFILE_SORT
try:
import cProfile as profile
except ImportError:
import profile
profiler = profile.Profile()
try:
return profiler.runcall(runner, *args)
finally:
if pex_profile_filename is not None:
profiler.dump_stats(pex_profile_filename)
else:
profiler.print_stats(sort=pex_profile_sort)
def path(self):
"""Return the path this PEX was built at."""
return self._pex
def execute(self):
"""Execute the PEX.
This function makes assumptions that it is the last function called by
the interpreter.
"""
teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE
try:
pex_inherit_path = self._vars.PEX_INHERIT_PATH
if pex_inherit_path == "false":
pex_inherit_path = self._pex_info.inherit_path
with self.patch_sys(pex_inherit_path):
working_set = self._activate()
TRACER.log('PYTHONPATH contains:')
for element in sys.path:
TRACER.log(' %c %s' % (' ' if os.path.exists(element) else '*', element))
TRACER.log(' * - paths that do not exist or will be imported via zipimport')
with self.patch_pkg_resources(working_set):
self._wrap_coverage(self._wrap_profiling, self._execute)
except Exception:
# Allow the current sys.excepthook to handle this app exception before we tear things down in
# finally, then reraise so that the exit status is reflected correctly.
sys.excepthook(*sys.exc_info())
raise
except SystemExit as se:
# Print a SystemExit error message, avoiding a traceback in python3.
# This must happen here, as sys.stderr is about to be torn down
if not isinstance(se.code, int) and se.code is not None:
print(se.code, file=sys.stderr)
raise
finally:
# squash all exceptions on interpreter teardown -- the primary type here are
# atexit handlers failing to run because of things such as:
# http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
if not teardown_verbosity:
sys.stderr.flush()
sys.stderr = DevNull()
sys.excepthook = lambda *a, **kw: None
def _execute(self):
force_interpreter = self._vars.PEX_INTERPRETER
self.clean_environment()
if force_interpreter:
TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')
return self.execute_interpreter()
if self._pex_info_overrides.script and self._pex_info_overrides.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info.script and self._pex_info.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info_overrides.script:
return self.execute_script(self._pex_info_overrides.script)
elif self._pex_info_overrides.entry_point:
return self.execute_entry(self._pex_info_overrides.entry_point)
elif self._pex_info.script:
return self.execute_script(self._pex_info.script)
elif self._pex_info.entry_point:
return self.execute_entry(self._pex_info.entry_point)
else:
TRACER.log('No entry point specified, dropping into interpreter')
return self.execute_interpreter()
def execute_interpreter(self):
if sys.argv[1:]:
try:
with open(sys.argv[1]) as fp:
name, content = sys.argv[1], fp.read()
except IOError as e:
die("Could not open %s in the environment [%s]: %s" % (sys.argv[1], sys.argv[0], e))
sys.argv = sys.argv[1:]
self.execute_content(name, content)
else:
import code
code.interact()
def execute_script(self, script_name):
dists = list(self._activate())
entry_point = get_entry_point_from_console_script(script_name, dists)
if entry_point:
sys.exit(self.execute_entry(entry_point))
dist, script_path, script_content = get_script_from_distributions(script_name, dists)
if not dist:
raise self.NotFound('Could not find script %s in pex!' % script_name)
TRACER.log('Found script %s in %s' % (script_name, dist))
return self.execute_content(script_path, script_content, argv0=script_name)
@classmethod
def execute_content(cls, name, content, argv0=None):
argv0 = argv0 or name
try:
ast = compile(content, name, 'exec', flags=0, dont_inherit=1)
except SyntaxError:
die('Unable to parse %s. PEX script support only supports Python scripts.' % name)
old_name, old_file = globals().get('__name__'), globals().get('__file__')
try:
old_argv0, sys.argv[0] = sys.argv[0], argv0
globals()['__name__'] = '__main__'
globals()['__file__'] = name
exec_function(ast, globals())
finally:
if old_name:
globals()['__name__'] = old_name
else:
globals().pop('__name__')
if old_file:
globals()['__file__'] = old_file
else:
globals().pop('__file__')
sys.argv[0] = old_argv0
@classmethod
def execute_entry(cls, entry_point):
runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module
return runner(entry_point)
@staticmethod
def execute_module(module_name):
import runpy
runpy.run_module(module_name, run_name='__main__')
@staticmethod
def execute_pkg_resources(spec):
entry = EntryPoint.parse("run = {0}".format(spec))
# See https://pythonhosted.org/setuptools/history.html#id25 for rationale here.
if hasattr(entry, 'resolve'):
# setuptools >= 11.3
runner = entry.resolve()
else:
# setuptools < 11.3
runner = entry.load(require=False)
return runner()
def cmdline(self, args=()):
"""The commandline to run this environment.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
"""
cmds = [self._interpreter.binary]
cmds.append(self._pex)
cmds.extend(args)
return cmds
def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kwargs):
"""Run the PythonEnvironment in an interpreter in a subprocess.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
:keyword with_chroot: Run with cwd set to the environment's working directory.
:keyword blocking: If true, return the return code of the subprocess.
If false, return the Popen object of the invoked subprocess.
:keyword setsid: If true, run the PEX in a separate operating system session.
Remaining keyword arguments are passed directly to subprocess.Popen.
"""
self.clean_environment()
cmdline = self.cmdline(args)
TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))
process = Executor.open_process(cmdline,
cwd=self._pex if with_chroot else os.getcwd(),
preexec_fn=os.setsid if setsid else None,
stdin=kwargs.pop('stdin', None),
stdout=kwargs.pop('stdout', None),
stderr=kwargs.pop('stderr', None),
**kwargs)
return process.wait() if blocking else process
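# Illustrative usage sketch; `pex_env` is assumed to be an instance of the
# class above and the helper name is hypothetical, but cmdline() and run()
# are used exactly as defined here.
def _example_run_existing_pex(pex_env, extra_args=('--version',)):
    # cmdline() only assembles [interpreter, pex, *args]; run() spawns the
    # subprocess, blocking on it by default and returning its exit status
    # (or the Popen object when blocking=False).
    print('would invoke: %s' % ' '.join(pex_env.cmdline(extra_args)))
    return pex_env.run(args=extra_args, blocking=True)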
|
|
import mimetypes
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import UpdateView, CreateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.contrib.auth.models import Group
from HexOmega.settings import BASE_DIR
from .utils import get_default_password, mail_kickoff, uploaded_file_handler
from .models import Project, AdminUser, MemberUser, LeaderUser, Task
from .backends import CustomUserAuth
from .forms.login_form import LoginForm
from .forms.project_forms import CreateProjectForm
from .forms.task_forms import CreateTaskForm, LeaderUpdateTaskForm
from .forms.member_form import MemberUpdate, MemberCreate
from .Xav.user_context import url_context
from log.Log import log
import os
def index(request):
return render(request,
'users/index.html')
def login_auth_2(request):
"""
Login page authentication using Django forms.
Validates the submitted LoginForm, authenticates against CustomUserAuth,
logs the user in and honours the "remember me" checkbox.
:param request:
:return:
"""
if request.user.is_authenticated():
return redirect('user_logged_in', request.user.username)
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = request.POST.get('username')
password = request.POST.get('password')
rem = request.POST.get('rem')
user = CustomUserAuth().authenticate(username=username, password=password)
if user is False:
form.errors['password'] = 'The username or password is incorrect.'
return render(request,
'users/login.html',
{
'form': form,
'errors': form.errors
})
if user is not None:
print('User [{}] is logging in.'.format(user.username))
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
if rem is not None:
request.session.set_expiry(7200)
else:
request.session.set_expiry(0)  # expire the session when the browser closes
return redirect('user_logged_in', username=username)
else:
form = LoginForm()
return render(request, 'users/login.html', {'form': form})
@login_required
def logged_in(request, username):
if AdminUser.objects.filter(username__exact=username).count() == 1:
return redirect('open_project', username)
elif LeaderUser.objects.filter(username__exact=username).count() == 1:
return redirect('leader_home', username)
else:
user = MemberUser.objects.get(username__exact=username)
return redirect('task_list', username)
@login_required
def jump_ship(request):
print('jumping ship....')
logout(request)
return redirect('login_page')
@login_required
def delete_admin(request, username, d):
"""
Delete the admin user identified by ``d`` and return to the user list.
Currently rendered with placeholder templates that are good enough for
testing; proper templates will be added later.
"""
a = AdminUser.objects.get(username__exact=d)
a.delete()
print('deleted')
return redirect('list_users', username)
@login_required
def member_upload(request, username, task):
t = Task.objects.get(title=task)
if 'up_file' in request.FILES:
t.deliverable = request.FILES['up_file']
t.save()
mail_kickoff(MemberUser.objects.get(username__exact=username), t, var=3)
log('SUCCESS', MemberUser.objects.get(username__exact=username),
'{} uploaded a deliverable for {}'.format(username, t.title))
print(t.deliverable.url)
else:
print('No file!!')
return redirect('task_list', username)
@login_required
def list_users(request, username):
return render(request, 'list.html', {'admins': AdminUser.objects.all()})
@login_required
@url_context
def get_list_of_users(request):
"""
Display a list of admin users
/list/
:param request:
:return:
:author Caroline
"""
admin_user_list = AdminUser.objects.order_by('pk')
paginator = Paginator(admin_user_list, 1)  # Show 1 admin per page
page = request.GET.get('page')
try:
admin_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
admin_list = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
admin_list = paginator.page(paginator.num_pages)
context = {'admin_list': admin_list, 'page': page}
return render(request, 'users/list_of_users.html', context)
# ============================================================================
# Release Me!
@login_required
def leader_home(request, username):
user = LeaderUser.objects.get(username__exact=username)
try:
tasks = user.project.actionlist.task_set.all()
for task in Task.objects.filter(action_list__project__leader__username=username):
print(task.title, task.action_list.project.name)
# print(task.deliverable.url)
except Exception as e:
print('Could not load tasks for {}: {}'.format(username, e))
tasks = None
return render(request, 'leader_home.html', {'user': user, 'tasks': tasks})
class CreateMember(CreateView, LoginRequiredMixin):
# fields = ['username', 'first_name', 'last_name', 'role', 'email', 'phone']
form_class = MemberCreate
username = ''
model = MemberUser
l = None
template_name = 'create_member.html'
def form_valid(self, form):
form.instance.project = self.l.project
password = get_default_password()
form.instance.set_password(password)
mail_kickoff(form.instance, password)
messages.add_message(self.request, messages.INFO, 'User [{}] created.'.format(form.instance.username))
update_session_auth_hash(self.request, self.request.user)
return super(CreateMember, self).form_valid(form)
def get_form_kwargs(self):
self.l = LeaderUser.objects.get(username__exact=self.request.user.username)
p = self.request.get_full_path()
print(p)
self.success_url = '/'.join(p.split('/')[:-1]) + '/'
kwargs = super(CreateMember, self).get_form_kwargs()
# kwargs['pn'] = l.project.name
return kwargs
class MemberHome(DetailView, LoginRequiredMixin):
model = MemberUser
username = ''
template_name = 'member_home.html'
def get_object(self, queryset=None):
return MemberUser.objects.get(username=self.kwargs.get('username'))
def get_context_data(self, **kwargs):
context = super(MemberHome, self).get_context_data(**kwargs)
return context
@login_required
def show_tasks(request, username):
ts = Task.objects.filter(members__username=username)
print(ts)
return render(request, 'list.html', {'tasks': ts})
# ============================================================================
# My project and tasks modules
# 2017-03-22
def get_project_path(p):
return os.path.join(BASE_DIR,
os.path.join('projects', p.name + '/'))
@login_required
def create_project(request, username):
if request.method == 'POST':
form = CreateProjectForm(request.POST)
if form.is_valid():
p = form.save(commit=False)
p.leader = LeaderUser.objects.get(username__exact=username)
p.save()
for a in request.POST.getlist('admins'):
p.admins.add(a)
path = get_project_path(p)
# os.makedirs(path, 0o755)
if not os.path.exists(path):
os.mkdir(path, 0o755)
if not os.path.exists(os.path.join(path, 'activity.log')):
f = open(os.path.join(path, 'activity.log'), 'w+')
f.close()
return redirect('display_leader', username)
else:
form = CreateProjectForm()
return render(request, 'crproj.html', {'form': form})
@login_required
def create_task(request, username):
l = LeaderUser.objects.get(username__exact=username)
if request.method == 'POST':
form = CreateTaskForm(request.POST)
if form.is_valid():
mem_dat = form.cleaned_data.get('members')
title = form.cleaned_data.get('title')
est_end = form.cleaned_data.get('est_end')
status = form.cleaned_data.get('status')
lt = form.cleaned_data.get('to_leader')
if lt is None:
lt = False
t = Task.objects.create(title=title, est_end=est_end, status=status, to_leader=lt,
action_list=l.project.actionlist)
t.save()
for m in mem_dat:
t.members.add(m)
t.save()
log('INFO', l, '{} added a new Task [{}]'.format(l.username, t.title))
return redirect('leader_home', username)
else:
print(form.errors)
else:
form = CreateTaskForm({'pn': l.project.name})
return render(request, 'crtask.html', {'form': form})
class TaskUpdate(UpdateView, LoginRequiredMixin):
username = ''
model = Task
template_name = 'crtask.html'
content_type = 'multipart/form-data'
form_class = LeaderUpdateTaskForm
def get_form_kwargs(self):
l = LeaderUser.objects.get(username__exact=self.request.user.username)
t = Task.objects.get(pk=self.kwargs['pk'])
up_flag = False
up_name = ''
if bool(t.deliverable):
up_flag = True
up_name = t.deliverable.name.split('/')[-1]
t.status = 'Completed'
t.save()
log('SUCCESS', l, '{} uploaded a deliverable to Task [{}]'.format(l.username, t.title))
mail_kickoff(l, t, var=3)
p = self.request.get_full_path()
self.success_url = '/'.join(p.split('/')[:-3]) + '/'
kwargs = super(TaskUpdate, self).get_form_kwargs()
kwargs['pn'] = l.project.name
kwargs['up_flag'] = up_flag
kwargs['up_name'] = up_name
log('INFO', l, '{} made changes to Task [{}]'.format(l.username, t.title))
return kwargs
@login_required
def update_member(request, username):
mem = MemberUser.objects.get(username__exact=username)
form = MemberUpdate(request.POST, initial={
'first_name': mem.first_name,
'last_name': mem.last_name,
'email': mem.email,
'phone': mem.phone
})
if request.method == 'POST':
if form.is_valid():
fn = request.POST.get('first_name')
ln = request.POST.get('last_name')
email = request.POST.get('email')
p = request.POST.get('password')
ph = request.POST.get('phone')
if fn != '':
    mem.first_name = fn
if ln != '':
    mem.last_name = ln
if email != '':
    mem.email = email
if p is not None and p != '' and len(p.strip()) >= 8:
    mem.set_password(p)
if ph != '':
    mem.phone = ph
if mem.has_usable_password():
update_session_auth_hash(request, mem)
mem.save()
logout(request)
return redirect('login_page')
else:
print(form.errors)
else:
form = MemberUpdate()
return render(request, 'update_member.html', {
'form': form,
'user': mem,
'title': 'Update'
})
def get_list_of_members(request, username):
member_user_list = MemberUser.objects.order_by('pk').filter(project__leader__username=username)
user = LeaderUser.objects.get(username__iexact=username)
paginator = Paginator(member_user_list, 5)  # Show 5 members per page
page = request.GET.get('page')
try:
member_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
member_list = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
member_list = paginator.page(paginator.num_pages)
context = {'member_list': member_list, 'page': page, 'user': user}
return render(request, 'users/list_of_members.html', context)
def delete_a_member(request, username, d):
person = MemberUser.objects.filter(username__iexact=d).first()
if person is not None:
    person.delete()
return redirect('members_list', username)
@login_required
def project_information(request, username, p):
print('Yoohoo!')
project = Project.objects.get(name__exact=p)
for p in project.actionlist.task_set.all():
print(p.deliverable.name.split('/')[-1])
return render(request, 'users/project_information.html', {'project': project})
@login_required
def send_file(request, username, p, task):
task = Task.objects.get(title__exact=task)
file_path = '/' + task.deliverable.url
if '%20' in file_path:
file_path = file_path.replace('%20', ' ')
file_mimetype, _ = mimetypes.guess_type(file_path)  # guess_type() returns (type, encoding)
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type=file_mimetype)
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
return response
else:
return HttpResponse('File retrieval error.')
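# A minimal urlconf sketch for the views in this module. The regex patterns
# and the view-to-name pairings are assumptions inferred from the redirect()
# targets used above ('login_page', 'user_logged_in', 'leader_home',
# 'task_list', 'members_list'); this is not the project's actual urls.py.
from django.conf.urls import url  # Django 1.x style, matching this code base

urlpatterns_sketch = [
    url(r'^login/$', login_auth_2, name='login_page'),
    url(r'^(?P<username>[\w.@+-]+)/$', logged_in, name='user_logged_in'),
    url(r'^(?P<username>[\w.@+-]+)/leader/$', leader_home, name='leader_home'),
    url(r'^(?P<username>[\w.@+-]+)/tasks/$', show_tasks, name='task_list'),
    url(r'^(?P<username>[\w.@+-]+)/members/$', get_list_of_members,
        name='members_list'),
]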
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
import re
import yaml
from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.tests import common
from heat.tests import utils
class JsonToYamlTest(common.HeatTestCase):
def setUp(self):
super(JsonToYamlTest, self).setUp()
self.expected_test_count = 2
self.longMessage = True
self.maxDiff = None
def test_convert_all_templates(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates')
template_test_count = 0
for (json_str,
yml_str) in self.convert_all_json_to_yaml(path):
self.compare_json_vs_yaml(json_str, yml_str)
template_test_count += 1
if template_test_count >= self.expected_test_count:
break
self.assertGreaterEqual(
template_test_count, self.expected_test_count,
'Expected at least %d templates to be tested, not %d' %
(self.expected_test_count, template_test_count))
def compare_json_vs_yaml(self, json_str, yml_str):
yml = template_format.parse(yml_str)
self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'])
self.assertNotIn(u'AWSTemplateFormatVersion', yml)
del(yml[u'HeatTemplateFormatVersion'])
jsn = template_format.parse(json_str)
if u'AWSTemplateFormatVersion' in jsn:
del(jsn[u'AWSTemplateFormatVersion'])
self.assertEqual(yml, jsn)
def convert_all_json_to_yaml(self, dirpath):
for path in os.listdir(dirpath):
if not path.endswith('.template') and not path.endswith('.json'):
continue
with open(os.path.join(dirpath, path), 'r') as f:
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
yield (json_str, yml_str)
def test_integer_only_keys_get_translated_correctly(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates/WordPress_Single_Instance.template')
with open(path, 'r') as f:
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
match = re.search(r'[\s,{]\d+\s*:', yml_str)
# Check that there are no matches of integer-only keys
# lacking explicit quotes
self.assertIsNone(match)
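# Standalone sketch of the conversion exercised by JsonToYamlTest. The inline
# template is an assumption (not one of the shipped test templates);
# convert_json_to_yaml() and parse() are the same helpers the tests above use,
# and the version swap mirrors what compare_json_vs_yaml() asserts.
def _example_json_to_yaml_roundtrip():
    json_str = ('{"AWSTemplateFormatVersion": "2010-09-09",'
                ' "Resources": {}}')
    yml_str = template_format.convert_json_to_yaml(json_str)
    # The converted document carries HeatTemplateFormatVersion instead of
    # AWSTemplateFormatVersion and parses back into a dict.
    return template_format.parse(yml_str)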
class YamlMinimalTest(common.HeatTestCase):
def _parse_template(self, tmpl_str, msg_str):
parse_ex = self.assertRaises(ValueError,
template_format.parse,
tmpl_str)
self.assertIn(msg_str, str(parse_ex))
def test_long_yaml(self):
template = {'HeatTemplateFormatVersion': '2012-12-12'}
config.cfg.CONF.set_override('max_template_size', 10)
template['Resources'] = ['a'] * int(
config.cfg.CONF.max_template_size / 3)
limit = config.cfg.CONF.max_template_size
long_yaml = yaml.safe_dump(template)
self.assertGreater(len(long_yaml), limit)
ex = self.assertRaises(exception.RequestLimitExceeded,
template_format.parse, long_yaml)
msg = ('Request limit exceeded: Template size (%(actual_len)s '
'bytes) exceeds maximum allowed size (%(limit)s bytes).') % {
'actual_len': len(str(long_yaml)),
'limit': config.cfg.CONF.max_template_size}
self.assertEqual(msg, str(ex))
def test_parse_no_version_format(self):
yaml = ''
self._parse_template(yaml, 'Template format version not found')
yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
self._parse_template(yaml2, 'Template format version not found')
def test_parse_string_template(self):
tmpl_str = 'just string'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_invalid_yaml_and_json_template(self):
tmpl_str = '{test'
msg = 'line 1, column 1'
self._parse_template(tmpl_str, msg)
def test_parse_json_document(self):
tmpl_str = '["foo" , "bar"]'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_empty_json_template(self):
tmpl_str = '{}'
msg = 'Template format version not found'
self._parse_template(tmpl_str, msg)
def test_parse_yaml_template(self):
tmpl_str = 'heat_template_version: 2013-05-23'
expected = {'heat_template_version': '2013-05-23'}
self.assertEqual(expected, template_format.parse(tmpl_str))
class YamlParseExceptions(common.HeatTestCase):
scenarios = [
('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError(
'', 42, 'x'.encode('latin-1'), '', ''))),
]
def test_parse_to_value_exception(self):
text = 'not important'
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = self.raised_exception
err = self.assertRaises(ValueError,
template_format.parse, text,
'file://test.yaml')
self.assertIn('Error parsing template file://test.yaml',
str(err))
class JsonYamlResolvedCompareTest(common.HeatTestCase):
def setUp(self):
super(JsonYamlResolvedCompareTest, self).setUp()
self.longMessage = True
self.maxDiff = None
def load_template(self, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates', file_name)
with open(filepath) as f:
    return template_format.parse(f.read())
def compare_stacks(self, json_file, yaml_file, parameters):
t1 = self.load_template(json_file)
t2 = self.load_template(yaml_file)
del(t1[u'AWSTemplateFormatVersion'])
t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
stack1 = utils.parse_stack(t1, parameters)
stack2 = utils.parse_stack(t2, parameters)
# compare resources separately so that resolved static data
# is compared
t1nr = dict(stack1.t.t)
del(t1nr['Resources'])
t2nr = dict(stack2.t.t)
del(t2nr['Resources'])
self.assertEqual(t1nr, t2nr)
self.assertEqual(set(stack1), set(stack2))
for key in stack1:
self.assertEqual(stack1[key].t, stack2[key].t)
def test_neutron_resolved(self):
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.compare_stacks('Neutron.template', 'Neutron.yaml', {})
def test_wordpress_resolved(self):
self.compare_stacks('WordPress_Single_Instance.template',
'WordPress_Single_Instance.yaml',
{'KeyName': 'test'})
|
|
"""Support for Tado to create a climate device for each zone."""
import logging
from typing import List, Optional
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_HIGH,
FAN_LOW,
FAN_MIDDLE,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.util.temperature import convert as convert_temperature
from . import DATA_TADO
_LOGGER = logging.getLogger(__name__)
CONST_MODE_SMART_SCHEDULE = "SMART_SCHEDULE" # Default mytado mode
CONST_MODE_OFF = "OFF" # Switch off heating in a zone
# When we change the temperature setting, we need an overlay mode;
# TADO_MODE keeps the override until Tado's schedule changes the mode automatically
CONST_OVERLAY_TADO_MODE = "TADO_MODE"
# the user has changed the temperature or mode manually
CONST_OVERLAY_MANUAL = "MANUAL"
# the temperature will be reset after a timespan
CONST_OVERLAY_TIMER = "TIMER"
CONST_MODE_FAN_HIGH = "HIGH"
CONST_MODE_FAN_MIDDLE = "MIDDLE"
CONST_MODE_FAN_LOW = "LOW"
FAN_MAP_TADO = {"HIGH": FAN_HIGH, "MIDDLE": FAN_MIDDLE, "LOW": FAN_LOW}
HVAC_MAP_TADO_HEAT = {
"MANUAL": HVAC_MODE_HEAT,
"TIMER": HVAC_MODE_HEAT,
"TADO_MODE": HVAC_MODE_HEAT,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
HVAC_MAP_TADO_COOL = {
"MANUAL": HVAC_MODE_COOL,
"TIMER": HVAC_MODE_COOL,
"TADO_MODE": HVAC_MODE_COOL,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
HVAC_MAP_TADO_HEAT_COOL = {
"MANUAL": HVAC_MODE_HEAT_COOL,
"TIMER": HVAC_MODE_HEAT_COOL,
"TADO_MODE": HVAC_MODE_HEAT_COOL,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC_HEAT = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_HVAC_COOL = [HVAC_MODE_COOL, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_HVAC_HEAT_COOL = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_FAN = [FAN_HIGH, FAN_MIDDLE, FAN_LOW, FAN_OFF]
SUPPORT_PRESET = [PRESET_AWAY, PRESET_HOME]
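# Small worked example of the tables above (the helper name is hypothetical):
# a heat-only zone whose current Tado operation is the MANUAL overlay is
# reported to Home Assistant as HVAC_MODE_HEAT, while SMART_SCHEDULE always
# maps back to HVAC_MODE_AUTO. The hvac_mode property below applies exactly
# this lookup.
def _example_hvac_mapping(current_operation=CONST_OVERLAY_MANUAL):
    heat_only = HVAC_MAP_TADO_HEAT.get(current_operation)          # HVAC_MODE_HEAT
    scheduled = HVAC_MAP_TADO_HEAT.get(CONST_MODE_SMART_SCHEDULE)  # HVAC_MODE_AUTO
    return heat_only, scheduled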
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tado climate platform."""
tado = hass.data[DATA_TADO]
try:
zones = tado.get_zones()
except RuntimeError:
_LOGGER.error("Unable to get zone info from mytado")
return
climate_devices = []
for zone in zones:
device = create_climate_device(tado, hass, zone, zone["name"], zone["id"])
if not device:
continue
climate_devices.append(device)
if climate_devices:
add_entities(climate_devices, True)
def create_climate_device(tado, hass, zone, name, zone_id):
"""Create a Tado climate device."""
capabilities = tado.get_capabilities(zone_id)
unit = TEMP_CELSIUS
ac_device = capabilities["type"] == "AIR_CONDITIONING"
hot_water_device = capabilities["type"] == "HOT_WATER"
ac_support_heat = False
if ac_device:
# Only use heat if available
# (you don't have to set up a heat mode, but cool is required)
# Heat is preferred as it generally has a lower minimum temperature
if "HEAT" in capabilities:
temperatures = capabilities["HEAT"]["temperatures"]
ac_support_heat = True
else:
temperatures = capabilities["COOL"]["temperatures"]
elif "temperatures" in capabilities:
temperatures = capabilities["temperatures"]
else:
_LOGGER.debug("Received zone %s has no temperature; not adding", name)
return
min_temp = float(temperatures["celsius"]["min"])
max_temp = float(temperatures["celsius"]["max"])
step = temperatures["celsius"].get("step", PRECISION_TENTHS)
data_id = f"zone {name} {zone_id}"
device = TadoClimate(
tado,
name,
zone_id,
data_id,
hass.config.units.temperature(min_temp, unit),
hass.config.units.temperature(max_temp, unit),
step,
ac_device,
hot_water_device,
ac_support_heat,
)
tado.add_sensor(
data_id, {"id": zone_id, "zone": zone, "name": name, "climate": device}
)
return device
class TadoClimate(ClimateDevice):
"""Representation of a Tado climate device."""
def __init__(
self,
store,
zone_name,
zone_id,
data_id,
min_temp,
max_temp,
step,
ac_device,
hot_water_device,
ac_support_heat,
tolerance=0.3,
):
"""Initialize of Tado climate device."""
self._store = store
self._data_id = data_id
self.zone_name = zone_name
self.zone_id = zone_id
self._ac_device = ac_device
self._hot_water_device = hot_water_device
self._ac_support_heat = ac_support_heat
self._cooling = False
self._active = False
self._device_is_active = False
self._unit = TEMP_CELSIUS
self._cur_temp = None
self._cur_humidity = None
self._is_away = False
self._min_temp = min_temp
self._max_temp = max_temp
self._step = step
self._target_temp = None
self._tolerance = tolerance
self._current_fan = CONST_MODE_OFF
self._current_operation = CONST_MODE_SMART_SCHEDULE
self._overlay_mode = CONST_MODE_SMART_SCHEDULE
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the device."""
return self.zone_name
@property
def current_humidity(self):
"""Return the current humidity."""
return self._cur_humidity
def set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
pass
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._cur_temp
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self._ac_device and self._ac_support_heat:
return HVAC_MAP_TADO_HEAT_COOL.get(self._current_operation)
if self._ac_device and not self._ac_support_heat:
return HVAC_MAP_TADO_COOL.get(self._current_operation)
return HVAC_MAP_TADO_HEAT.get(self._current_operation)
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
if self._ac_device and self._ac_support_heat:
return SUPPORT_HVAC_HEAT_COOL
if self._ac_device and not self._ac_support_heat:
return SUPPORT_HVAC_COOL
return SUPPORT_HVAC_HEAT
@property
def hvac_action(self):
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if not self._device_is_active:
return CURRENT_HVAC_OFF
if self._ac_device and self._ac_support_heat and self._cooling:
if self._active:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
if self._ac_device and self._ac_support_heat and not self._cooling:
if self._active:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
if self._ac_device and not self._ac_support_heat:
if self._active:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
if self._active:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def fan_mode(self):
"""Return the fan setting."""
if self._ac_device:
return FAN_MAP_TADO.get(self._current_fan)
return None
@property
def fan_modes(self):
"""List of available fan modes."""
if self._ac_device:
return SUPPORT_FAN
return None
def set_fan_mode(self, fan_mode: str):
"""Turn fan on/off."""
pass
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if self._is_away:
return PRESET_AWAY
return PRESET_HOME
@property
def preset_modes(self):
"""Return a list of available preset modes."""
return SUPPORT_PRESET
def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
pass
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return self._unit
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
return None
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._current_operation = CONST_OVERLAY_TADO_MODE
self._overlay_mode = None
self._target_temp = temperature
self._control_heating()
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
mode = None
if hvac_mode == HVAC_MODE_OFF:
mode = CONST_MODE_OFF
elif hvac_mode == HVAC_MODE_AUTO:
mode = CONST_MODE_SMART_SCHEDULE
elif hvac_mode == HVAC_MODE_HEAT:
mode = CONST_OVERLAY_TADO_MODE
elif hvac_mode == HVAC_MODE_COOL:
mode = CONST_OVERLAY_TADO_MODE
elif hvac_mode == HVAC_MODE_HEAT_COOL:
mode = CONST_OVERLAY_TADO_MODE
self._current_operation = mode
self._overlay_mode = None
if self._target_temp is None and self._ac_device:
self._target_temp = 27
self._control_heating()
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert_temperature(
self._min_temp, self._unit, self.hass.config.units.temperature_unit
)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert_temperature(
self._max_temp, self._unit, self.hass.config.units.temperature_unit
)
def update(self):
"""Update the state of this climate device."""
self._store.update()
data = self._store.get_data(self._data_id)
if data is None:
_LOGGER.debug("Received no data for zone %s", self.zone_name)
return
if "sensorDataPoints" in data:
sensor_data = data["sensorDataPoints"]
unit = TEMP_CELSIUS
if "insideTemperature" in sensor_data:
temperature = float(sensor_data["insideTemperature"]["celsius"])
self._cur_temp = self.hass.config.units.temperature(temperature, unit)
if "humidity" in sensor_data:
humidity = float(sensor_data["humidity"]["percentage"])
self._cur_humidity = humidity
# temperature setting will not exist when device is off
if (
"temperature" in data["setting"]
and data["setting"]["temperature"] is not None
):
setting = float(data["setting"]["temperature"]["celsius"])
self._target_temp = self.hass.config.units.temperature(setting, unit)
if "tadoMode" in data:
mode = data["tadoMode"]
self._is_away = mode == "AWAY"
if "setting" in data:
power = data["setting"]["power"]
if power == "OFF":
self._current_operation = CONST_MODE_OFF
self._current_fan = CONST_MODE_OFF
# There is no overlay, the mode will always be
# "SMART_SCHEDULE"
self._overlay_mode = CONST_MODE_SMART_SCHEDULE
self._device_is_active = False
else:
self._device_is_active = True
active = False
if "activityDataPoints" in data:
activity_data = data["activityDataPoints"]
if self._ac_device:
if "acPower" in activity_data and activity_data["acPower"] is not None:
if not activity_data["acPower"]["value"] == "OFF":
active = True
else:
if (
"heatingPower" in activity_data
and activity_data["heatingPower"] is not None
):
if float(activity_data["heatingPower"]["percentage"]) > 0.0:
active = True
self._active = active
overlay = False
overlay_data = None
termination = CONST_MODE_SMART_SCHEDULE
cooling = False
fan_speed = CONST_MODE_OFF
if "overlay" in data:
overlay_data = data["overlay"]
overlay = overlay_data is not None
if overlay:
termination = overlay_data["termination"]["type"]
setting = False
setting_data = None
if "setting" in overlay_data:
setting_data = overlay_data["setting"]
setting = setting_data is not None
if setting:
if "mode" in setting_data:
cooling = setting_data["mode"] == "COOL"
if "fanSpeed" in setting_data:
fan_speed = setting_data["fanSpeed"]
if self._device_is_active:
# If you set mode manually to off, there will be an overlay
# and a termination, but we want to see the mode "OFF"
self._overlay_mode = termination
self._current_operation = termination
self._cooling = cooling
self._current_fan = fan_speed
def _control_heating(self):
"""Send new target temperature to mytado."""
if None not in (self._cur_temp, self._target_temp):
_LOGGER.info(
"Obtained current (%d) and target temperature (%d). "
"Tado thermostat active",
self._cur_temp,
self._target_temp,
)
if self._current_operation == CONST_MODE_SMART_SCHEDULE:
_LOGGER.info(
"Switching mytado.com to SCHEDULE (default) for zone %s (%d)",
self.zone_name,
self.zone_id,
)
self._store.reset_zone_overlay(self.zone_id)
self._overlay_mode = self._current_operation
return
if self._current_operation == CONST_MODE_OFF:
if self._ac_device:
_LOGGER.info(
"Switching mytado.com to OFF for zone %s (%d) - AIR_CONDITIONING",
self.zone_name,
self.zone_id,
)
self._store.set_zone_off(
self.zone_id, CONST_OVERLAY_MANUAL, "AIR_CONDITIONING"
)
elif self._hot_water_device:
_LOGGER.info(
"Switching mytado.com to OFF for zone %s (%d) - HOT_WATER",
self.zone_name,
self.zone_id,
)
self._store.set_zone_off(
self.zone_id, CONST_OVERLAY_MANUAL, "HOT_WATER"
)
else:
_LOGGER.info(
"Switching mytado.com to OFF for zone %s (%d) - HEATING",
self.zone_name,
self.zone_id,
)
self._store.set_zone_off(self.zone_id, CONST_OVERLAY_MANUAL, "HEATING")
self._overlay_mode = self._current_operation
return
if self._ac_device:
_LOGGER.info(
"Switching mytado.com to %s mode for zone %s (%d). Temp (%s) - AIR_CONDITIONING",
self._current_operation,
self.zone_name,
self.zone_id,
self._target_temp,
)
self._store.set_zone_overlay(
self.zone_id,
self._current_operation,
self._target_temp,
None,
"AIR_CONDITIONING",
"COOL",
)
elif self._hot_water_device:
_LOGGER.info(
"Switching mytado.com to %s mode for zone %s (%d). Temp (%s) - HOT_WATER",
self._current_operation,
self.zone_name,
self.zone_id,
self._target_temp,
)
self._store.set_zone_overlay(
self.zone_id,
self._current_operation,
self._target_temp,
None,
"HOT_WATER",
)
else:
_LOGGER.info(
"Switching mytado.com to %s mode for zone %s (%d). Temp (%s) - HEATING",
self._current_operation,
self.zone_name,
self.zone_id,
self._target_temp,
)
self._store.set_zone_overlay(
self.zone_id,
self._current_operation,
self._target_temp,
None,
"HEATING",
)
self._overlay_mode = self._current_operation
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
return None
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
pass
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
pass
@property
def swing_mode(self) -> Optional[str]:
"""Return the swing setting.
Requires SUPPORT_SWING_MODE.
"""
return None
@property
def swing_modes(self) -> Optional[List[str]]:
"""Return the list of available swing modes.
Requires SUPPORT_SWING_MODE.
"""
return None
def set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
pass
|
|
"""
Platform that supports scanning iCloud.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.icloud/
"""
import logging
import random
import os
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DOMAIN, ATTR_ATTRIBUTES, ENTITY_ID_FORMAT, DeviceScanner)
from homeassistant.components.zone import active_zone
from homeassistant.helpers.event import track_utc_time_change
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from homeassistant.util.location import distance
from homeassistant.loader import get_component
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pyicloud==0.9.1']
CONF_IGNORED_DEVICES = 'ignored_devices'
CONF_ACCOUNTNAME = 'account_name'
# entity attributes
ATTR_ACCOUNTNAME = 'account_name'
ATTR_INTERVAL = 'interval'
ATTR_DEVICENAME = 'device_name'
ATTR_BATTERY = 'battery'
ATTR_DISTANCE = 'distance'
ATTR_DEVICESTATUS = 'device_status'
ATTR_LOWPOWERMODE = 'low_power_mode'
ATTR_BATTERYSTATUS = 'battery_status'
ICLOUDTRACKERS = {}
_CONFIGURING = {}
DEVICESTATUSSET = ['features', 'maxMsgChar', 'darkWake', 'fmlyShare',
'deviceStatus', 'remoteLock', 'activationLocked',
'deviceClass', 'id', 'deviceModel', 'rawDeviceModel',
'passcodeLength', 'canWipeAfterLock', 'trackingInfo',
'location', 'msg', 'batteryLevel', 'remoteWipe',
'thisDevice', 'snd', 'prsId', 'wipeInProgress',
'lowPowerMode', 'lostModeEnabled', 'isLocating',
'lostModeCapable', 'mesg', 'name', 'batteryStatus',
'lockedTimestamp', 'lostTimestamp', 'locationCapable',
'deviceDisplayName', 'lostDevice', 'deviceColor',
'wipedTimestamp', 'modelDisplayName', 'locationEnabled',
'isMac', 'locFoundEnabled']
DEVICESTATUSCODES = {'200': 'online', '201': 'offline', '203': 'pending',
'204': 'unregistered'}
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ACCOUNTNAME): vol.All(cv.ensure_list, [cv.slugify]),
vol.Optional(ATTR_DEVICENAME): cv.slugify,
vol.Optional(ATTR_INTERVAL): cv.positive_int,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(ATTR_ACCOUNTNAME): cv.slugify,
})
def setup_scanner(hass, config: dict, see, discovery_info=None):
"""Set up the iCloud Scanner."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
account = config.get(CONF_ACCOUNTNAME, slugify(username.partition('@')[0]))
icloudaccount = Icloud(hass, username, password, account, see)
if icloudaccount.api is not None:
ICLOUDTRACKERS[account] = icloudaccount
else:
_LOGGER.error("No ICLOUDTRACKERS added")
return False
def lost_iphone(call):
"""Call the lost iphone function if the device is found."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].lost_iphone(devicename)
hass.services.register(DOMAIN, 'icloud_lost_iphone', lost_iphone,
schema=SERVICE_SCHEMA)
def update_icloud(call):
"""Call the update function of an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].update_icloud(devicename)
hass.services.register(DOMAIN, 'icloud_update', update_icloud,
schema=SERVICE_SCHEMA)
def reset_account_icloud(call):
"""Reset an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].reset_account_icloud()
hass.services.register(DOMAIN, 'icloud_reset_account',
reset_account_icloud, schema=SERVICE_SCHEMA)
def setinterval(call):
"""Call the update function of an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
interval = call.data.get(ATTR_INTERVAL)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].setinterval(interval, devicename)
hass.services.register(DOMAIN, 'icloud_set_interval', setinterval,
schema=SERVICE_SCHEMA)
# Tells the bootstrapper that the component was successfully initialized
return True
class Icloud(DeviceScanner):
"""Represent an icloud account in Home Assistant."""
def __init__(self, hass, username, password, name, see):
"""Initialize an iCloud account."""
self.hass = hass
self.username = username
self.password = password
self.api = None
self.accountname = name
self.devices = {}
self.seen_devices = {}
self._overridestates = {}
self._intervals = {}
self.see = see
self._trusted_device = None
self._verification_code = None
self._attrs = {}
self._attrs[ATTR_ACCOUNTNAME] = name
self.reset_account_icloud()
randomseconds = random.randint(10, 59)
track_utc_time_change(
self.hass, self.keep_alive,
second=randomseconds
)
def reset_account_icloud(self):
"""Reset an icloud account."""
from pyicloud import PyiCloudService
from pyicloud.exceptions import (
PyiCloudFailedLoginException, PyiCloudNoDevicesException)
icloud_dir = self.hass.config.path('icloud')
if not os.path.exists(icloud_dir):
os.makedirs(icloud_dir)
try:
self.api = PyiCloudService(
self.username, self.password,
cookie_directory=icloud_dir,
verify=True)
except PyiCloudFailedLoginException as error:
self.api = None
_LOGGER.error('Error logging into iCloud Service: %s', error)
return
try:
self.devices = {}
self._overridestates = {}
self._intervals = {}
for device in self.api.devices:
status = device.status(DEVICESTATUSSET)
devicename = slugify(status['name'].replace(' ', '', 99))
if devicename not in self.devices:
self.devices[devicename] = device
self._intervals[devicename] = 1
self._overridestates[devicename] = None
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def icloud_trusted_device_callback(self, callback_data):
"""The trusted device is chosen."""
self._trusted_device = int(callback_data.get('trusted_device'))
self._trusted_device = self.api.trusted_devices[self._trusted_device]
if not self.api.send_verification_code(self._trusted_device):
_LOGGER.error('Failed to send verification code')
self._trusted_device = None
return
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = get_component('configurator')
configurator.request_done(request_id)
# Trigger the next step immediately
self.icloud_need_verification_code()
def icloud_need_trusted_device(self):
"""We need a trusted device."""
configurator = get_component('configurator')
if self.accountname in _CONFIGURING:
return
devicesstring = ''
devices = self.api.trusted_devices
for i, device in enumerate(devices):
devicename = device.get(
'deviceName',
'SMS to %s' % device.get('phoneNumber'))
devicesstring += "{}: {};".format(i, devicename)
_CONFIGURING[self.accountname] = configurator.request_config(
self.hass, 'iCloud {}'.format(self.accountname),
self.icloud_trusted_device_callback,
description=(
'Please choose your trusted device by entering'
' the index from this list: ' + devicesstring),
entity_picture="/static/images/config_icloud.png",
submit_caption='Confirm',
fields=[{'id': 'trusted_device', 'name': 'Trusted Device'}]
)
def icloud_verification_callback(self, callback_data):
"""The trusted device is chosen."""
from pyicloud.exceptions import PyiCloudException
self._verification_code = callback_data.get('code')
try:
if not self.api.validate_verification_code(
self._trusted_device, self._verification_code):
raise PyiCloudException('Unknown failure')
except PyiCloudException as error:
# Reset to the initial 2FA state to allow the user to retry
_LOGGER.error('Failed to verify verification code: %s', error)
self._trusted_device = None
self._verification_code = None
# Trigger the next step immediately
self.icloud_need_trusted_device()
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = get_component('configurator')
configurator.request_done(request_id)
def icloud_need_verification_code(self):
"""We need a verification code."""
configurator = get_component('configurator')
if self.accountname in _CONFIGURING:
return
_CONFIGURING[self.accountname] = configurator.request_config(
self.hass, 'iCloud {}'.format(self.accountname),
self.icloud_verification_callback,
description=('Please enter the validation code:'),
entity_picture="/static/images/config_icloud.png",
submit_caption='Confirm',
fields=[{'id': 'code', 'name': 'code'}]
)
def keep_alive(self, now):
"""Keep the api alive."""
if self.api is None:
self.reset_account_icloud()
if self.api is None:
return
if self.api.requires_2fa:
from pyicloud.exceptions import PyiCloudException
try:
if self._trusted_device is None:
self.icloud_need_trusted_device()
return
if self._verification_code is None:
self.icloud_need_verification_code()
return
self.api.authenticate()
if self.api.requires_2fa:
raise Exception('Unknown failure')
self._trusted_device = None
self._verification_code = None
except PyiCloudException as error:
_LOGGER.error("Error setting up 2fa: %s", error)
else:
self.api.authenticate()
currentminutes = dt_util.now().hour * 60 + dt_util.now().minute
for devicename in self.devices:
interval = self._intervals.get(devicename, 1)
if ((currentminutes % interval == 0) or
(interval > 10 and
currentminutes % interval in [2, 4])):
self.update_device(devicename)
def determine_interval(self, devicename, latitude, longitude, battery):
"""Calculate new interval."""
distancefromhome = None
zone_state = self.hass.states.get('zone.home')
zone_state_lat = zone_state.attributes['latitude']
zone_state_long = zone_state.attributes['longitude']
distancefromhome = distance(latitude, longitude, zone_state_lat,
zone_state_long)
distancefromhome = round(distancefromhome / 1000, 1)
currentzone = active_zone(self.hass, latitude, longitude)
if ((currentzone is not None and
currentzone == self._overridestates.get(devicename)) or
(currentzone is None and
self._overridestates.get(devicename) == 'away')):
return
self._overridestates[devicename] = None
if currentzone is not None:
self._intervals[devicename] = 30
return
if distancefromhome is None:
return
if distancefromhome > 25:
self._intervals[devicename] = round(distancefromhome / 2, 0)
elif distancefromhome > 10:
self._intervals[devicename] = 5
else:
self._intervals[devicename] = 1
if battery is not None and battery <= 33 and distancefromhome > 3:
self._intervals[devicename] = self._intervals[devicename] * 2
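    # Worked example of the policy above (all numbers are hypothetical): a
    # device 30 km from home and outside any zone is polled every
    # round(30 / 2) = 15 minutes; with the battery at or below 33% and more
    # than 3 km away that doubles to 30 minutes, while anything inside a zone
    # is polled every 30 minutes and anything within 10 km falls back to the
    # 1 minute default.
    @staticmethod
    def _example_interval_arithmetic(distancefromhome=30.0, battery=25):
        interval = round(distancefromhome / 2, 0)
        if battery is not None and battery <= 33 and distancefromhome > 3:
            interval = interval * 2
        return interval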
def update_device(self, devicename):
"""Update the device_tracker entity."""
from pyicloud.exceptions import PyiCloudNoDevicesException
# An entity will not be created by see() when track=false in
# 'known_devices.yaml', but we need to see() it at least once
entity = self.hass.states.get(ENTITY_ID_FORMAT.format(devicename))
if entity is None and devicename in self.seen_devices:
return
attrs = {}
kwargs = {}
if self.api is None:
return
try:
for device in self.api.devices:
if str(device) != str(self.devices[devicename]):
continue
status = device.status(DEVICESTATUSSET)
dev_id = status['name'].replace(' ', '', 99)
dev_id = slugify(dev_id)
attrs[ATTR_DEVICESTATUS] = DEVICESTATUSCODES.get(
status['deviceStatus'], 'error')
attrs[ATTR_LOWPOWERMODE] = status['lowPowerMode']
attrs[ATTR_BATTERYSTATUS] = status['batteryStatus']
attrs[ATTR_ACCOUNTNAME] = self.accountname
battery = status.get('batteryLevel', 0) * 100
location = status['location']
if location:
self.determine_interval(
devicename, location['latitude'],
location['longitude'], battery)
interval = self._intervals.get(devicename, 1)
attrs[ATTR_INTERVAL] = interval
accuracy = location['horizontalAccuracy']
kwargs['dev_id'] = dev_id
kwargs['host_name'] = status['name']
kwargs['gps'] = (location['latitude'],
location['longitude'])
kwargs['battery'] = battery
kwargs['gps_accuracy'] = accuracy
kwargs[ATTR_ATTRIBUTES] = attrs
self.see(**kwargs)
self.seen_devices[devicename] = True
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def lost_iphone(self, devicename):
"""Call the lost iphone function if the device is found."""
if self.api is None:
return
self.api.authenticate()
for device in self.api.devices:
if devicename is None or device == self.devices[devicename]:
device.play_sound()
def update_icloud(self, devicename=None):
"""Authenticate against iCloud and scan for devices."""
from pyicloud.exceptions import PyiCloudNoDevicesException
if self.api is None:
return
try:
if devicename is not None:
if devicename in self.devices:
self.devices[devicename].location()
else:
_LOGGER.error("devicename %s unknown for account %s",
devicename, self._attrs[ATTR_ACCOUNTNAME])
else:
for device in self.devices:
self.devices[device].location()
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def setinterval(self, interval=None, devicename=None):
"""Set the interval of the given devices."""
devs = [devicename] if devicename else self.devices
for device in devs:
devid = DOMAIN + '.' + device
devicestate = self.hass.states.get(devid)
if interval is not None:
if devicestate is not None:
self._overridestates[device] = active_zone(
self.hass,
float(devicestate.attributes.get('latitude', 0)),
float(devicestate.attributes.get('longitude', 0)))
if self._overridestates[device] is None:
self._overridestates[device] = 'away'
self._intervals[device] = interval
else:
self._overridestates[device] = None
self.update_device(device)
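# Hedged usage sketch: the service names and data keys below are the ones
# registered in setup_scanner() above; the device name value is made up for
# the example and `hass` is assumed to be a running Home Assistant core
# instance.
def _example_call_icloud_services(hass):
    # Ask every tracked account to refresh one device's location.
    hass.services.call(DOMAIN, 'icloud_update', {ATTR_DEVICENAME: 'my_iphone'})
    # Pin that device's polling interval to 5 minutes.
    hass.services.call(DOMAIN, 'icloud_set_interval',
                       {ATTR_DEVICENAME: 'my_iphone', ATTR_INTERVAL: 5})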
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_serialization import jsonutils
import testtools
from neutron.api.v2 import attributes
from neutron.common import constants as n_const
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_hamode_db
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import rpc
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
HOST = 'my_l2_host'
HOST_2 = HOST + '_2'
HOST_3 = HOST + '_3'
HOST_4 = HOST + '_4'
HOST_5 = HOST + '_5'
TEST_ROUTER_ID = 'router_id'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agents_db.AgentDbMixin):
pass
class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
_mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population']
def setUp(self):
super(TestL2PopulationRpcTestCase, self).setUp()
self.adminContext = context.get_admin_context()
self.type_manager = managers.TypeManager()
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
self._network = self._make_network(self.fmt, 'net1', True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
net_arg = {pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
self._network2 = self._make_network(self.fmt, 'net2', True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
**net_arg)
net_arg = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'noagent'}
self._network3 = self._make_network(self.fmt, 'net3', True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,),
**net_arg)
notifier_patch = mock.patch(NOTIFIER)
notifier_patch.start()
self.fanout_topic = topics.get_topic_name(topics.AGENT,
topics.L2POPULATION,
topics.UPDATE)
fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
'L2populationAgentNotifyAPI._notification_fanout')
fanout_patch = mock.patch(fanout)
self.mock_fanout = fanout_patch.start()
cast = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
'L2populationAgentNotifyAPI._notification_host')
cast_patch = mock.patch(cast)
self.mock_cast = cast_patch.start()
uptime = ('neutron.plugins.ml2.drivers.l2pop.db.get_agent_uptime')
uptime_patch = mock.patch(uptime, return_value=190)
uptime_patch.start()
def _setup_l3(self):
notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_ha_interfaces_updated')
self.notif_m = notif_p.start()
self.plugin = FakeL3PluginWithAgents()
self._register_ml2_agents()
self._register_l3_agents()
def _register_l3_agents(self):
self.agent1 = helpers.register_l3_agent(host=HOST)
self.agent2 = helpers.register_l3_agent(host=HOST_2)
def _register_ml2_agents(self):
helpers.register_ovs_agent(host=HOST, tunneling_ip='20.0.0.1')
helpers.register_ovs_agent(host=HOST_2, tunneling_ip='20.0.0.2')
helpers.register_ovs_agent(host=HOST_3, tunneling_ip='20.0.0.3',
tunnel_types=[])
helpers.register_ovs_agent(host=HOST_4, tunneling_ip='20.0.0.4')
helpers.register_ovs_agent(host=HOST_5, tunneling_ip='20.0.0.5',
binary='neutron-fake-agent',
tunnel_types=[],
interface_mappings={'physnet1': 'eth9'},
agent_type=constants.AGENT_TYPE_OFA,
l2pop_network_types=['vlan'])
def test_port_info_compare(self):
# An assumption the code makes is that PortInfo compares equal to
# equivalent regular tuples.
self.assertEqual(("mac", "ip"), l2pop_rpc.PortInfo("mac", "ip"))
flooding_entry = l2pop_rpc.PortInfo(*constants.FLOODING_ENTRY)
self.assertEqual(constants.FLOODING_ENTRY, flooding_entry)
def test__unmarshall_fdb_entries(self):
entries = {'foouuid': {
'segment_id': 1001,
'ports': {'192.168.0.10': [['00:00:00:00:00:00', '0.0.0.0'],
['fa:16:3e:ff:8c:0f', '10.0.0.6']]},
'network_type': 'vxlan'}}
entries['chg_ip'] = {
'foouuid': {
'192.168.0.1': {'before': [['fa:16:3e:ff:8c:0f', '10.0.0.6']],
'after': [['fa:16:3e:ff:8c:0f', '10.0.0.7']]},
'192.168.0.2': {'before': [['fa:16:3e:ff:8c:0e', '10.0.0.8']]}
},
'foouuid2': {
'192.168.0.1': {'before': [['ff:16:3e:ff:8c:0e', '1.0.0.8']]}
}
}
mixin = l2population_rpc.L2populationRpcCallBackMixin
entries = mixin._unmarshall_fdb_entries(entries)
port_info_list = entries['foouuid']['ports']['192.168.0.10']
# Check that the lists have been properly converted to PortInfo
self.assertIsInstance(port_info_list[0], l2pop_rpc.PortInfo)
self.assertIsInstance(port_info_list[1], l2pop_rpc.PortInfo)
self.assertEqual(('00:00:00:00:00:00', '0.0.0.0'), port_info_list[0])
self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), port_info_list[1])
agt1 = entries['chg_ip']['foouuid']['192.168.0.1']
self.assertIsInstance(agt1['before'][0], l2pop_rpc.PortInfo)
self.assertIsInstance(agt1['after'][0], l2pop_rpc.PortInfo)
self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), agt1['before'][0])
self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.7'), agt1['after'][0])
agt1_net2 = entries['chg_ip']['foouuid2']['192.168.0.1']
self.assertEqual(('ff:16:3e:ff:8c:0e', '1.0.0.8'),
agt1_net2['before'][0])
self.assertIsInstance(agt1_net2['before'][0], l2pop_rpc.PortInfo)
agt2 = entries['chg_ip']['foouuid']['192.168.0.2']
self.assertIsInstance(agt2['before'][0], l2pop_rpc.PortInfo)
self.assertEqual(('fa:16:3e:ff:8c:0e', '10.0.0.8'), agt2['before'][0])
def test_portinfo_marshalled_as_list(self):
entry = ['fa:16:3e:ff:8c:0f', '10.0.0.6']
payload = {'netuuid': {'ports': {'1': [l2pop_rpc.PortInfo(*entry)]}}}
result = jsonutils.loads(jsonutils.dumps(payload))
self.assertEqual(entry, result['netuuid']['ports']['1'][0])
def _create_router(self, ha=True, tenant_id='tenant1',
distributed=None, ctx=None):
if ctx is None:
ctx = self.adminContext
ctx.tenant_id = tenant_id
router = {'name': TEST_ROUTER_ID, 'admin_state_up': True,
'tenant_id': ctx.tenant_id}
if ha is not None:
router['ha'] = ha
if distributed is not None:
router['distributed'] = distributed
return self.plugin.create_router(ctx, {'router': router})
def _bind_router(self, router_id):
with self.adminContext.session.begin(subtransactions=True):
scheduler = l3_agent_scheduler.ChanceScheduler()
filters = {'agent_type': [constants.AGENT_TYPE_L3]}
agents_db = self.plugin.get_agents_db(self.adminContext,
filters=filters)
scheduler._bind_ha_router_to_agents(
self.plugin,
self.adminContext,
router_id,
agents_db)
self._bind_ha_network_ports(router_id)
def _bind_ha_network_ports(self, router_id):
port_bindings = self.plugin.get_ha_router_port_bindings(
self.adminContext, [router_id])
plugin = manager.NeutronManager.get_plugin()
for port_binding in port_bindings:
filters = {'id': [port_binding.port_id]}
port = plugin.get_ports(self.adminContext, filters=filters)[0]
if port_binding.l3_agent_id == self.agent1['id']:
port[portbindings.HOST_ID] = self.agent1['host']
else:
port[portbindings.HOST_ID] = self.agent2['host']
plugin.update_port(self.adminContext, port['id'],
{attributes.PORT: port})
def _get_first_interface(self, net_id, router_id):
plugin = manager.NeutronManager.get_plugin()
device_filter = {'device_id': [router_id],
'device_owner':
[constants.DEVICE_OWNER_HA_REPLICATED_INT]}
return plugin.get_ports(self.adminContext, filters=device_filter)[0]
def _add_router_interface(self, subnet, router, host):
interface_info = {'subnet_id': subnet['id']}
self.plugin.add_router_interface(self.adminContext,
router['id'], interface_info)
self.plugin.update_routers_states(
self.adminContext,
{router['id']: n_const.HA_ROUTER_STATE_ACTIVE}, host)
port = self._get_first_interface(subnet['network_id'], router['id'])
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext, agent_id=host,
device=port['id'], host=host)
return port
def _create_ha_router(self):
self._setup_l3()
router = self._create_router()
self._bind_router(router['id'])
return router
def _verify_remove_fdb(self, expected, agent_id, device, host=None):
self.mock_fanout.reset_mock()
self.callbacks.update_device_down(self.adminContext, agent_id=host,
device=device, host=host)
self.mock_fanout.assert_called_with(
mock.ANY, 'remove_fdb_entries', expected)
def test_other_agents_get_flood_entries_for_ha_agents(self):
# First the HA router port is added on HOST and HOST_2, then a network
# port is added on HOST_4.
# HOST_4 should get flood entries for HOST and HOST_2.
router = self._create_ha_router()
service_plugins = manager.NeutronManager.get_service_plugins()
service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin
with self.subnet(network=self._network, enable_dhcp=False) as snet, \
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=service_plugins):
subnet = snet['subnet']
port = self._add_router_interface(subnet, router, HOST)
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device1 = 'tap' + p1['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST_4, device=device1)
cast_expected = {
port['network_id']: {
'ports': {'20.0.0.1': [constants.FLOODING_ENTRY],
'20.0.0.2': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
self.assertEqual(1, self.mock_cast.call_count)
self.mock_cast.assert_called_with(
mock.ANY, 'add_fdb_entries', cast_expected, HOST_4)
def test_delete_ha_port(self):
# First a network port is added on HOST, and then the HA router port
# is added on HOST and HOST_2.
# remove_fdb should carry the flood entry of HOST_2 only, not HOST.
router = self._create_ha_router()
service_plugins = manager.NeutronManager.get_service_plugins()
service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin
with self.subnet(network=self._network, enable_dhcp=False) as snet, \
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=service_plugins):
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device1 = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST, device=device1)
subnet = snet['subnet']
port = self._add_router_interface(subnet, router, HOST)
expected = {port['network_id']:
{'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
self.mock_fanout.reset_mock()
interface_info = {'subnet_id': subnet['id']}
self.plugin.remove_router_interface(self.adminContext,
router['id'], interface_info)
self.mock_fanout.assert_called_with(
mock.ANY, 'remove_fdb_entries', expected)
def test_ha_agents_get_other_fdb(self):
# First a network port is added on HOST_4, then the HA router port is
# added on HOST and HOST_2.
# Both HA agents should create tunnels to HOST_4 and among themselves.
# Both HA agents should be announced to the other agents.
router = self._create_ha_router()
service_plugins = manager.NeutronManager.get_service_plugins()
service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin
with self.subnet(network=self._network, enable_dhcp=False) as snet, \
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=service_plugins):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device1 = 'tap' + p1['id']
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST_4, device=device1)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
subnet = snet['subnet']
port = self._add_router_interface(subnet, router, HOST)
fanout_expected = {port['network_id']: {
'ports': {'20.0.0.1': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
cast_expected_host = {port['network_id']: {
'ports': {
'20.0.0.4': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(p1['mac_address'],
p1_ips[0])],
'20.0.0.2': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
self.mock_cast.assert_called_with(
mock.ANY, 'add_fdb_entries', cast_expected_host, HOST)
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', fanout_expected)
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST_2,
device=port['id'], host=HOST_2)
cast_expected_host2 = {port['network_id']: {
'ports': {
'20.0.0.4': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(p1['mac_address'],
p1_ips[0])],
'20.0.0.1': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
fanout_expected = {port['network_id']: {
'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]},
'network_type': 'vxlan', 'segment_id': 1}}
self.mock_cast.assert_called_with(
mock.ANY, 'add_fdb_entries', cast_expected_host2, HOST_2)
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', fanout_expected)
def test_fdb_add_called(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected)
def test_fdb_add_not_called_type_local(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_3'}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.assertFalse(self.mock_fanout.called)
def test_fdb_add_called_for_l2pop_network_types(self):
self._register_ml2_agents()
host = HOST + '_5'
with self.subnet(network=self._network2) as subnet:
host_arg = {portbindings.HOST_ID: host}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=host,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {p1['network_id']:
{'ports':
{'20.0.0.5': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vlan',
'segment_id': 2}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected)
def test_fdb_called_for_active_ports(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device1 = 'tap' + p1['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device1)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
self.assertFalse(self.mock_cast.called)
expected2 = {p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected2)
def test_fdb_add_two_agents(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
**host_arg) as port2:
p1 = port1['port']
p2 = port2['port']
device1 = 'tap' + p1['id']
device2 = 'tap' + p2['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST + '_2',
device=device2)
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device1)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
expected1 = {p1['network_id']:
{'ports':
{'20.0.0.2': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p2['mac_address'],
p2_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_cast.assert_called_with(mock.ANY,
'add_fdb_entries',
expected1, HOST)
expected2 = {p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected2)
def test_fdb_add_called_two_networks(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
p1 = port1['port']
p3 = port3['port']
device1 = 'tap' + p1['id']
device3 = 'tap' + p3['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST + '_2',
device=device1)
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST,
device=device3)
p1_ips = [p['ip_address']
for p in p1['fixed_ips']]
expected1 = {p1['network_id']:
{'ports':
{'20.0.0.2':
[constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_cast.assert_called_with(
mock.ANY, 'add_fdb_entries', expected1,
HOST)
p3_ips = [p['ip_address']
for p in p3['fixed_ips']]
expected2 = {p1['network_id']:
{'ports':
{'20.0.0.1':
[constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p3['mac_address'],
p3_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected2)
def test_fdb_add_called_dualstack(self):
self._register_ml2_agents()
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.subnet(self._network) as subnet,\
self.subnet(
self._network,
cidr='2001:db8::/64',
ip_version=6,
gateway_ip='fe80::1',
ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
with self.port(
subnet,
fixed_ips=[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}],
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg
) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0]),
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[1])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', expected)
def test_update_port_down(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
device2 = 'tap' + p2['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device2)
p1 = port1['port']
device1 = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device1)
self.mock_fanout.reset_mock()
self.callbacks.update_device_down(self.adminContext,
agent_id=HOST,
device=device2)
p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
expected = {p2['network_id']:
{'ports':
{'20.0.0.1': [l2pop_rpc.PortInfo(
p2['mac_address'],
p2_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'remove_fdb_entries', expected)
def test_update_port_down_last_port_up(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
device2 = 'tap' + p2['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device2)
self.callbacks.update_device_down(self.adminContext,
agent_id=HOST,
device=device2)
p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
expected = {p2['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p2['mac_address'],
p2_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'remove_fdb_entries', expected)
def test_delete_port(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
device1 = 'tap' + p2['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device1)
self._delete('ports', port2['port']['id'])
p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
expected = {p2['network_id']:
{'ports':
{'20.0.0.1': [l2pop_rpc.PortInfo(
p2['mac_address'],
p2_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_any_call(
mock.ANY, 'remove_fdb_entries', expected)
def test_delete_port_last_port_up(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self._delete('ports', port['port']['id'])
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_any_call(
mock.ANY, 'remove_fdb_entries', expected)
def test_mac_addr_changed(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_5'}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
p1_ip = p1['fixed_ips'][0]['ip_address']
self.mock_fanout.reset_mock()
device = 'tap' + p1['id']
old_mac = p1['mac_address']
mac = old_mac.split(':')
mac[5] = '01' if mac[5] != '01' else '00'
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac,
portbindings.HOST_ID: HOST}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIn('port', res)
self.assertEqual(new_mac, res['port']['mac_address'])
# port was not bound before, so no fdb call expected yet
self.assertFalse(self.mock_fanout.called)
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.assertEqual(1, self.mock_fanout.call_count)
add_expected = {
p1['network_id']: {
'segment_id': 1,
'network_type': 'vxlan',
'ports': {
'20.0.0.1': [
l2pop_rpc.PortInfo('00:00:00:00:00:00',
'0.0.0.0'),
l2pop_rpc.PortInfo(new_mac, p1_ip)
]
}
}
}
self.mock_fanout.assert_called_with(
mock.ANY, 'add_fdb_entries', add_expected)
def test_fixed_ips_changed(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
**host_arg) as port1:
p1 = port1['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
add_expected = {'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'after': [(p1['mac_address'],
'10.0.0.10')]}}}}
self.mock_fanout.assert_any_call(
mock.ANY, 'update_fdb_entries', add_expected)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
upd_expected = {'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'before': [(p1['mac_address'],
'10.0.0.10')],
'after': [(p1['mac_address'],
'10.0.0.16')]}}}}
self.mock_fanout.assert_any_call(
mock.ANY, 'update_fdb_entries', upd_expected)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
del_expected = {'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'before': [(p1['mac_address'],
'10.0.0.2')]}}}}
self.mock_fanout.assert_any_call(
mock.ANY, 'update_fdb_entries', del_expected)
def test_no_fdb_updates_without_port_updates(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1['status'] = 'ACTIVE'
self.mock_fanout.reset_mock()
plugin = manager.NeutronManager.get_plugin()
plugin.update_port(self.adminContext, p1['id'], port1)
self.assertFalse(self.mock_fanout.called)
def test_get_device_details_port_id(self):
self._register_ml2_agents()
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port_id = port['port']['id']
# ensure various formats all result in correct port_id
formats = ['tap' + port_id[0:8], port_id,
port['port']['mac_address']]
for device in formats:
details = self.callbacks.get_device_details(
self.adminContext, device=device,
agent_id=HOST_2)
self.assertEqual(port_id, details['port_id'])
def _update_and_check_portbinding(self, port_id, host_id):
data = {'port': {portbindings.HOST_ID: host_id}}
req = self.new_update_request('ports', data, port_id)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(host_id, res['port'][portbindings.HOST_ID])
def _test_host_changed(self, twice):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
tunnel_ip = '20.0.0.1'
p1 = port1['port']
device1 = 'tap' + p1['id']
self.callbacks.update_device_up(
self.adminContext,
agent_id=HOST,
device=device1)
if twice:
tunnel_ip = '20.0.0.4'
self._update_and_check_portbinding(p1['id'], HOST_4)
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST_4,
device=device1)
self.mock_fanout.reset_mock()
self._update_and_check_portbinding(p1['id'], HOST_2)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {p1['network_id']:
{'ports':
{tunnel_ip: [constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
p1['mac_address'],
p1_ips[0])]},
'network_type': 'vxlan',
'segment_id': 1}}
self.mock_fanout.assert_called_with(
mock.ANY, 'remove_fdb_entries', expected)
def test_host_changed(self):
self._test_host_changed(twice=False)
def test_host_changed_twice(self):
self._test_host_changed(twice=True)
def test_delete_port_invokes_update_device_down(self):
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.L2PopulationAgentNotify = mock.Mock()
l2pop_mech.rpc_ctx = mock.Mock()
port = {'device_owner': ''}
context = mock.Mock()
context.current = port
with mock.patch.object(l2pop_mech,
'_get_agent_fdb',
return_value=None) as upd_port_down,\
mock.patch.object(l2pop_mech.L2PopulationAgentNotify,
'remove_fdb_entries'):
l2pop_mech.delete_port_postcommit(context)
self.assertTrue(upd_port_down.called)
def test_delete_unbound_port(self):
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.initialize()
with self.port() as port:
port_context = driver_context.PortContext(
self.driver, self.context, port['port'],
self.driver.get_network(
self.context, port['port']['network_id']),
None, None)
# The point is to provide coverage and to assert that no exceptions
# are raised.
l2pop_mech.delete_port_postcommit(port_context)
def test_fixed_ips_change_unbound_port_no_rpc(self):
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.initialize()
l2pop_mech.L2populationAgentNotify = mock.Mock()
with self.port() as port:
port_context = driver_context.PortContext(
self.driver, self.context, port['port'],
self.driver.get_network(
self.context, port['port']['network_id']),
None, None)
l2pop_mech._fixed_ips_changed(
port_context, None, port['port'], (set(['10.0.0.1']), set()))
# There's no need to send an RPC update if the IP address for an
# unbound port changed.
self.assertFalse(
l2pop_mech.L2populationAgentNotify.update_fdb_entries.called)
class TestL2PopulationMechDriver(base.BaseTestCase):
def _test_get_tunnels(self, agent_ip, exclude_host=True):
mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
agent = mock.Mock()
agent.host = HOST
network_ports = ((None, agent),)
with mock.patch.object(l2pop_db, 'get_agent_ip',
return_value=agent_ip):
excluded_host = HOST + '-EXCLUDE' if exclude_host else HOST
return mech_driver._get_tunnels(network_ports, excluded_host)
def test_get_tunnels(self):
tunnels = self._test_get_tunnels('20.0.0.1')
self.assertIn('20.0.0.1', tunnels)
def test_get_tunnels_no_ip(self):
tunnels = self._test_get_tunnels(None)
self.assertEqual(0, len(tunnels))
def test_get_tunnels_dont_exclude_host(self):
tunnels = self._test_get_tunnels(None, exclude_host=False)
self.assertEqual(0, len(tunnels))
def _test_create_agent_fdb(self, fdb_network_ports, agent_ips):
mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
tunnel_network_ports, tunnel_agent = (
self._mock_network_ports(HOST + '1', [None]))
agent_ips[tunnel_agent] = '10.0.0.1'
def agent_ip_side_effect(agent):
return agent_ips[agent]
with mock.patch.object(l2pop_db, 'get_agent_ip',
side_effect=agent_ip_side_effect),\
mock.patch.object(l2pop_db,
'get_nondistributed_active_network_ports',
return_value=fdb_network_ports),\
mock.patch.object(l2pop_db,
'get_distributed_active_network_ports',
return_value=tunnel_network_ports):
session = mock.Mock()
agent = mock.Mock()
agent.host = HOST
segment = {'segmentation_id': 1, 'network_type': 'vxlan'}
return mech_driver._create_agent_fdb(session,
agent,
segment,
'network_id')
def _mock_network_ports(self, host_name, bindings):
agent = mock.Mock()
agent.host = host_name
return [(binding, agent) for binding in bindings], agent
def test_create_agent_fdb(self):
binding = mock.Mock()
binding.port = {'mac_address': '00:00:DE:AD:BE:EF',
'fixed_ips': [{'ip_address': '1.1.1.1'}]}
fdb_network_ports, fdb_agent = (
self._mock_network_ports(HOST + '2', [binding]))
agent_ips = {fdb_agent: '20.0.0.1'}
agent_fdb = self._test_create_agent_fdb(fdb_network_ports,
agent_ips)
result = agent_fdb['network_id']
expected_result = {'segment_id': 1,
'network_type': 'vxlan',
'ports':
{'10.0.0.1':
[constants.FLOODING_ENTRY],
'20.0.0.1':
[constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
mac_address='00:00:DE:AD:BE:EF',
ip_address='1.1.1.1')]}}
self.assertEqual(expected_result, result)
def test_create_agent_fdb_only_tunnels(self):
agent_fdb = self._test_create_agent_fdb([], {})
result = agent_fdb['network_id']
expected_result = {'segment_id': 1,
'network_type': 'vxlan',
'ports':
{'10.0.0.1':
[constants.FLOODING_ENTRY]}}
self.assertEqual(expected_result, result)
def test_create_agent_fdb_concurrent_port_deletion(self):
binding = mock.Mock()
binding.port = {'mac_address': '00:00:DE:AD:BE:EF',
'fixed_ips': [{'ip_address': '1.1.1.1'}]}
binding2 = mock.Mock()
# the port was deleted
binding2.port = None
fdb_network_ports, fdb_agent = (
self._mock_network_ports(HOST + '2', [binding, binding2]))
agent_ips = {fdb_agent: '20.0.0.1'}
agent_fdb = self._test_create_agent_fdb(fdb_network_ports,
agent_ips)
result = agent_fdb['network_id']
expected_result = {'segment_id': 1,
'network_type': 'vxlan',
'ports':
{'10.0.0.1':
[constants.FLOODING_ENTRY],
'20.0.0.1':
[constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
mac_address='00:00:DE:AD:BE:EF',
ip_address='1.1.1.1')]}}
self.assertEqual(expected_result, result)
def test_update_port_precommit_mac_address_changed_raises(self):
port = {'status': u'ACTIVE',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': u'12:34:56:78:4b:0e',
'id': u'1'}
original_port = port.copy()
original_port['mac_address'] = u'12:34:56:78:4b:0f'
with mock.patch.object(driver_context.segments_db,
'get_network_segments'):
ctx = driver_context.PortContext(mock.Mock(),
mock.Mock(),
port,
mock.MagicMock(),
mock.Mock(),
None,
original_port=original_port)
mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
with testtools.ExpectedException(exceptions.InvalidInput):
mech_driver.update_port_precommit(ctx)
|
|
# -*- coding: utf-8 -*-
"""
Public Python API to create CMS contents.
WARNING: None of the functions defined in this module checks for permissions.
You must implement the necessary permission checks in your own code before
calling these methods!
"""
import datetime
from cms.utils.conf import get_cms_setting
from django.core.exceptions import PermissionDenied
from cms.utils.i18n import get_language_list
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.db.models import Max
from django.template.defaultfilters import slugify
from menus.menu_pool import menu_pool
from cms.admin.forms import save_permissions
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.models.pagemodel import Page
from cms.models.permissionmodels import (PageUser, PagePermission,
GlobalPagePermission, ACCESS_PAGE_AND_DESCENDANTS)
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils import moderator
from cms.utils.permissions import _thread_locals
#===============================================================================
# Constants
#===============================================================================
VISIBILITY_ALL = None
VISIBILITY_USERS = 1
VISIBILITY_STAFF = 2
#===============================================================================
# Helpers/Internals
#===============================================================================
def _generate_valid_slug(source, parent, language):
"""
Generate a valid slug for a page from source for the given language.
Parent is passed so we can make sure the slug is unique for this level in
the page tree.
"""
if parent:
qs = Title.objects.filter(language=language, page__parent=parent)
else:
qs = Title.objects.filter(language=language, page__parent__isnull=True)
used = qs.values_list('slug', flat=True)
baseslug = slugify(source)
slug = baseslug
i = 1
while slug in used:
slug = '%s-%s' % (baseslug, i)
i += 1
return slug
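# Illustrative behaviour of _generate_valid_slug (a sketch, not executed here):
# if no sibling title already uses the slugified source, it is returned as-is;
# otherwise a numeric suffix is appended until a free slug is found.
#
#   _generate_valid_slug('My Page', None, 'en')   # -> 'my-page'
#   _generate_valid_slug('My Page', None, 'en')   # -> 'my-page-1' once a
#                                                 #    root-level 'en' title
#                                                 #    already uses 'my-page'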
def _verify_apphook(apphook):
"""
Verifies that the given apphook is valid and returns its normalized form (the name)
"""
if hasattr(apphook, '__module__') and issubclass(apphook, CMSApp):
apphook_pool.discover_apps()
assert apphook in apphook_pool.apps.values()
return apphook.__name__
elif isinstance(apphook, basestring):
apphook_pool.discover_apps()
assert apphook in apphook_pool.apps
return apphook
else:
raise TypeError("apphook must be string or CMSApp instance")
def _verify_plugin_type(plugin_type):
"""
Verifies the given plugin_type is valid and returns a tuple of
(plugin_model, plugin_type)
"""
if (hasattr(plugin_type, '__module__') and
issubclass(plugin_type, CMSPluginBase)):
plugin_model = plugin_type.model
assert plugin_type in plugin_pool.plugins.values()
plugin_type = plugin_type.__name__
elif isinstance(plugin_type, basestring):
try:
plugin_model = plugin_pool.get_plugin(plugin_type).model
except KeyError:
raise TypeError(
'plugin_type must be CMSPluginBase subclass or string'
)
else:
raise TypeError('plugin_type must be CMSPluginBase subclass or string')
return plugin_model, plugin_type
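# Illustrative use of _verify_plugin_type (a sketch; 'TextPlugin' is assumed to
# be registered in the plugin pool):
#
#   model_cls, name = _verify_plugin_type('TextPlugin')
#   # name == 'TextPlugin'; model_cls is that plugin's model class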
#===============================================================================
# Public API
#===============================================================================
def create_page(title, template, language, menu_title=None, slug=None,
apphook=None, redirect=None, meta_description=None,
meta_keywords=None, created_by='python-api', parent=None,
publication_date=None, publication_end_date=None,
in_navigation=False, soft_root=False, reverse_id=None,
navigation_extenders=None, published=False, site=None,
login_required=False, limit_visibility_in_menu=VISIBILITY_ALL,
position="last-child", overwrite_url=None):
"""
Create a CMS Page and its title for the given language.
See docs/extending_cms/api_reference.rst for more info.
A commented usage example follows this function.
"""
# ugly permissions hack
if created_by and isinstance(created_by, get_user_model()):
_thread_locals.user = created_by
created_by = created_by.username
else:
_thread_locals.user = None
# validate template
assert template in [tpl[0] for tpl in get_cms_setting('TEMPLATES')]
# validate site
if not site:
site = Site.objects.get_current()
else:
assert isinstance(site, Site)
# validate language:
assert language in get_language_list(site), get_cms_setting('LANGUAGES').get(site.pk)
# set default slug:
if not slug:
slug = _generate_valid_slug(title, parent, language)
# validate and normalize apphook
if apphook:
application_urls = _verify_apphook(apphook)
else:
application_urls = None
# validate parent
if parent:
assert isinstance(parent, Page)
parent = Page.objects.get(pk=parent.pk)
# validate publication date
if publication_date:
assert isinstance(publication_date, datetime.date)
# validate publication end date
if publication_end_date:
assert isinstance(publication_end_date, datetime.date)
# validate softroot
assert get_cms_setting('SOFTROOT') or not soft_root
if navigation_extenders:
raw_menus = menu_pool.get_menus_by_attribute("cms_enabled", True)
menus = [menu[0] for menu in raw_menus]
assert navigation_extenders in menus
# validate menu visibility
accepted_limitations = (VISIBILITY_ALL, VISIBILITY_USERS, VISIBILITY_STAFF)
assert limit_visibility_in_menu in accepted_limitations
# validate position
assert position in ('last-child', 'first-child', 'left', 'right')
page = Page(
created_by=created_by,
changed_by=created_by,
parent=parent,
publication_date=publication_date,
publication_end_date=publication_end_date,
in_navigation=in_navigation,
soft_root=soft_root,
reverse_id=reverse_id,
navigation_extenders=navigation_extenders,
published=False, # will be published later
template=template,
site=site,
login_required=login_required,
limit_visibility_in_menu=limit_visibility_in_menu,
)
page.insert_at(parent, position)
page.save()
create_title(
language=language,
title=title,
menu_title=menu_title,
slug=slug,
apphook=application_urls,
redirect=redirect,
meta_description=meta_description,
meta_keywords=meta_keywords,
page=page,
overwrite_url=overwrite_url
)
if published:
page.publish()
del _thread_locals.user
return page.reload()
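# Example usage of create_page (a sketch; 'base.html' must appear in the
# CMS_TEMPLATES setting and 'en' must be an enabled language -- both are
# assumptions about the project configuration):
#
#   home = create_page('Home', 'base.html', 'en', in_navigation=True)
#   about = create_page('About', 'base.html', 'en', parent=home, published=True)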
def create_title(language, title, page, menu_title=None, slug=None,
apphook=None, redirect=None, meta_description=None,
meta_keywords=None, parent=None, overwrite_url=None):
"""
Create a title.
Parent is only used if slug=None.
See docs/extending_cms/api_reference.rst for more info
"""
# validate page
assert isinstance(page, Page)
# validate language:
assert language in get_language_list(page.site_id)
# set default slug:
if not slug:
slug = _generate_valid_slug(title, parent, language)
# validate and normalize apphook
if apphook:
application_urls = _verify_apphook(apphook)
else:
application_urls = None
title = Title.objects.create(
language=language,
title=title,
menu_title=menu_title,
slug=slug,
application_urls=application_urls,
redirect=redirect,
meta_description=meta_description,
meta_keywords=meta_keywords,
page=page
)
if overwrite_url:
title.has_url_overwrite = True
title.path = overwrite_url
title.save()
return title
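# Example usage of create_title (a sketch; assumes 'de' is an enabled language
# and 'home' is a Page created as in the create_page example above):
#
#   create_title('de', 'Startseite', page=home, slug='startseite')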
def add_plugin(placeholder, plugin_type, language, position='last-child',
target=None, **data):
"""
Add a plugin to a placeholder
See docs/extending_cms/api_reference.rst for more info
"""
# validate placeholder
assert isinstance(placeholder, Placeholder)
# validate and normalize plugin type
plugin_model, plugin_type = _verify_plugin_type(plugin_type)
max_pos = CMSPlugin.objects.filter(language=language,
placeholder=placeholder).aggregate(Max('position'))['position__max'] or 0
plugin_base = CMSPlugin(
plugin_type=plugin_type,
placeholder=placeholder,
position=max_pos + 1,
language=language
)
plugin_base.insert_at(target, position=position, save=False)
plugin = plugin_model(**data)
plugin_base.set_base_attr(plugin)
plugin.save()
return plugin
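# Example usage of add_plugin (a sketch; the 'content' placeholder slot and an
# installed TextPlugin with a 'body' field are assumptions about the project):
#
#   placeholder = home.placeholders.get(slot='content')
#   add_plugin(placeholder, 'TextPlugin', 'en', body='<p>Hello world</p>')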
def create_page_user(created_by, user,
can_add_page=True, can_view_page=True,
can_change_page=True, can_delete_page=True,
can_recover_page=True, can_add_pageuser=True,
can_change_pageuser=True, can_delete_pageuser=True,
can_add_pagepermission=True,
can_change_pagepermission=True,
can_delete_pagepermission=True, grant_all=False):
"""
Creates a page user.
See docs/extending_cms/api_reference.rst for more info
"""
if grant_all:
# just be lazy
return create_page_user(created_by, user, True, True, True, True,
True, True, True, True, True, True, True)
# validate created_by
assert isinstance(created_by, get_user_model())
data = {
'can_add_page': can_add_page,
'can_view_page': can_view_page,
'can_change_page': can_change_page,
'can_delete_page': can_delete_page,
'can_recover_page': can_recover_page,
'can_add_pageuser': can_add_pageuser,
'can_change_pageuser': can_change_pageuser,
'can_delete_pageuser': can_delete_pageuser,
'can_add_pagepermission': can_add_pagepermission,
'can_change_pagepermission': can_change_pagepermission,
'can_delete_pagepermission': can_delete_pagepermission,
}
user.is_staff = True
user.is_active = True
page_user = PageUser(created_by=created_by)
for field in [f.name for f in get_user_model()._meta.local_fields]:
setattr(page_user, field, getattr(user, field))
user.save()
page_user.save()
save_permissions(data, page_user)
return user
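# Example usage of create_page_user (a sketch; both users are assumed to exist
# already, and your own code must verify that 'admin' may create page users):
#
#   admin = get_user_model().objects.get(username='admin')
#   editor = get_user_model().objects.get(username='editor')
#   create_page_user(admin, editor, grant_all=True)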
def assign_user_to_page(page, user, grant_on=ACCESS_PAGE_AND_DESCENDANTS,
can_add=False, can_change=False, can_delete=False,
can_change_advanced_settings=False, can_publish=False,
can_change_permissions=False, can_move_page=False,
can_recover_page=True, can_view=False,
grant_all=False, global_permission=False):
"""
Assigns the given user to the page and grants the requested permissions.
See docs/extending_cms/api_reference.rst for more info.
A commented usage example follows this function.
"""
grant_all = grant_all and not global_permission
data = {
'can_add': can_add or grant_all,
'can_change': can_change or grant_all,
'can_delete': can_delete or grant_all,
'can_change_advanced_settings': can_change_advanced_settings or grant_all,
'can_publish': can_publish or grant_all,
'can_change_permissions': can_change_permissions or grant_all,
'can_move_page': can_move_page or grant_all,
'can_view': can_view or grant_all,
}
page_permission = PagePermission(page=page, user=user,
grant_on=grant_on, **data)
page_permission.save()
if global_permission:
page_permission = GlobalPagePermission(
user=user, can_recover_page=can_recover_page, **data)
page_permission.save()
page_permission.sites.add(Site.objects.get_current())
return page_permission
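# Example usage of assign_user_to_page (a sketch, reusing the 'home' and
# 'editor' objects from the examples above):
#
#   assign_user_to_page(home, editor, can_change=True, can_publish=True)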
def publish_page(page, user):
"""
Publish a page. This sets `page.published` to `True` and calls publish()
which does the actual publishing.
See docs/extending_cms/api_reference.rst for more info
"""
page = page.reload()
class FakeRequest(object):
def __init__(self, user):
self.user = user
request = FakeRequest(user)
if not page.has_publish_permission(request):
raise PermissionDenied()
page.published = True
page.save()
page.publish()
return page.reload()
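# Example usage of publish_page (a sketch): 'editor' needs publish permission
# on the page, otherwise PermissionDenied is raised.
#
#   publish_page(home, editor)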
|
|
# Copyright 2015 iWeb Technologies Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from mock import call
from osc_lib.cli import format_columns
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes
from openstackclient.volume.v1 import qos_specs
class TestQos(volume_fakes.TestVolumev1):
def setUp(self):
super(TestQos, self).setUp()
self.qos_mock = self.app.client_manager.volume.qos_specs
self.qos_mock.reset_mock()
self.types_mock = self.app.client_manager.volume.volume_types
self.types_mock.reset_mock()
class TestQosAssociate(TestQos):
volume_type = volume_fakes.FakeType.create_one_type()
qos_spec = volume_fakes.FakeQos.create_one_qos()
def setUp(self):
super(TestQosAssociate, self).setUp()
self.qos_mock.get.return_value = self.qos_spec
self.types_mock.get.return_value = self.volume_type
# Get the command object to test
self.cmd = qos_specs.AssociateQos(self.app, None)
def test_qos_associate(self):
arglist = [
self.qos_spec.id,
self.volume_type.id
]
verifylist = [
('qos_spec', self.qos_spec.id),
('volume_type', self.volume_type.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.associate.assert_called_with(
self.qos_spec.id,
self.volume_type.id
)
self.assertIsNone(result)
class TestQosCreate(TestQos):
columns = (
'consumer',
'id',
'name',
'properties'
)
def setUp(self):
super(TestQosCreate, self).setUp()
self.new_qos_spec = volume_fakes.FakeQos.create_one_qos()
self.datalist = (
self.new_qos_spec.consumer,
self.new_qos_spec.id,
self.new_qos_spec.name,
format_columns.DictColumn(self.new_qos_spec.specs)
)
self.qos_mock.create.return_value = self.new_qos_spec
# Get the command object to test
self.cmd = qos_specs.CreateQos(self.app, None)
def test_qos_create_without_properties(self):
arglist = [
self.new_qos_spec.name,
]
verifylist = [
('name', self.new_qos_spec.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.qos_mock.create.assert_called_with(
self.new_qos_spec.name,
{'consumer': 'both'}
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.datalist, data)
def test_qos_create_with_consumer(self):
arglist = [
'--consumer', self.new_qos_spec.consumer,
self.new_qos_spec.name,
]
verifylist = [
('consumer', self.new_qos_spec.consumer),
('name', self.new_qos_spec.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.qos_mock.create.assert_called_with(
self.new_qos_spec.name,
{'consumer': self.new_qos_spec.consumer}
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.datalist, data)
def test_qos_create_with_properties(self):
arglist = [
'--consumer', self.new_qos_spec.consumer,
'--property', 'foo=bar',
'--property', 'iops=9001',
self.new_qos_spec.name,
]
verifylist = [
('consumer', self.new_qos_spec.consumer),
('property', self.new_qos_spec.specs),
('name', self.new_qos_spec.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.new_qos_spec.specs.update(
{'consumer': self.new_qos_spec.consumer})
self.qos_mock.create.assert_called_with(
self.new_qos_spec.name,
self.new_qos_spec.specs
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.datalist, data)
class TestQosDelete(TestQos):
qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
def setUp(self):
super(TestQosDelete, self).setUp()
self.qos_mock.get = (
volume_fakes.FakeQos.get_qoses(self.qos_specs))
# Get the command object to test
self.cmd = qos_specs.DeleteQos(self.app, None)
def test_qos_delete_with_id(self):
arglist = [
self.qos_specs[0].id
]
verifylist = [
('qos_specs', [self.qos_specs[0].id])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, False)
self.assertIsNone(result)
def test_qos_delete_with_name(self):
arglist = [
self.qos_specs[0].name
]
verifylist = [
('qos_specs', [self.qos_specs[0].name])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, False)
self.assertIsNone(result)
def test_qos_delete_with_force(self):
arglist = [
'--force',
self.qos_specs[0].id
]
verifylist = [
('force', True),
('qos_specs', [self.qos_specs[0].id])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, True)
self.assertIsNone(result)
def test_delete_multiple_qoses(self):
arglist = []
for q in self.qos_specs:
arglist.append(q.id)
verifylist = [
('qos_specs', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
calls = []
for q in self.qos_specs:
calls.append(call(q.id, False))
self.qos_mock.delete.assert_has_calls(calls)
self.assertIsNone(result)
def test_delete_multiple_qoses_with_exception(self):
arglist = [
self.qos_specs[0].id,
'unexist_qos',
]
verifylist = [
('qos_specs', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
find_mock_result = [self.qos_specs[0], exceptions.CommandError]
with mock.patch.object(utils, 'find_resource',
side_effect=find_mock_result) as find_mock:
try:
self.cmd.take_action(parsed_args)
self.fail('CommandError should be raised.')
except exceptions.CommandError as e:
self.assertEqual(
'1 of 2 QoS specifications failed to delete.', str(e))
find_mock.assert_any_call(self.qos_mock, self.qos_specs[0].id)
find_mock.assert_any_call(self.qos_mock, 'unexist_qos')
self.assertEqual(2, find_mock.call_count)
self.qos_mock.delete.assert_called_once_with(
self.qos_specs[0].id, False
)
class TestQosDisassociate(TestQos):
volume_type = volume_fakes.FakeType.create_one_type()
qos_spec = volume_fakes.FakeQos.create_one_qos()
def setUp(self):
super(TestQosDisassociate, self).setUp()
self.qos_mock.get.return_value = self.qos_spec
self.types_mock.get.return_value = self.volume_type
# Get the command object to test
self.cmd = qos_specs.DisassociateQos(self.app, None)
def test_qos_disassociate_with_volume_type(self):
arglist = [
'--volume-type', self.volume_type.id,
self.qos_spec.id,
]
verifylist = [
('volume_type', self.volume_type.id),
('qos_spec', self.qos_spec.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.disassociate.assert_called_with(
self.qos_spec.id,
self.volume_type.id
)
self.assertIsNone(result)
def test_qos_disassociate_with_all_volume_types(self):
arglist = [
'--all',
self.qos_spec.id,
]
verifylist = [
('qos_spec', self.qos_spec.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.disassociate_all.assert_called_with(self.qos_spec.id)
self.assertIsNone(result)
class TestQosList(TestQos):
qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
qos_association = volume_fakes.FakeQos.create_one_qos_association()
columns = (
'ID',
'Name',
'Consumer',
'Associations',
'Properties',
)
data = []
for q in qos_specs:
data.append((
q.id,
q.name,
q.consumer,
format_columns.ListColumn([qos_association.name]),
format_columns.DictColumn(q.specs),
))
def setUp(self):
super(TestQosList, self).setUp()
self.qos_mock.list.return_value = self.qos_specs
self.qos_mock.get_associations.return_value = [self.qos_association]
# Get the command object to test
self.cmd = qos_specs.ListQos(self.app, None)
def test_qos_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.qos_mock.list.assert_called_with()
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_qos_list_no_association(self):
self.qos_mock.reset_mock()
self.qos_mock.get_associations.side_effect = [
[self.qos_association],
exceptions.NotFound("NotFound"),
]
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.qos_mock.list.assert_called_with()
self.assertEqual(self.columns, columns)
ex_data = copy.deepcopy(self.data)
ex_data[1] = (
self.qos_specs[1].id,
self.qos_specs[1].name,
self.qos_specs[1].consumer,
format_columns.ListColumn(None),
format_columns.DictColumn(self.qos_specs[1].specs),
)
self.assertListItemEqual(ex_data, list(data))
class TestQosSet(TestQos):
qos_spec = volume_fakes.FakeQos.create_one_qos()
def setUp(self):
super(TestQosSet, self).setUp()
self.qos_mock.get.return_value = self.qos_spec
# Get the command object to test
self.cmd = qos_specs.SetQos(self.app, None)
def test_qos_set_with_properties_with_id(self):
arglist = [
'--property', 'foo=bar',
'--property', 'iops=9001',
self.qos_spec.id,
]
verifylist = [
('property', self.qos_spec.specs),
('qos_spec', self.qos_spec.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.set_keys.assert_called_with(
self.qos_spec.id,
self.qos_spec.specs
)
self.assertIsNone(result)
class TestQosShow(TestQos):
qos_spec = volume_fakes.FakeQos.create_one_qos()
qos_association = volume_fakes.FakeQos.create_one_qos_association()
def setUp(self):
super(TestQosShow, self).setUp()
self.qos_mock.get.return_value = self.qos_spec
self.qos_mock.get_associations.return_value = [self.qos_association]
# Get the command object to test
self.cmd = qos_specs.ShowQos(self.app, None)
def test_qos_show(self):
arglist = [
self.qos_spec.id
]
verifylist = [
('qos_spec', self.qos_spec.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.qos_mock.get.assert_called_with(
self.qos_spec.id
)
collist = (
'associations',
'consumer',
'id',
'name',
'properties'
)
self.assertEqual(collist, columns)
datalist = (
format_columns.ListColumn([self.qos_association.name]),
self.qos_spec.consumer,
self.qos_spec.id,
self.qos_spec.name,
format_columns.DictColumn(self.qos_spec.specs),
)
self.assertItemEqual(datalist, tuple(data))
class TestQosUnset(TestQos):
qos_spec = volume_fakes.FakeQos.create_one_qos()
def setUp(self):
super(TestQosUnset, self).setUp()
self.qos_mock.get.return_value = self.qos_spec
# Get the command object to test
self.cmd = qos_specs.UnsetQos(self.app, None)
def test_qos_unset_with_properties(self):
arglist = [
'--property', 'iops',
'--property', 'foo',
self.qos_spec.id,
]
verifylist = [
('property', ['iops', 'foo']),
('qos_spec', self.qos_spec.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.qos_mock.unset_keys.assert_called_with(
self.qos_spec.id,
['iops', 'foo']
)
self.assertIsNone(result)
def test_qos_unset_nothing(self):
arglist = [
self.qos_spec.id,
]
verifylist = [
('qos_spec', self.qos_spec.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
|
|
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
Norton Luo
This test validates the system-level functionality of the RackHD image service. It covers VMware ESXi installation
and node rediscovery, and uses the image server to store the ESXi image and the microkernel used for RackHD discovery.
You need to put a config file in the /config directory; a commented sketch of the relevant entries follows this docstring.
'''
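# Sketch of the config entries this test reads via fit_common.fitcfg() (an
# assumption-laden illustration, not a verified default config; adjust the
# names and values to your environment):
#
#   "image_service": {
#       "control_port": 7070,
#       "usr": "<image server ssh user>",
#       "pwd": "<image server ssh password>",
#       "os_image": [
#           {"osname": "ESXi", "version": "6.0", "url": "http://<mirror>/esxi-6.0.iso"}
#       ]
#   }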
import os
import sys
import time
import flogging
import random
import fit_common
import urllib2
import pexpect
import unittest
import subprocess
import test_api_utils
from nose.plugins.attrib import attr
logs = flogging.get_loggers()
@attr(all=False, regression=False, smoke=False, imageservice=False)
class test_image_service_system(fit_common.unittest.TestCase):
def _get_serverip(self):
args = fit_common.fitargs()['unhandled_arguments']
for arg in args:
if "imageserver" in arg:
serverip = arg.split("=")[1]
return serverip
def _apply_obmsetting_to_node(self, nodeid):
usr = ''
pwd = ''
response = fit_common.rackhdapi(
'/api/2.0/nodes/' + nodeid + '/catalogs/bmc')
if response['status'] in range(200, 205):
bmcip = response['json']['data']['IP Address']
else:
bmcip = "0.0.0.0"
if bmcip == "0.0.0.0":
response = fit_common.rackhdapi(
'/api/2.0/nodes/' + nodeid + '/catalogs/rmm')
if response['status'] in range(200, 205):
bmcip = response['json']['data']['IP Address']
else:
return False
# Try each credential record from the config file
for creds in fit_common.fitcreds()['bmc']:
if fit_common.remote_shell(
'ipmitool -I lanplus -H ' + bmcip + ' -U ' +
creds['username'] + ' -P ' + creds['password'] + ' fru')['exitcode'] == 0:
usr = creds['username']
pwd = creds['password']
break
# Apply the working credentials to the node's OBM settings
if usr != "":
payload = {
"service": "ipmi-obm-service",
"config": {
"host": bmcip,
"user": usr,
"password": pwd},
"nodeId": nodeid}
api_data = fit_common.rackhdapi("/api/2.0/obms", action='put', payload=payload)
if api_data['status'] == 201:
return True
return False
def _upload_os_by_network(self, osname, osversion, source_url):
mon_url = '/images?name=' + osname + '&version=' + osversion + '&isoweb=' + source_url
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful(
"http://" +
serverip +
":" + control_port +
mon_url,
rest_action="put",
rest_payload={},
rest_timeout=None,
rest_headers={})
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201, got:' + str(response['status']))
return "fail"
def _list_file(self, mon_url):
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url)
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _delete_os_image(self, osname, osversion):
mon_url = '/images?name=' + osname + '&version=' + osversion
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _delete_os_iso(self, isoname):
mon_url = '/iso?name=' + isoname
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
logs.error('Incorrect HTTP return code, expected 201-205, got:' + str(response['status']))
return "fail"
def _wait_for_task_complete(self, taskid, retries=60):
for dummy in range(0, retries):
result = fit_common.rackhdapi('/api/2.0/workflows/' + taskid)
if result['json']["status"] == 'running' or result['json']["status"] == 'pending':
logs.debug("OS Install workflow state: {}".format(result['json']["status"]))
fit_common.time.sleep(30)
elif result['json']["status"] == 'succeeded':
logs.debug("OS Install workflow state: {}".format(result['json']["status"]))
return True
else:
break
logs.error("Task failed with the following state: " + result['json']["status"])
return False
def _get_tester_ip(self):
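        """Return the image server's IP address on the route to RackHD.

        Runs 'ping -R' against the RackHD API server from the image server over
        ssh and parses the first recorded hop of the route.
        """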
serverip = self._get_serverip()
monip = fit_common.fitcfg()["rackhd-config"]["apiServerAddress"]
cmd = "ping -R -c 1 " + monip + ""
(command_output, exitstatus) = pexpect.run(
"ssh -q -o StrictHostKeyChecking=no -t " + fit_common.fitcfg()["image_service"]['usr'] + "@" + serverip +
" sudo bash -c \\\"" + cmd + "\\\"", withexitstatus=1,
events={"assword": fit_common.fitcfg()["image_service"]['pwd'] + "\n"}, timeout=300)
uud = command_output.split("\t")
myip = uud[1].split("\r\n")[0]
logs.debug('My IP address is: ' + myip)
return myip
def _create_esxi_repo(self):
logs.debug("create a ESXi repo")
for osrepo in fit_common.fitcfg()["image_service"]["os_image"]:
if osrepo["osname"] == "ESXi" and osrepo["version"] == "6.0":
os_name = osrepo["osname"]
os_version = osrepo["version"]
http_iso_url = osrepo["url"]
self._upload_os_by_network(os_name, os_version, http_iso_url)
logs.debug("create ESXi repo successfully")
return
logs.error("No ESXi source found in config")
def _upload_microkernel(self, filename):
myfile = open(filename, 'rb')
serverip = self._get_serverip()
mon_url = '/microkernel?name=' + filename
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="binary-put",
rest_payload=myfile)
if response['status'] in range(200, 205):
return response['json']
else:
            logs.debug_3('Incorrect HTTP return code, expected 200-204, got:' + str(response['status']))
return "fail"
def _delete_microkernel(self, filename):
mon_url = '/microkernel?name=' + filename
serverip = self._get_serverip()
control_port = str(fit_common.fitcfg()["image_service"]["control_port"])
response = fit_common.restful("http://" + serverip + ":" + control_port + mon_url, rest_action="delete")
if response['status'] in range(200, 205):
return response['json']
else:
            logs.debug_3('Incorrect HTTP return code, expected 200-204, got:' + str(response['status']))
return "fail"
def _scp_file(self, url):
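        """Copy a file from the RackHD host via scp unless it already exists locally."""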
file_name = url.split('/')[-1]
logs.debug_3("scp file %s from RackHD" % url)
if not os.path.exists(file_name):
path = url[6:]
rackhd_hostname = fit_common.fitargs()['rackhd_host']
scp_file = fit_common.fitcreds()['rackhd_host'][0]['username'] + '@{0}:{1}'.format(rackhd_hostname, path)
cmd = 'scp -o StrictHostKeyChecking=no {0} .'.format(scp_file)
logs.debug_3("scp command : '{0}'".format(cmd))
logfile_redirect = None
if fit_common.VERBOSITY >= 9:
logfile_redirect = sys.stdout
(command_output, ecode) = pexpect.run(
cmd, withexitstatus=1,
events={'(?i)assword: ': fit_common.fitcreds()['rackhd_host'][0]['password'] + '\n'},
logfile=logfile_redirect)
assert ecode == 0, 'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
return file_name
def _download_file(self, url):
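        """Download a file over HTTP unless it already exists locally.

        Streams the URL to a local file in 2 MB blocks, printing progress at
        high verbosity, and returns the local file name.
        """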
logs.debug_3("downloading url=%s" % url)
file_name = url.split('/')[-1]
if os.path.exists(file_name) is False:
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
logs.debug_3("Downloading: %s Bytes: %s" % (file_name, file_size))
file_size_dl = 0
block_sz = 2097152
while True:
file_buffer = u.read(block_sz)
if not file_buffer:
break
file_size_dl += len(file_buffer)
f.write(file_buffer)
                # logs cannot redraw output in place, so use print for the progress display instead.
if fit_common.VERBOSITY >= 9:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1) + "\r"
print(status)
f.close()
return file_name
def _upload_all_microkernels(self):
for microkernelrepo in fit_common.fitcfg()["image_service"]["microkernel"]:
if microkernelrepo[:3] == "scp":
file_name = self._scp_file(microkernelrepo)
else:
file_name = self._download_file(microkernelrepo)
self._upload_microkernel(file_name)
self._release(file_name)
def _release(self, file_name):
try:
logs.debug_3("rm " + file_name)
subprocess.check_output("rm " + file_name, shell=True)
return True
except OSError:
return False
def _delete_all_microkernels(self):
microkernel_list = self._list_file('/microkernel')
for microkernel in microkernel_list:
            self.assertNotEqual(self._delete_microkernel(microkernel["name"]), "fail", "delete microkernel failed!")
microkernel_list_clear = self._list_file('/microkernel')
self.assertTrue(microkernel_list_clear == [])
logs.debug_3("All microkernels are cleared!")
def _delete_all_images(self):
os_image_list = self._list_os_image()
serverip = self._get_serverip()
for image_repo in os_image_list:
self.assertNotEqual(
self._delete_os_image(image_repo["name"], image_repo["version"]), "fail", "delete image failed!")
file_port = str(fit_common.fitcfg()["image_service"]["file_port"])
fileurlprefix = "http://" + serverip + ":" + file_port + "/" + image_repo["name"] + '/' + \
image_repo["version"] + '/'
            self.assertFalse(self._file_exists(fileurlprefix), "The repo url was not deleted completely")
os_image_list_clear = self._list_os_image()
self.assertTrue(os_image_list_clear == [])
os_iso_list = self._list_os_iso()
for iso_repo in os_iso_list:
self.assertNotEqual(self._delete_os_iso(iso_repo["name"]), "fail", "delete iso failed!")
os_iso_list_clear = self._list_os_iso()
        self.assertTrue(os_iso_list_clear == [], "The iso was not deleted completely")
        logs.debug("All repos are cleared!")
def _wait_for_discover(self, node_uuid):
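        """Wait for the node with the given UUID to be rediscovered.

        Polls the Redfish systems collection every 30 seconds (up to 30 times)
        and returns True once a system reporting that UUID appears.
        """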
for dummy in range(0, 30):
fit_common.time.sleep(30)
rest_data = fit_common.rackhdapi('/redfish/v1/Systems/')
if rest_data['json']['Members@odata.count'] == 0:
continue
node_collection = rest_data['json']['Members']
for computenode in node_collection:
nodeidurl = computenode['@odata.id']
api_data = fit_common.rackhdapi(nodeidurl)
if api_data['status'] > 399:
break
if node_uuid == api_data['json']['UUID']:
return True
logs.error("Timeout in rediscovery!")
return False
def test_bootstrapping_ext_esxi6(self):
self._create_esxi_repo()
node_collection = test_api_utils.get_node_list_by_type("compute")
fileserver_ip = self._get_tester_ip()
file_port = str(fit_common.fitcfg()["image_service"]["file_port"])
repourl = "http://" + fileserver_ip + ':' + file_port + '/ESXi' + '/' + '6.0' + '/'
# Select one node at random
for dummy in node_collection:
node = node_collection[random.randint(0, len(node_collection) - 1)]
logs.debug('Running ESXI 6.0 bootstrap from external file server.')
node_obm = fit_common.rackhdapi(
'/api/2.0/nodes/' + node)['json']['obms']
if node_obm == []:
                self.assertTrue(self._apply_obmsetting_to_node(node), "Failed to apply obm setting!")
fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows/action', action='put',
payload={
"command": "cancel",
"options": {}
})
nodehostname = 'esxi60'
payload_data = {"options": {
"defaults": {
"version": "6.0",
"repo": repourl,
"rootPassword": "1234567",
"hostname": nodehostname
}}}
result = fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows?name=Graph.InstallEsxi', action='post', payload=payload_data)
self.assertEqual(
result['status'], 201, 'Was expecting code 201. Got ' + str(result['status']))
self.assertEqual(
self._wait_for_task_complete(result['json']["instanceId"], retries=80), True,
'TaskID ' + result['json']["instanceId"] + ' not successfully completed.')
self._delete_all_images()
def test_rediscover(self):
# Select one node at random that's not a management server
self._upload_all_microkernels()
node_collection = test_api_utils.get_node_list_by_type("compute")
for dummy in node_collection:
node = node_collection[random.randint(0, len(node_collection) - 1)]
if fit_common.rackhdapi('/api/2.0/nodes/' + node)['json']['name'] != "Management Server":
break
logs.debug_3('Checking OBM setting...')
node_obm = fit_common.rackhdapi('/api/2.0/nodes/' + node)['json']['obms']
if node_obm == []:
            self.assertTrue(self._apply_obmsetting_to_node(node), "Failed to apply obm setting!")
node_uuid = fit_common.rackhdapi('/redfish/v1/Systems/' + node)['json']['UUID']
logs.debug_3('UUID of selected Node is:' + node_uuid)
# Cancel all active workflow on target node
fit_common.rackhdapi(
'/api/2.0/nodes/' + node + '/workflows/action', action='put',
payload={
"command": "cancel",
"options": {}
})
logs.debug_3('Running rediscover, resetting system node...')
# Reboot the node to begin rediscover.
resetresponse = fit_common.rackhdapi(
'/redfish/v1/Systems/' + node + '/Actions/ComputerSystem.Reset', action='post',
payload={"reset_type": "ForceRestart"})
self.assertTrue(resetresponse['status'] < 209,
'Incorrect HTTP return code, expected <209 , got:' + str(resetresponse['status']))
# Delete original node
for dummy in range(0, 30):
time.sleep(2)
result = fit_common.rackhdapi('/api/2.0/nodes/' + node, action='delete')
if result['status'] < 209:
break
self.assertTrue(result['status'] < 209, 'Was expecting response code < 209. Got ' + str(result['status']))
logs.debug_3("Waiting node reboot and boot into microkernel........")
        self.assertTrue(self._wait_for_discover(node_uuid), "Failed to find the original node after reboot!")
        logs.debug_3("Found the original node. It was rediscovered successfully!")
self._delete_all_microkernels()
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
# pylint: disable=unused-import
from tensorflow.python.ops.lookup_ops import FastHashSpec
from tensorflow.python.ops.lookup_ops import HasherSpec
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import IdTableWithHashBuckets
from tensorflow.python.ops.lookup_ops import index_table_from_file
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBase
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
from tensorflow.python.ops.lookup_ops import LookupInterface
from tensorflow.python.ops.lookup_ops import StrongHashSpec
from tensorflow.python.ops.lookup_ops import TableInitializerBase
from tensorflow.python.ops.lookup_ops import TextFileIdTableInitializer
from tensorflow.python.ops.lookup_ops import TextFileIndex
from tensorflow.python.ops.lookup_ops import TextFileInitializer
from tensorflow.python.ops.lookup_ops import TextFileStringTableInitializer
# pylint: enable=unused-import
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file, num_oov_buckets, vocab_size, default_value, hasher_spec,
key_dtype=dtypes.string, name=name)
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor
where each element is a key and corresponding index within the tensor is the
value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2]
```
Args:
mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The
type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
    The lookup table to map an input `Tensor` to an index `int64` `Tensor`.
Raises:
ValueError: If `mapping` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_table_from_tensor(
vocabulary_list=mapping,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
hasher_spec=hasher_spec,
dtype=dtype,
name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_table_from_tensor and call the lookup "
"method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
"""Maps `tensor` of strings into `int64` indices based on `mapping`.
This operation converts `tensor` of strings into `int64` indices.
The mapping is initialized from a string `mapping` tensor where each element
is a key and corresponding index within the tensor is the value.
Any entry in the input which does not have a corresponding entry in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
Elements in `mapping` cannot be duplicated, otherwise the initialization
will throw a FailedPreconditionError.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` once.
For example:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])
ids = tf.contrib.lookup.string_to_index(
feats, mapping=mapping_strings, default_value=-1)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, -1, 2]
```
Args:
tensor: A 1-D input `Tensor` with the strings to map to indices.
mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
name: A name for this op (optional).
Returns:
The mapped indices. It has the same shape and tensor type (dense or sparse)
as `tensor`.
"""
table = index_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The lookup table mapping `int64` index `Tensors` to string values.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_to_string_table_from_tensor and call the lookup "
"method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
"""Maps `tensor` of indices into string values based on `mapping`.
This operation converts `int64` indices into string values. The mapping is
initialized from a string `mapping` tensor where each element is a value and
the corresponding index within the tensor is the key.
Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` once.
For example:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
values = tf.contrib.lookup.index_to_string(
indices, mapping=mapping_string, default_value="UNKNOWN")
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
tensor: A `int64` `Tensor` with the indices to map to strings.
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The string values associated with the indices. The resulting dense
    feature value tensor has the same shape as the corresponding `indices`.
"""
table = index_to_string_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
class MutableHashTable(LookupInterface, checkpointable.CheckpointableBase):
"""A generic mutable hash table implementation.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
Example usage:
```python
table = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
value_dtype=tf.int64,
default_value=-1)
sess.run(table.insert(keys, values))
out = table.lookup(query_keys)
print(out.eval())
```
"""
def __init__(self,
key_dtype,
value_dtype,
default_value,
shared_name=None,
name="MutableHashTable",
checkpoint=True):
"""Creates an empty `MutableHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(default_value,
dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
executing_eagerly = context.executing_eagerly()
if executing_eagerly and shared_name is None:
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "table_%d" % (ops.uid(),)
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
if self._default_value.get_shape().ndims == 0:
self._table_ref = gen_lookup_ops.mutable_hash_table_v2(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
name=name)
else:
self._table_ref = gen_lookup_ops.mutable_hash_table_of_tensors_v2(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
value_shape=self._default_value.get_shape(),
name=name)
if executing_eagerly:
op_name = None
else:
op_name = self._table_ref.op.name.split("/")[-1]
super(MutableHashTable, self).__init__(key_dtype, value_dtype,
op_name)
if checkpoint:
saveable = MutableHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
return gen_lookup_ops.lookup_table_size_v2(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
(self._table_ref, keys, self._default_value)) as name:
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
with ops.colocate_with(self._table_ref):
values = gen_lookup_ops.lookup_table_find_v2(
self._table_ref, keys, self._default_value, name=name)
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
keys = ops.convert_to_tensor(keys, self._key_dtype, name="keys")
values = ops.convert_to_tensor(values, self._value_dtype, name="values")
with ops.colocate_with(self._table_ref):
# pylint: disable=protected-access
op = gen_lookup_ops.lookup_table_insert_v2(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
return exported_keys, exported_values
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {"table": functools.partial(MutableHashTable._Saveable, table=self)}
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.colocate_with(self.op._table_ref):
return gen_lookup_ops.lookup_table_import_v2(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
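# A minimal checkpointing sketch (illustrative only, not part of the original
# module), assuming a TF 1.x graph/session environment and a writable,
# hypothetical path "/tmp/table_ckpt": with checkpoint=True the constructor
# registers a _Saveable in the SAVEABLE_OBJECTS collection, so a plain Saver
# also writes the table's keys and values.
#
#   table = tf.contrib.lookup.MutableHashTable(
#       key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
#   saver = tf.train.Saver()
#   with tf.Session() as sess:
#     sess.run(table.insert(tf.constant(["a"]), tf.constant([1], tf.int64)))
#     saver.save(sess, "/tmp/table_ckpt")
#     # A later saver.restore(sess, "/tmp/table_ckpt") repopulates the table.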
class MutableDenseHashTable(LookupInterface, checkpointable.CheckpointableBase):
"""A generic mutable hash table implementation using tensors as backing store.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
It uses "open addressing" with quadratic reprobing to resolve collisions.
Compared to `MutableHashTable` the insert and lookup operations in a
`MutableDenseHashTable` are typically faster, but memory usage can be higher.
However, `MutableDenseHashTable` does not require additional memory for
temporary tensors created during checkpointing and restore operations.
Example usage:
```python
table = tf.contrib.lookup.MutableDenseHashTable(key_dtype=tf.int64,
value_dtype=tf.int64,
default_value=-1,
empty_key=0)
sess.run(table.insert(keys, values))
out = table.lookup(query_keys)
print(out.eval())
```
"""
# TODO(andreasst): consider extracting common code with MutableHashTable into
# a common superclass.
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
initial_num_buckets=None,
shared_name=None,
name="MutableDenseHashTable",
checkpoint=True):
"""Creates an empty `MutableDenseHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
empty_key: the key to use to represent empty buckets internally. Must not
be used in insert or lookup operations.
initial_num_buckets: the initial number of buckets.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype, name="default_value")
self._value_shape = self._default_value.get_shape()
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
empty_key = ops.convert_to_tensor(
empty_key, dtype=key_dtype, name="empty_key")
executing_eagerly = context.executing_eagerly()
if executing_eagerly and shared_name is None:
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "table_%d" % (ops.uid(),)
self._table_ref = gen_lookup_ops.mutable_dense_hash_table_v2(
empty_key=empty_key,
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
value_dtype=value_dtype,
value_shape=self._value_shape,
initial_num_buckets=initial_num_buckets,
name=name)
if executing_eagerly:
op_name = None
else:
op_name = self._table_ref.op.name.split("/")[-1]
super(MutableDenseHashTable, self).__init__(
key_dtype, value_dtype, op_name)
if checkpoint:
saveable = MutableDenseHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
return gen_lookup_ops.lookup_table_size_v2(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
[self._table_ref, keys]) as name:
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
with ops.colocate_with(self._table_ref):
values = gen_lookup_ops.lookup_table_find_v2(
self._table_ref, keys, self._default_value, name=name)
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
values = ops.convert_to_tensor(
values, dtype=self._value_dtype, name="values")
with ops.colocate_with(self._table_ref):
op = gen_lookup_ops.lookup_table_insert_v2(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
return exported_keys, exported_values
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {"table": functools.partial(
MutableDenseHashTable._Saveable, table=self)}
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableDenseHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableDenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.colocate_with(self.op._table_ref):
return gen_lookup_ops.lookup_table_import_v2(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
|
|
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
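# A small usage sketch (illustrative, not part of the original source): the
# order-aware operations that plain dicts lack.
#
#   d = OrderedDict.fromkeys('abcde')
#   d.move_to_end('b')              # order is now a, c, d, e, b
#   d.move_to_end('b', last=False)  # order is now b, a, c, d, e
#   d.popitem()                     # ('e', None)  -- LIFO by default
#   d.popitem(last=False)           # ('b', None)  -- FIFO when last=False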
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
@property
def __dict__(self):
'A new OrderedDict mapping field names to their values'
return OrderedDict(zip(self._fields, self))
def _asdict(self):
'''Return a new OrderedDict which maps field names to their values.
This method is obsolete. Use vars(nt) or nt.__dict__ instead.
'''
return self.__dict__
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
return None
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
'identifiers: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
return self + Counter()
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
return Counter() - self
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
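# A small usage sketch (illustrative, not part of the original source): the
# unary operators above are shorthand for combining with an empty counter.
#
#   c = Counter(a=2, b=-4)
#   +c    # Counter({'a': 2})  -- same as c + Counter(); drops non-positive counts
#   -c    # Counter({'b': 4})  -- flips negative counts and drops the rest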
########################################################################
### ChainMap (helper for configparser and string.Template)
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
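# A minimal usage sketch (illustrative, not part of the class above): lookups fall
# through the chained maps in order, while writes land only in maps[0], as the
# class docstring describes.
#
#     >>> defaults = {'color': 'red', 'user': 'guest'}
#     >>> overrides = {'user': 'admin'}
#     >>> cm = ChainMap(overrides, defaults)
#     >>> cm['user'], cm['color']
#     ('admin', 'red')
#     >>> cm['color'] = 'blue'        # write goes to overrides (maps[0])
#     >>> defaults['color']
#     'red'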
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
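# Subclassing sketch (an assumption about typical use, not part of the module):
# UserDict keeps its contents in ``self.data``, so subclasses can adjust behaviour
# by overriding individual mapping methods.
#
#     >>> class LowerKeyDict(UserDict):
#     ...     def __setitem__(self, key, item):
#     ...         self.data[key.lower()] = item
#     >>> d = LowerKeyDict()
#     >>> d['NAME'] = 'ada'
#     >>> d.data
#     {'name': 'ada'}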
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def clear(self): self.data.clear()
def copy(self): return self.__class__(self)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __ne__(self, string):
if isinstance(string, UserString):
return self.data != string.data
return self.data != string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
|
|
""" OriginalPostSearcher bot """
import herokuDB
import ignoredSubs
import praw
import time
from sqlalchemy import create_engine
from sqlalchemy import text
REDDIT_CLIENT = praw.Reddit(user_agent="OriginalPostSearcher 2.0.0")
REDDIT_CLIENT.login(disable_warning=True)
# a list of words that might be an "xpost"
X_POST_DICTIONARY = set()
X_POST_DICTIONARY.update(["xpost", "x-post", "crosspost","cross-post",
"xposted", "crossposted", "x-posted"])
# list of words to check for so we don't post if source is already there
ORIGINAL_COMMENTS = set()
ORIGINAL_COMMENTS.update(['source', 'original', 'original post', 'sauce', 'link',
                          'x-post', 'xpost', 'crosspost', 'cross post',
'cross-post', 'referenced', 'credit', 'credited', 'other',
'post'])
# create the ENGINE for the database
ENGINE = create_engine(herokuDB.url)
# don't bother these subs
IGNORED_SUBS = set()
IGNORED_SUBS.update(ignoredSubs.ignore_list)
class SearchBot(object):
def __init__(self):
self.xpost_dict = X_POST_DICTIONARY
self.ignored_subs = IGNORED_SUBS
# cache for database
self.cache = set()
self.temp_cache = set()
self.xpost_submissions = set()
# fields for the xposted submission
self.xpost_url = None # link shared in the submission
self.xpost_permalink = None
self.xpost_title = None
self.xpost_author = None
self.xpost_sub = None # subreddit object of xpost
self.xpost_sub_title = None # the string of the subreddit
# fields for the original subreddit
self.original_sub = None # subreddit object
self.original_sub_title = None # title of the subreddit
self.original_title = None
self.original_permalink = None
self.original_author = None
# -------- Main Bot Methods --------- #
def create_comment(self, submission):
print "Making comment\n"
if not self.original_author:
self.original_author = "a [deleted] user"
else:
self.original_author = "/u/" + str(self.original_author)
# make links np links
original_link_list = self.original_link.split("https://www.")
self.original_link = "http://np." + original_link_list[1]
# create the string to comment with
comment_string = ("X-Post referenced from [/r/" +
self.original_sub_title + "](http://np.reddit.com/r/" +
self.original_sub_title + ") by " + self.original_author +
" \n[" + self.original_title.encode('utf-8') +
"](" + self.original_link.encode('utf-8') +
")\n***** \n \n^^I ^^am ^^a ^^bot. ^^I" +
" ^^delete ^^my ^^negative ^^comments. ^^[Contact]" +
"(https://www.reddit.com/message/" +
"compose/?to=OriginalPostSearcher)" +
" ^^| ^^[Code](https://github.com/" +
"papernotes/Reddit-OriginalPostSearcher)" +
" ^^| ^^[FAQ](https://github.com/papernotes/" +
"Reddit-OriginalPostSearcher#faq)")
print comment_string
# double check
if self.has_source(submission):
print "Source found"
else:
submission.add_comment(comment_string)
print "\nCommented!"
def delete_negative(self):
print "Checking previous comments for deletion"
user = REDDIT_CLIENT.get_redditor('OriginalPostSearcher')
submitted = user.get_comments(limit=200)
for item in submitted:
if int(item.score) < -1:
print("\nDeleted negative comment\n " + str(item))
item.delete()
def get_original_sub(self):
try:
self.xpost_title = self.xpost_title.split()
        except:
            print "Failed split"
            self.original_sub_title = None
            return
try:
for word in self.xpost_title:
if '/r/' in word:
# split from /r/
word = word.split('/r/')[1]
word = word.split(')')[0] # try for parentheses first
word = word.split(']')[0] # try for brackets
print("/r/ word = " + word.encode('utf-8'))
self.original_sub_title = word
break
# split for "r/" only format
elif 'r/' in word:
word = word.split('r/')[1]
word = word.split(')')[0] # try for parentheses first
word = word.split(']')[0] # try for brackets
print("r/ word = " + word.encode('utf-8'))
self.original_sub_title = word
break
else:
self.original_sub_title = None
except:
print("Could not get original subreddit")
self.original_sub_title = None
def reset_fields(self):
self.original_sub_title = None
self.original_found = False
def search_for_post(self, submission, lim):
duplicates = submission.get_duplicates(limit=lim)
print "Searching Dupes"
for submission in duplicates:
if self.is_original(submission):
self.original_permalink = submission.permalink
return True
poster_name = self.xpost_author.encode('utf-8')
poster = REDDIT_CLIENT.get_redditor(poster_name)
user_submissions = poster.get_submitted(limit=lim)
print "Searching User"
for submission in user_submissions:
if self.is_original(submission):
self.original_permalink = submission.permalink
return True
# in case the subreddit doesn't exist
try:
self.original_sub = REDDIT_CLIENT.get_subreddit(self.original_sub_title)
print "Searching New"
for submission in self.original_sub.get_new(limit=lim):
if self.is_original(submission):
self.original_permalink = submission.permalink
return True
print "Searching Hot"
for submission in self.original_sub.get_hot(limit=lim):
if self.is_original(submission):
self.original_permalink = submission.permalink
return True
        except:
            return False
        print "--------------Failed all searches"
        return False
def set_original_fields(self, submission):
try:
self.original_title = submission.title.encode('utf-8')
self.original_link = submission.permalink
self.original_author = submission.author
self.original_found = True
except:
pass
def set_xpost_fields(self, submission):
try:
self.xpost_url = submission.url.encode('utf-8')
self.xpost_permalink = submission.permalink
self.xpost_author = submission.author.name
self.xpost_title = submission.title.lower().encode('utf-8')
self.xpost_sub = submission.subreddit
self.xpost_sub_title = str(submission.subreddit.display_name.lower())
except:
pass
def set_xpost_submissions(self, search_terms, client):
"""
Searches for the most recent xposts and sets it
"""
print "Finding xposts"
for entry in search_terms:
for title in client.search(entry, sort="new"):
self.xpost_submissions.add(title)
def get_xpost_title(self, title):
# format TITLE(xpost)
if (len(title) == title.find(')') + 1):
return title.split('(')[0]
# format TITLE[xpost]
elif (len(title) == title.find(']') + 1):
return title.split('[')[0]
# format (xpost)TITLE
elif (title.find('(') == 0):
return title.split(')')[1]
# format [xpost]TITLE
elif (title.find('[') == 0):
            return title.split(']')[1]
# weird format, return false
else:
print ("Couldn't get title correctly")
return None
# -------- Boolean Methods --------- #
def has_source(self, submission):
for comment in submission.comments:
try:
if (any(string in str(comment.body).lower()
for string in ORIGINAL_COMMENTS)):
print("Source in comments found: ")
print(" " + str(comment.body) + "\n")
return True
except:
pass
print "No 'source' comments found"
return False
    def is_ignored_or_nsfw(self, submission):
        # Returns True when the submission is OK to process, i.e. it is neither
        # in an ignored subreddit nor marked NSFW (the call site negates this).
        return not (submission.subreddit.display_name.lower() in self.ignored_subs or
                    submission.over_18 is True)
def is_original(self, submission):
try:
if (self.xpost_url == str(submission.url).encode('utf-8') and
submission.subreddit.display_name.lower().encode('utf-8') == self.original_sub_title and
submission.over_18 is False and
not self.xpost_permalink in submission.permalink):
self.set_original_fields(submission)
return True
return False
except:
pass
return False
def is_same_ref(self):
"""
If the original submission's title is an x-post referencing
the xpost sub, then return True
"""
if self.xpost_sub_title in self.original_title:
print "True Ref"
return True
print "False Ref"
return False
def is_xpost(self, submission):
submission_title = submission.title.lower()
try:
submission_title = submission_title.encode('utf-8')
        except:
            return False
return any(string in submission_title for string in self.xpost_dict)
# -------- Database --------- #
def clear_database(self):
num_rows = ENGINE.execute("select * from searched_posts")
if num_rows.rowcount > 1000:
ENGINE.execute("delete from searched_posts")
print "Cleared database"
        if len(self.cache) > 1000:
            # self.cache is a set, so convert to a list before discarding half of it
            self.cache = set(list(self.cache)[len(self.cache) / 2:])
print "Halved cache"
def id_added(self, sub_id):
id_added_text = text("select * from searched_posts where post_id = :postID")
return ENGINE.execute(id_added_text, postID=sub_id).rowcount != 0
def setup_database_cache(self):
result = ENGINE.execute("select * from searched_posts")
for row in result:
self.temp_cache.add(str(row[0]))
for value in self.temp_cache:
if value not in self.cache:
self.cache.add(str(value))
def write_to_file(self, sub_id):
"""
Saves the submission we just searched
"""
if not self.id_added(sub_id):
temp_text = text('insert into searched_posts (post_id) values(:postID)')
ENGINE.execute(temp_text, postID=sub_id)
if __name__ == '__main__':
bot = SearchBot()
print "Created bot"
while True:
bot.set_xpost_submissions(X_POST_DICTIONARY, REDDIT_CLIENT)
bot.setup_database_cache()
for submission in bot.xpost_submissions:
# NSFW content or ignored subreddit
if not bot.is_ignored_or_nsfw(submission) and submission.id not in bot.cache:
bot.write_to_file(submission.id)
bot.reset_fields()
continue
if bot.is_xpost(submission) and submission.id not in bot.cache:
bot.set_xpost_fields(submission)
try:
if "reddit" in bot.xpost_url.encode('utf-8'):
print "Post links to Reddit"
bot.write_to_file(submission.id)
bot.reset_fields()
continue
except:
bot.write_to_file(submission.id)
bot.reset_fields()
continue
print("\nXPost found!")
print("subreddit = " + bot.xpost_sub_title)
print("post title = " + bot.xpost_title)
print("xpost_url = " + bot.xpost_url)
print("xpost_permalink = " + bot.xpost_permalink.encode('utf-8'))
bot.write_to_file(submission.id)
bot.get_original_sub()
                if (bot.original_sub_title is None or
bot.original_sub_title == bot.xpost_sub.display_name.lower().encode('utf-8')):
print "Failed original subreddit or same subreddit"
bot.reset_fields()
else:
if not bot.has_source(submission) and bot.search_for_post(submission, 150) and not bot.is_same_ref():
try:
bot.create_comment(submission)
bot.write_to_file(submission.id)
bot.reset_fields()
except:
print "Failed to comment"
bot.write_to_file(submission.id)
bot.reset_fields()
else:
print "Failed to find source"
bot.write_to_file(submission.id)
bot.reset_fields()
# the submission is not an xpost or submission id is in cache already
else:
bot.reset_fields()
bot.delete_negative()
bot.temp_cache.clear()
bot.xpost_submissions.clear()
print "\nSleeping\n"
time.sleep(10)
if len(bot.cache) > 1000:
bot.clear_database()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_vpntrafficpolicy_binding(base_resource) :
""" Binding class showing the vpntrafficpolicy that can be bound to vpnglobal.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._secondary = False
self._groupextraction = False
self.___count = 0
@property
def priority(self) :
ur"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""The name of the policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""The name of the policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def secondary(self) :
ur"""Bind the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only to a primary authentication server but also to a secondary authentication server. User groups are aggregated across both authentication servers. The user name must be exactly the same on both authentication servers, but the authentication servers can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
ur"""Bind the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only to a primary authentication server but also to a secondary authentication server. User groups are aggregated across both authentication servers. The user name must be exactly the same on both authentication servers, but the authentication servers can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
    def groupextraction(self) :
        ur"""Bind the Authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
    def groupextraction(self, groupextraction) :
        ur"""Bind the Authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnglobal_vpntrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnglobal_vpntrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnglobal_vpntrafficpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnglobal_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnglobal_vpntrafficpolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnglobal_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
    def get(cls, service) :
        ur""" Use this API to fetch vpnglobal_vpntrafficpolicy_binding resources.
"""
try :
obj = vpnglobal_vpntrafficpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
ur""" Use this API to fetch filtered set of vpnglobal_vpntrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_vpntrafficpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
    def count(cls, service) :
        ur""" Use this API to count vpnglobal_vpntrafficpolicy_binding resources configured on NetScaler.
"""
try :
obj = vpnglobal_vpntrafficpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
ur""" Use this API to count the filtered set of vpnglobal_vpntrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_vpntrafficpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class vpnglobal_vpntrafficpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnglobal_vpntrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnglobal_vpntrafficpolicy_binding = [vpnglobal_vpntrafficpolicy_binding() for _ in range(length)]
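# Usage sketch, entirely commented out. The nitro_service construction below is an
# assumption for illustration; only the classmethods defined above come from this file.
#
#     # from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#     # client = nitro_service("192.0.2.10", "https")      # hypothetical appliance address
#     # client.login("nsroot", "password")                 # credentials are placeholders
#     # bindings = vpnglobal_vpntrafficpolicy_binding.get(client)
#     # for binding in bindings:
#     #     print(binding.policyname, binding.priority)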
|
|
"""Integration providing core pieces of infrastructure."""
import asyncio
import itertools as it
import logging
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
SERVICE_SAVE_PERSISTENT_STATES,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
import homeassistant.core as ha
from homeassistant.exceptions import HomeAssistantError, Unauthorized, UnknownUser
from homeassistant.helpers import config_validation as cv, recorder, restore_state
from homeassistant.helpers.service import (
async_extract_config_entry_ids,
async_extract_referenced_entity_ids,
)
from homeassistant.helpers.typing import ConfigType
ATTR_ENTRY_ID = "entry_id"
_LOGGER = logging.getLogger(__name__)
DOMAIN = ha.DOMAIN
SERVICE_RELOAD_CORE_CONFIG = "reload_core_config"
SERVICE_RELOAD_CONFIG_ENTRY = "reload_config_entry"
SERVICE_CHECK_CONFIG = "check_config"
SERVICE_UPDATE_ENTITY = "update_entity"
SERVICE_SET_LOCATION = "set_location"
SCHEMA_UPDATE_ENTITY = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
SCHEMA_RELOAD_CONFIG_ENTRY = vol.All(
vol.Schema(
{
vol.Optional(ATTR_ENTRY_ID): str,
**cv.ENTITY_SERVICE_FIELDS,
},
),
cv.has_at_least_one_key(ATTR_ENTRY_ID, *cv.ENTITY_SERVICE_FIELDS),
)
SHUTDOWN_SERVICES = (SERVICE_HOMEASSISTANT_STOP, SERVICE_HOMEASSISTANT_RESTART)
async def async_setup(hass: ha.HomeAssistant, config: ConfigType) -> bool: # noqa: C901
"""Set up general services related to Home Assistant."""
async def async_save_persistent_states(service):
"""Handle calls to homeassistant.save_persistent_states."""
await restore_state.RestoreStateData.async_save_persistent_states(hass)
async def async_handle_turn_service(service):
"""Handle calls to homeassistant.turn_on/off."""
referenced = await async_extract_referenced_entity_ids(hass, service)
all_referenced = referenced.referenced | referenced.indirectly_referenced
# Generic turn on/off method requires entity id
if not all_referenced:
_LOGGER.error(
"The service homeassistant.%s cannot be called without a target",
service.service,
)
return
# Group entity_ids by domain. groupby requires sorted data.
by_domain = it.groupby(
sorted(all_referenced), lambda item: ha.split_entity_id(item)[0]
)
tasks = []
unsupported_entities = set()
for domain, ent_ids in by_domain:
# This leads to endless loop.
if domain == DOMAIN:
_LOGGER.warning(
"Called service homeassistant.%s with invalid entities %s",
service.service,
", ".join(ent_ids),
)
continue
if not hass.services.has_service(domain, service.service):
unsupported_entities.update(set(ent_ids) & referenced.referenced)
continue
# Create a new dict for this call
data = dict(service.data)
# ent_ids is a generator, convert it to a list.
data[ATTR_ENTITY_ID] = list(ent_ids)
tasks.append(
hass.services.async_call(
domain,
service.service,
data,
blocking=True,
context=service.context,
)
)
if unsupported_entities:
_LOGGER.warning(
"The service homeassistant.%s does not support entities %s",
service.service,
", ".join(sorted(unsupported_entities)),
)
if tasks:
await asyncio.gather(*tasks)
hass.services.async_register(
ha.DOMAIN, SERVICE_SAVE_PERSISTENT_STATES, async_save_persistent_states
)
service_schema = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids}, extra=vol.ALLOW_EXTRA)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_OFF, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_ON, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TOGGLE, async_handle_turn_service, schema=service_schema
)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if (
call.service in SHUTDOWN_SERVICES
and await recorder.async_migration_in_progress(hass)
):
_LOGGER.error(
"The system cannot %s while a database upgrade is in progress",
call.service,
)
raise HomeAssistantError(
f"The system cannot {call.service} "
"while a database upgrade is in progress."
)
if call.service == SERVICE_HOMEASSISTANT_STOP:
asyncio.create_task(hass.async_stop())
return
errors = await conf_util.async_check_ha_config_file(hass)
if errors:
_LOGGER.error(
"The system cannot %s because the configuration is not valid: %s",
call.service,
errors,
)
hass.components.persistent_notification.async_create(
"Config error. See [the logs](/config/logs) for details.",
"Config validating",
f"{ha.DOMAIN}.check_config",
)
raise HomeAssistantError(
f"The system cannot {call.service} "
f"because the configuration is not valid: {errors}"
)
if call.service == SERVICE_HOMEASSISTANT_RESTART:
asyncio.create_task(hass.async_stop(RESTART_EXIT_CODE))
async def async_handle_update_service(call):
"""Service handler for updating an entity."""
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
)
for entity in call.data[ATTR_ENTITY_ID]:
if not user.permissions.check_entity(entity, POLICY_CONTROL):
raise Unauthorized(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
perm_category=CAT_ENTITIES,
)
tasks = [
hass.helpers.entity_component.async_update_entity(entity)
for entity in call.data[ATTR_ENTITY_ID]
]
if tasks:
await asyncio.wait(tasks)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_STOP, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_RESTART, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_CHECK_CONFIG, async_handle_core_service
)
hass.services.async_register(
ha.DOMAIN,
SERVICE_UPDATE_ENTITY,
async_handle_update_service,
schema=SCHEMA_UPDATE_ENTITY,
)
async def async_handle_reload_config(call):
"""Service handler for reloading core config."""
try:
conf = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(err)
return
# auth only processed during startup
await conf_util.async_process_ha_core_config(hass, conf.get(ha.DOMAIN) or {})
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG, async_handle_reload_config
)
async def async_set_location(call):
"""Service handler to set location."""
await hass.config.async_update(
latitude=call.data[ATTR_LATITUDE], longitude=call.data[ATTR_LONGITUDE]
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN,
SERVICE_SET_LOCATION,
async_set_location,
vol.Schema({ATTR_LATITUDE: cv.latitude, ATTR_LONGITUDE: cv.longitude}),
)
async def async_handle_reload_config_entry(call):
"""Service handler for reloading a config entry."""
reload_entries = set()
if ATTR_ENTRY_ID in call.data:
reload_entries.add(call.data[ATTR_ENTRY_ID])
reload_entries.update(await async_extract_config_entry_ids(hass, call))
if not reload_entries:
raise ValueError("There were no matching config entries to reload")
await asyncio.gather(
*(
hass.config_entries.async_reload(config_entry_id)
for config_entry_id in reload_entries
)
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN,
SERVICE_RELOAD_CONFIG_ENTRY,
async_handle_reload_config_entry,
schema=SCHEMA_RELOAD_CONFIG_ENTRY,
)
return True
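# Illustrative sketch (not part of the integration): after async_setup has run,
# the services registered above are invoked like any other Home Assistant service,
# for example:
#
#     # await hass.services.async_call(
#     #     ha.DOMAIN,
#     #     SERVICE_UPDATE_ENTITY,
#     #     {ATTR_ENTITY_ID: ["light.kitchen"]},   # entity id is a placeholder
#     #     blocking=True,
#     # )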
|
|
"""Support for Insteon Thermostats via ISY994 Platform."""
from __future__ import annotations
from pyisy.constants import (
CMD_CLIMATE_FAN_SETTING,
CMD_CLIMATE_MODE,
PROP_HEAT_COOL_STATE,
PROP_HUMIDITY,
PROP_SETPOINT_COOL,
PROP_SETPOINT_HEAT,
PROP_UOM,
PROTO_INSTEON,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
_LOGGER,
DOMAIN as ISY994_DOMAIN,
HA_FAN_TO_ISY,
HA_HVAC_TO_ISY,
ISY994_NODES,
ISY_HVAC_MODES,
UOM_FAN_MODES,
UOM_HVAC_ACTIONS,
UOM_HVAC_MODE_GENERIC,
UOM_HVAC_MODE_INSTEON,
UOM_ISY_CELSIUS,
UOM_ISY_FAHRENHEIT,
UOM_ISYV4_NONE,
UOM_TO_STATES,
)
from .entity import ISYNodeEntity
from .helpers import convert_isy_value_to_hass, migrate_old_unique_ids
ISY_SUPPORTED_FEATURES = (
SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> bool:
"""Set up the ISY994 thermostat platform."""
entities = []
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
for node in hass_isy_data[ISY994_NODES][CLIMATE]:
entities.append(ISYThermostatEntity(node))
await migrate_old_unique_ids(hass, CLIMATE, entities)
async_add_entities(entities)
class ISYThermostatEntity(ISYNodeEntity, ClimateEntity):
"""Representation of an ISY994 thermostat entity."""
def __init__(self, node) -> None:
"""Initialize the ISY Thermostat entity."""
super().__init__(node)
self._node = node
self._uom = self._node.uom
if isinstance(self._uom, list):
self._uom = self._node.uom[0]
self._hvac_action = None
self._hvac_mode = None
self._fan_mode = None
self._temp_unit = None
self._current_humidity = 0
self._target_temp_low = 0
self._target_temp_high = 0
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return ISY_SUPPORTED_FEATURES
@property
def precision(self) -> str:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if not (uom := self._node.aux_properties.get(PROP_UOM)):
return self.hass.config.units.temperature_unit
if uom.value == UOM_ISY_CELSIUS:
return TEMP_CELSIUS
        if uom.value == UOM_ISY_FAHRENHEIT:
            return TEMP_FAHRENHEIT
        return self.hass.config.units.temperature_unit
@property
def current_humidity(self) -> int | None:
"""Return the current humidity."""
if not (humidity := self._node.aux_properties.get(PROP_HUMIDITY)):
return None
return int(humidity.value)
@property
def hvac_mode(self) -> str | None:
"""Return hvac operation ie. heat, cool mode."""
if not (hvac_mode := self._node.aux_properties.get(CMD_CLIMATE_MODE)):
return None
# Which state values used depends on the mode property's UOM:
uom = hvac_mode.uom
# Handle special case for ISYv4 Firmware:
if uom in (UOM_ISYV4_NONE, ""):
uom = (
UOM_HVAC_MODE_INSTEON
if self._node.protocol == PROTO_INSTEON
else UOM_HVAC_MODE_GENERIC
)
return UOM_TO_STATES[uom].get(hvac_mode.value)
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return ISY_HVAC_MODES
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation if supported."""
hvac_action = self._node.aux_properties.get(PROP_HEAT_COOL_STATE)
if not hvac_action:
return None
return UOM_TO_STATES[UOM_HVAC_ACTIONS].get(hvac_action.value)
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return convert_isy_value_to_hass(
self._node.status, self._uom, self._node.prec, 1
)
@property
def target_temperature_step(self) -> float | None:
"""Return the supported step of target temperature."""
return 1.0
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self.target_temperature_high
if self.hvac_mode == HVAC_MODE_HEAT:
return self.target_temperature_low
return None
@property
def target_temperature_high(self) -> float | None:
"""Return the highbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_COOL)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def target_temperature_low(self) -> float | None:
"""Return the lowbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_HEAT)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [FAN_AUTO, FAN_ON]
@property
def fan_mode(self) -> str:
"""Return the current fan mode ie. auto, on."""
fan_mode = self._node.aux_properties.get(CMD_CLIMATE_FAN_SETTING)
if not fan_mode:
return None
return UOM_TO_STATES[UOM_FAN_MODES].get(fan_mode.value)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.hvac_mode == HVAC_MODE_COOL:
target_temp_high = target_temp
if self.hvac_mode == HVAC_MODE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
await self._node.set_climate_setpoint_heat(int(target_temp_low))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_low = target_temp_low
if target_temp_high is not None:
await self._node.set_climate_setpoint_cool(int(target_temp_high))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_high = target_temp_high
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
_LOGGER.debug("Requested fan mode %s", fan_mode)
await self._node.set_fan_mode(HA_FAN_TO_ISY.get(fan_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._fan_mode = fan_mode
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
_LOGGER.debug("Requested operation mode %s", hvac_mode)
await self._node.set_climate_mode(HA_HVAC_TO_ISY.get(hvac_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._hvac_mode = hvac_mode
self.async_write_ha_state()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.channel_v1.types import channel_partner_links
from google.cloud.channel_v1.types import customers
from google.cloud.channel_v1.types import entitlements
from google.cloud.channel_v1.types import offers
from google.cloud.channel_v1.types import service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-channel",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CloudChannelServiceTransport(abc.ABC):
"""Abstract transport class for CloudChannelService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/apps.order",)
DEFAULT_HOST: str = "cloudchannel.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_customers: gapic_v1.method.wrap_method(
self.list_customers, default_timeout=None, client_info=client_info,
),
self.get_customer: gapic_v1.method.wrap_method(
self.get_customer, default_timeout=None, client_info=client_info,
),
self.check_cloud_identity_accounts_exist: gapic_v1.method.wrap_method(
self.check_cloud_identity_accounts_exist,
default_timeout=None,
client_info=client_info,
),
self.create_customer: gapic_v1.method.wrap_method(
self.create_customer, default_timeout=None, client_info=client_info,
),
self.update_customer: gapic_v1.method.wrap_method(
self.update_customer, default_timeout=None, client_info=client_info,
),
self.delete_customer: gapic_v1.method.wrap_method(
self.delete_customer, default_timeout=None, client_info=client_info,
),
self.import_customer: gapic_v1.method.wrap_method(
self.import_customer, default_timeout=None, client_info=client_info,
),
self.provision_cloud_identity: gapic_v1.method.wrap_method(
self.provision_cloud_identity,
default_timeout=60.0,
client_info=client_info,
),
self.list_entitlements: gapic_v1.method.wrap_method(
self.list_entitlements, default_timeout=None, client_info=client_info,
),
self.list_transferable_skus: gapic_v1.method.wrap_method(
self.list_transferable_skus,
default_timeout=None,
client_info=client_info,
),
self.list_transferable_offers: gapic_v1.method.wrap_method(
self.list_transferable_offers,
default_timeout=None,
client_info=client_info,
),
self.get_entitlement: gapic_v1.method.wrap_method(
self.get_entitlement, default_timeout=None, client_info=client_info,
),
self.create_entitlement: gapic_v1.method.wrap_method(
self.create_entitlement, default_timeout=60.0, client_info=client_info,
),
self.change_parameters: gapic_v1.method.wrap_method(
self.change_parameters, default_timeout=60.0, client_info=client_info,
),
self.change_renewal_settings: gapic_v1.method.wrap_method(
self.change_renewal_settings,
default_timeout=60.0,
client_info=client_info,
),
self.change_offer: gapic_v1.method.wrap_method(
self.change_offer, default_timeout=60.0, client_info=client_info,
),
self.start_paid_service: gapic_v1.method.wrap_method(
self.start_paid_service, default_timeout=60.0, client_info=client_info,
),
self.suspend_entitlement: gapic_v1.method.wrap_method(
self.suspend_entitlement, default_timeout=60.0, client_info=client_info,
),
self.cancel_entitlement: gapic_v1.method.wrap_method(
self.cancel_entitlement, default_timeout=60.0, client_info=client_info,
),
self.activate_entitlement: gapic_v1.method.wrap_method(
self.activate_entitlement,
default_timeout=60.0,
client_info=client_info,
),
self.transfer_entitlements: gapic_v1.method.wrap_method(
self.transfer_entitlements,
default_timeout=60.0,
client_info=client_info,
),
self.transfer_entitlements_to_google: gapic_v1.method.wrap_method(
self.transfer_entitlements_to_google,
default_timeout=60.0,
client_info=client_info,
),
self.list_channel_partner_links: gapic_v1.method.wrap_method(
self.list_channel_partner_links,
default_timeout=None,
client_info=client_info,
),
self.get_channel_partner_link: gapic_v1.method.wrap_method(
self.get_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.create_channel_partner_link: gapic_v1.method.wrap_method(
self.create_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.update_channel_partner_link: gapic_v1.method.wrap_method(
self.update_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.lookup_offer: gapic_v1.method.wrap_method(
self.lookup_offer, default_timeout=None, client_info=client_info,
),
self.list_products: gapic_v1.method.wrap_method(
self.list_products, default_timeout=None, client_info=client_info,
),
self.list_skus: gapic_v1.method.wrap_method(
self.list_skus, default_timeout=None, client_info=client_info,
),
self.list_offers: gapic_v1.method.wrap_method(
self.list_offers, default_timeout=None, client_info=client_info,
),
self.list_purchasable_skus: gapic_v1.method.wrap_method(
self.list_purchasable_skus,
default_timeout=None,
client_info=client_info,
),
self.list_purchasable_offers: gapic_v1.method.wrap_method(
self.list_purchasable_offers,
default_timeout=None,
client_info=client_info,
),
self.register_subscriber: gapic_v1.method.wrap_method(
self.register_subscriber, default_timeout=None, client_info=client_info,
),
self.unregister_subscriber: gapic_v1.method.wrap_method(
self.unregister_subscriber,
default_timeout=None,
client_info=client_info,
),
self.list_subscribers: gapic_v1.method.wrap_method(
self.list_subscribers, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_customers(
self,
) -> Callable[
[service.ListCustomersRequest],
Union[service.ListCustomersResponse, Awaitable[service.ListCustomersResponse]],
]:
raise NotImplementedError()
@property
def get_customer(
self,
) -> Callable[
[service.GetCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def check_cloud_identity_accounts_exist(
self,
) -> Callable[
[service.CheckCloudIdentityAccountsExistRequest],
Union[
service.CheckCloudIdentityAccountsExistResponse,
Awaitable[service.CheckCloudIdentityAccountsExistResponse],
],
]:
raise NotImplementedError()
@property
def create_customer(
self,
) -> Callable[
[service.CreateCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def update_customer(
self,
) -> Callable[
[service.UpdateCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def delete_customer(
self,
) -> Callable[
[service.DeleteCustomerRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def import_customer(
self,
) -> Callable[
[service.ImportCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def provision_cloud_identity(
self,
) -> Callable[
[service.ProvisionCloudIdentityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_entitlements(
self,
) -> Callable[
[service.ListEntitlementsRequest],
Union[
service.ListEntitlementsResponse,
Awaitable[service.ListEntitlementsResponse],
],
]:
raise NotImplementedError()
@property
def list_transferable_skus(
self,
) -> Callable[
[service.ListTransferableSkusRequest],
Union[
service.ListTransferableSkusResponse,
Awaitable[service.ListTransferableSkusResponse],
],
]:
raise NotImplementedError()
@property
def list_transferable_offers(
self,
) -> Callable[
[service.ListTransferableOffersRequest],
Union[
service.ListTransferableOffersResponse,
Awaitable[service.ListTransferableOffersResponse],
],
]:
raise NotImplementedError()
@property
def get_entitlement(
self,
) -> Callable[
[service.GetEntitlementRequest],
Union[entitlements.Entitlement, Awaitable[entitlements.Entitlement]],
]:
raise NotImplementedError()
@property
def create_entitlement(
self,
) -> Callable[
[service.CreateEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_parameters(
self,
) -> Callable[
[service.ChangeParametersRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_renewal_settings(
self,
) -> Callable[
[service.ChangeRenewalSettingsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_offer(
self,
) -> Callable[
[service.ChangeOfferRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def start_paid_service(
self,
) -> Callable[
[service.StartPaidServiceRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def suspend_entitlement(
self,
) -> Callable[
[service.SuspendEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_entitlement(
self,
) -> Callable[
[service.CancelEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def activate_entitlement(
self,
) -> Callable[
[service.ActivateEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def transfer_entitlements(
self,
) -> Callable[
[service.TransferEntitlementsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def transfer_entitlements_to_google(
self,
) -> Callable[
[service.TransferEntitlementsToGoogleRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_channel_partner_links(
self,
) -> Callable[
[service.ListChannelPartnerLinksRequest],
Union[
service.ListChannelPartnerLinksResponse,
Awaitable[service.ListChannelPartnerLinksResponse],
],
]:
raise NotImplementedError()
@property
def get_channel_partner_link(
self,
) -> Callable[
[service.GetChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def create_channel_partner_link(
self,
) -> Callable[
[service.CreateChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def update_channel_partner_link(
self,
) -> Callable[
[service.UpdateChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def lookup_offer(
self,
) -> Callable[
[service.LookupOfferRequest], Union[offers.Offer, Awaitable[offers.Offer]]
]:
raise NotImplementedError()
@property
def list_products(
self,
) -> Callable[
[service.ListProductsRequest],
Union[service.ListProductsResponse, Awaitable[service.ListProductsResponse]],
]:
raise NotImplementedError()
@property
def list_skus(
self,
) -> Callable[
[service.ListSkusRequest],
Union[service.ListSkusResponse, Awaitable[service.ListSkusResponse]],
]:
raise NotImplementedError()
@property
def list_offers(
self,
) -> Callable[
[service.ListOffersRequest],
Union[service.ListOffersResponse, Awaitable[service.ListOffersResponse]],
]:
raise NotImplementedError()
@property
def list_purchasable_skus(
self,
) -> Callable[
[service.ListPurchasableSkusRequest],
Union[
service.ListPurchasableSkusResponse,
Awaitable[service.ListPurchasableSkusResponse],
],
]:
raise NotImplementedError()
@property
def list_purchasable_offers(
self,
) -> Callable[
[service.ListPurchasableOffersRequest],
Union[
service.ListPurchasableOffersResponse,
Awaitable[service.ListPurchasableOffersResponse],
],
]:
raise NotImplementedError()
@property
def register_subscriber(
self,
) -> Callable[
[service.RegisterSubscriberRequest],
Union[
service.RegisterSubscriberResponse,
Awaitable[service.RegisterSubscriberResponse],
],
]:
raise NotImplementedError()
@property
def unregister_subscriber(
self,
) -> Callable[
[service.UnregisterSubscriberRequest],
Union[
service.UnregisterSubscriberResponse,
Awaitable[service.UnregisterSubscriberResponse],
],
]:
raise NotImplementedError()
@property
def list_subscribers(
self,
) -> Callable[
[service.ListSubscribersRequest],
Union[
service.ListSubscribersResponse, Awaitable[service.ListSubscribersResponse]
],
]:
raise NotImplementedError()
__all__ = ("CloudChannelServiceTransport",)
|
|
from __future__ import print_function
import ipywidgets as widgets
from IPython.display import HTML, Javascript, display
from string import Template
from traitlets import Unicode, Bool, Int
# NOT FINISHED. DO NOT USE
# https://ace.c9.io/build/kitchen-sink.html
js_load = """
<script>
requirejs.config({
paths: {
'ace': ['//cdnjs.cloudflare.com/ajax/libs/ace/1.2.6/ace'],
},
});
require(['ace'], function(ace) {
console.log("ACE loaded :)");
return {};
});
</script>
"""
css_template = """
<style type="text/css" media="screen">
#${editor} {
margin-left: 15px;
margin-top: 15px;
height: ${height};
width: ${width};
border-style: ${border};
}
</style>
"""
js_template = """
<script>
requirejs.undef('editor');
define('editor', ["jupyter-js-widgets"], function(widgets) {
var EditorView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.ignorex = false;
this.ignorev = false;
//console.log('RENDER '+this.model.get('name'));
this.div = document.createElement('div');
this.div.setAttribute('id', this.model.get('name'));
this.el.appendChild(this.div);
this.listenTo(this.model, 'change:state', this._state_changed, this);
this.listenTo(this.model, 'change:theme', this._theme_changed, this);
this.listenTo(this.model, 'change:mode', this._mode_changed, this);
this.listenTo(this.model, 'change:value2', this._value2_changed, this);
this.listenTo(this.model, 'change:showmargin', this._showmargin_changed, this);
this.listenTo(this.model, 'change:fontsize', this._fontsize_changed, this);
this.update();
},
update: function() {
return EditorView.__super__.update.apply(this);
},
// Tell Backbone to listen to the change event of input controls
events: {
"change": "handle_color_change"
},
_state_changed: function() {
var state = this.model.get('state')
//console.log('state: ' + state );
if (state == 'start') {
var that = this;
this._ed = ace.edit(this.model.get('name'));
this._ed.getSession().on('change', function(e) {
if (that.ignorev == true) { return }
that.ignorex = true;
//console.log('CHANGE1');
that.model.set('value2', that._ed.getValue());
//console.log('CHANGE2');
that.touch();
//console.log('CHANGE3');
that.ignorex = false;
});
};
},
_theme_changed: function() {
//console.log("theme " + this.model.get('theme'));
this._ed.setTheme("ace/theme/"+this.model.get('theme'));
},
_mode_changed: function() {
//console.log("mode " + this.model.get('mode'));
this._ed.getSession().setMode("ace/mode/"+this.model.get('mode'));
},
_value2_changed: function() {
//console.log('value2 ' + this.ignorex);
if (this.ignorex == true) { return };
var val = this.model.get('value2');
//console.log('VALUE2 ' + val);
this.ignorev = true;
this._ed.setValue(val);
this.ignorev = false;
//console.log('VALUE2 DONE');
},
_showmargin_changed: function() {
this._ed.setShowPrintMargin(this.model.get('showmargin'));
},
_fontsize_changed: function() {
document.getElementById(this.model.get('name')).style.fontSize=this.model.get('fontsize');
},
// Callback for when the color is changed.
handle_color_change: function(event) {
        console.log('SOMETHING CHANGED');
console.log(event);
},
});
return {
EditorView: EditorView
};
});
</script>
"""
class EditorWidget(widgets.DOMWidget):
display(HTML(js_load + js_template))
_view_name = Unicode('EditorView').tag(sync=True)
_view_module = Unicode('editor').tag(sync=True)
name = Unicode('').tag(sync=True)
theme = Unicode('').tag(sync=True)
mode = Unicode('').tag(sync=True)
showmargin = Bool(True).tag(sync=True)
fontsize = Unicode('').tag(sync=True)
state = Unicode('').tag(sync=True)
value2 = Unicode('').tag(sync=True)
def __init__(self, **kwargs):
super(self.__class__, self).__init__(**kwargs)
self.errors = widgets.CallbackDispatcher(accepted_nargs=[0, 1])
self.on_msg(self._handle_custom_msg)
def _handle_custom_msg(self, content):
if 'event' in content and content['event'] == 'error':
self.errors()
self.errors(self)
class Editor(widgets.DOMWidget):
num = 0
def __init__(self, **kwargs):
self.name = 'editor' + str(Editor.num)
Editor.num += 1
height = kwargs.get('height', '500px')
width = kwargs.get('width', 'auto')
border = kwargs.get('border', 'solid')
self._theme = kwargs.get('theme', 'xcode')
self._mode = kwargs.get('mode', 'python')
self._fontsize = kwargs.get('fontsize', '14px')
d = dict(height=height,
width=width,
border=border,
editor=self.name)
temp = Template(css_template).substitute(d)
display(HTML(temp))
self.ed = EditorWidget()
self.ed.name = self.name
# self.ed.observe(self.value_loading, names='value2')
@property
def value(self):
return self.ed.value2
@value.setter
def value(self, val):
self.ed.value2 = val
@property
def theme(self):
return self.ed.theme
@theme.setter
def theme(self, val):
self.ed.theme = val
@property
def mode(self):
return self.ed.mode
@mode.setter
def mode(self, val):
self.ed.mode = val
@property
def fontsize(self):
return self.ed.fontsize
@fontsize.setter
def fontsize(self, val):
self.ed.fontsize = val
def _ipython_display_(self):
self.ed._ipython_display_()
self.ed.state = 'start'
self.ed.theme = self._theme
self.ed.mode = self._mode
self.ed.showmargin = False
self.ed.fontsize = self._fontsize
self.ed.state = ''
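# Hedged usage sketch (assumes a running Jupyter kernel that can reach the ACE
# CDN configured in js_load above; argument values are arbitrary examples).
# The function is illustrative and is never called in this module.
def _example_editor():
    ed = Editor(height='300px', theme='monokai', mode='python', fontsize='12px')
    display(ed)                            # triggers _ipython_display_ -> ace.edit()
    ed.value = "print('hello from ACE')"   # pushed to the browser via the value2 trait
    ed.theme = 'xcode'                     # switches the ACE theme live
    return ed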
|
|
from __future__ import absolute_import
import sys
from six import StringIO
from mock import patch
from .testcases import DockerClientTestCase
from fig.cli.main import TopLevelCommand
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.old_sys_exit = sys.exit
sys.exit = lambda code=0: None
self.command = TopLevelCommand()
self.command.base_dir = 'tests/fixtures/simple-figfile'
def tearDown(self):
sys.exit = self.old_sys_exit
self.project.kill()
self.project.remove_stopped()
@property
def project(self):
return self.command.get_project(self.command.get_config_path())
def test_help(self):
old_base_dir = self.command.base_dir
self.command.base_dir = 'tests/fixtures/no-figfile'
with self.assertRaises(SystemExit) as exc_context:
self.command.dispatch(['help', 'up'], None)
self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
# self.project.kill() fails during teardown
# unless there is a figfile.
self.command.base_dir = old_base_dir
@patch('sys.stdout', new_callable=StringIO)
def test_ps(self, mock_stdout):
self.project.get_service('simple').create_container()
self.command.dispatch(['ps'], None)
self.assertIn('simplefigfile_simple_1', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_ps_default_figfile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
self.command.dispatch(['up', '-d'], None)
self.command.dispatch(['ps'], None)
output = mock_stdout.getvalue()
self.assertIn('multiplefigfiles_simple_1', output)
self.assertIn('multiplefigfiles_another_1', output)
self.assertNotIn('multiplefigfiles_yetanother_1', output)
@patch('sys.stdout', new_callable=StringIO)
def test_ps_alternate_figfile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
self.command.dispatch(['-f', 'fig2.yml', 'up', '-d'], None)
self.command.dispatch(['-f', 'fig2.yml', 'ps'], None)
output = mock_stdout.getvalue()
self.assertNotIn('multiplefigfiles_simple_1', output)
self.assertNotIn('multiplefigfiles_another_1', output)
self.assertIn('multiplefigfiles_yetanother_1', output)
@patch('fig.service.log')
def test_pull(self, mock_logging):
self.command.dispatch(['pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
@patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
mock_stdout.truncate(0)
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
def test_up(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
def test_up_with_links(self):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 0)
self.assertEqual(len(console.containers()), 0)
def test_up_with_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertNotEqual(old_ids, new_ids)
def test_up_with_keep_old(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertEqual(old_ids, new_ids)
@patch('dockerpty.start')
    def test_run_service_without_links(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
self.assertEqual(len(self.project.containers()), 0)
@patch('dockerpty.start')
def test_run_service_with_links(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@patch('dockerpty.start')
def test_run_with_no_deps(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
@patch('dockerpty.start')
def test_run_does_not_recreate_linked_containers(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', 'db'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.command.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
@patch('dockerpty.start')
def test_run_without_command(self, __):
self.command.base_dir = 'tests/fixtures/commands-figfile'
self.check_build('tests/fixtures/simple-dockerfile', tag='figtest_test')
for c in self.project.containers(stopped=True, one_off=True):
c.remove()
self.command.dispatch(['run', 'implicit'], None)
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/sh -c echo "success"'],
)
self.command.dispatch(['run', 'explicit'], None)
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/true'],
)
@patch('dockerpty.start')
def test_run_service_with_entrypoint_overridden(self, _):
self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.command.dispatch(
['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(
container.human_readable_command,
u'/bin/echo helloworld'
)
@patch('dockerpty.start')
    def test_run_service_with_environment_overridden(self, _):
name = 'service'
self.command.base_dir = 'tests/fixtures/environment-figfile'
self.command.dispatch(
['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
'-e', 'alpha=beta', name],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
        # env overridden
self.assertEqual('notbar', container.environment['foo'])
        # keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
        # make sure a value containing '=' doesn't break parsing
self.assertEqual('moto=bobo', container.environment['allo'])
@patch('dockerpty.start')
def test_run_service_without_map_ports(self, __):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-figfile'
self.command.dispatch(['run', '-d', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_random, None)
self.assertEqual(port_assigned, None)
@patch('dockerpty.start')
def test_run_service_with_map_ports(self, __):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-figfile'
self.command.dispatch(['run', '-d', '--service-ports', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertNotEqual(port_random, None)
self.assertIn("0.0.0.0", port_random)
self.assertEqual(port_assigned, "0.0.0.0:9999")
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_kill(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigint(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
self.assertEqual(len(service.containers()), 1)
        # The container is still running; it has only been interrupted.
self.assertTrue(service.containers()[0].is_running)
def test_kill_interrupted_service(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
started_at = container.dictionary['State']['StartedAt']
self.command.dispatch(['restart'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
'0001-01-01T00:00:00Z',
)
self.assertNotEqual(
container.dictionary['State']['StartedAt'],
started_at,
)
def test_scale(self):
project = self.project
self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
self.command.base_dir = 'tests/fixtures/ports-figfile'
self.command.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
@patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout):
self.command.dispatch(['port', 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), container.get_local_port(3000))
self.assertEqual(get_port(3001), "0.0.0.0:9999")
self.assertEqual(get_port(3002), "")
|
|
"""Loads dodo file (a python module) and convert them to 'tasks' """
import os
import sys
import inspect
import importlib
from collections import OrderedDict
from .exceptions import InvalidTask, InvalidCommand, InvalidDodoFile
from .task import DelayedLoader, Task, dict_to_task
# Directory path from where doit was executed.
# Set by loader, to be used on dodo.py by users.
initial_workdir = None
# TASK_STRING: (string) prefix used to identify python functions
# that are task generators in a dodo file.
TASK_STRING = "task_"
def flat_generator(gen, gen_doc=''):
"""return only values from generators
    if any generator yields another generator it is recursively called
"""
for item in gen:
if inspect.isgenerator(item):
item_doc = item.gi_code.co_consts[0]
for value, value_doc in flat_generator(item, item_doc):
yield value, value_doc
else:
yield item, gen_doc
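# Hedged sketch of flat_generator above (toy generators only): nested
# generators are flattened, and each value is paired with the docstring of the
# generator that produced it (relying, as the code above does, on the
# docstring being the generator code's first constant).
def _example_flat_generator():
    def inner():
        """inner doc"""
        yield {'name': 'x'}
    def outer():
        yield inner()
        yield {'name': 'y'}
    # -> [({'name': 'x'}, 'inner doc'), ({'name': 'y'}, 'outer doc')]
    return list(flat_generator(outer(), 'outer doc'))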
def get_module(dodo_file, cwd=None, seek_parent=False):
"""
    Find the python module defining the tasks; it is called the "dodo" file.
@param dodo_file(str): path to file containing the tasks
@param cwd(str): path to be used cwd, if None use path from dodo_file
@param seek_parent(bool): search for dodo_file in parent paths if not found
@return (module) dodo module
"""
global initial_workdir
initial_workdir = os.getcwd()
def exist_or_raise(path):
"""raise exception if file on given path doesnt exist"""
if not os.path.exists(path):
msg = ("Could not find dodo file '%s'.\n" +
"Please use '-f' to specify file name.\n")
raise InvalidDodoFile(msg % path)
# get absolute path name
if os.path.isabs(dodo_file):
dodo_path = dodo_file
exist_or_raise(dodo_path)
else:
if not seek_parent:
dodo_path = os.path.abspath(dodo_file)
exist_or_raise(dodo_path)
else:
# try to find file in any folder above
current_dir = initial_workdir
dodo_path = os.path.join(current_dir, dodo_file)
file_name = os.path.basename(dodo_path)
parent = os.path.dirname(dodo_path)
while not os.path.exists(dodo_path):
new_parent = os.path.dirname(parent)
if new_parent == parent: # reached root path
exist_or_raise(dodo_file)
parent = new_parent
dodo_path = os.path.join(parent, file_name)
## load module dodo file and set environment
base_path, file_name = os.path.split(dodo_path)
# make sure dodo path is on sys.path so we can import it
sys.path.insert(0, base_path)
if cwd is None:
# by default cwd is same as dodo.py base path
full_cwd = base_path
else:
# insert specified cwd into sys.path
full_cwd = os.path.abspath(cwd)
if not os.path.isdir(full_cwd):
msg = "Specified 'dir' path must be a directory.\nGot '%s'(%s)."
raise InvalidCommand(msg % (cwd, full_cwd))
sys.path.insert(0, full_cwd)
    # files specified in the dodo file are relative to cwd
os.chdir(full_cwd)
# get module containing the tasks
return importlib.import_module(os.path.splitext(file_name)[0])
def create_after(executed=None, target_regex=None, creates=None):
"""Annotate a task-creator function with delayed loader info"""
def decorated(func):
func.doit_create_after = DelayedLoader(
func,
executed=executed,
target_regex=target_regex,
creates=creates
)
return func
return decorated
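# Hedged sketch of how a user's dodo.py would apply the decorator above (task
# and file names are invented): the creator is recorded with a DelayedLoader
# so it only runs after task 'download' has executed.
def _example_create_after():
    @create_after(executed='download', creates=['unpack'])
    def task_unpack():
        return {'basename': 'unpack',
                'actions': ['tar xf data.tgz'],
                'file_dep': ['data.tgz']}
    return task_unpack.doit_create_after   # the DelayedLoader instance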
def load_tasks(namespace, command_names=(), allow_delayed=False):
"""Find task-creators and create tasks
@param namespace: (dict) containing the task creators, it might
contain other stuff
@param command_names: (list - str) blacklist for task names
    @param allow_delayed: (bool) if False, run delayed task-creators
        immediately instead of honoring `doit_create_after['executed']`.
    `allow_delayed == True` is used by the runner to delay the creation of
    tasks until a dependent task is executed. This is only used by the `run`
    command; other commands should always load all tasks since they won't
    execute any task.
@return task_list (list) of Tasks in the order they were defined on the file
"""
funcs = _get_task_creators(namespace, command_names)
# sort by the order functions were defined (line number)
    # TODO: this ordering doesn't make sense when generators come
# from different modules
funcs.sort(key=lambda obj: obj[2])
task_list = []
def _process_gen():
task_list.extend(generate_tasks(name, ref(), ref.__doc__))
def _add_delayed(tname):
task_list.append(Task(tname, None, loader=delayed,
doc=delayed.creator.__doc__))
for name, ref, _ in funcs:
delayed = getattr(ref, 'doit_create_after', None)
if not delayed: # not a delayed task, just run creator
_process_gen()
elif delayed.creates: # delayed with explicit task basename
for tname in delayed.creates:
_add_delayed(tname)
elif allow_delayed: # delayed no explicit name, cmd run
_add_delayed(name)
else: # delayed no explicit name, cmd list (run creator)
_process_gen()
return task_list
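# Hedged sketch of driving load_tasks directly with a hand-built namespace
# (the inline creator and the command-name blacklist are illustrative only).
def _example_load_tasks():
    def task_hello():
        """say hello"""
        return {'actions': ['echo hello']}
    namespace = {'task_hello': task_hello}
    # -> [Task("hello")], ordered by the line where each creator was defined
    return load_tasks(namespace, command_names=('run', 'list'))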
def _get_task_creators(namespace, command_names):
"""get functions defined in the `namespace` and select the task-creators
    A task-creator is either:
    - a function whose name starts with the string TASK_STRING, or
    - an object that has the attribute `create_doit_tasks`
@return (list - func) task-creators
"""
funcs = []
prefix_len = len(TASK_STRING)
# get all functions that are task-creators
for name, ref in namespace.items():
# function is a task creator because of its name
if ((inspect.isfunction(ref) or inspect.ismethod(ref)) and
name.startswith(TASK_STRING)):
# remove TASK_STRING prefix from name
task_name = name[prefix_len:]
# object is a task creator because it contains the special method
elif hasattr(ref, 'create_doit_tasks'):
ref = ref.create_doit_tasks
# If create_doit_tasks is a method, it should be called only
            # if it is bound to an object.
# This avoids calling it for the class definition.
if inspect.signature(ref).parameters:
continue
task_name = name
# ignore functions that are not a task creator
else: # pragma: no cover
# coverage can't get "else: continue"
continue
        # tasks can't have the same name as a command
if task_name in command_names:
msg = ("Task can't be called '%s' because this is a command name."+
" Please choose another name.")
raise InvalidDodoFile(msg % task_name)
# get line number where function is defined
line = inspect.getsourcelines(ref)[1]
# add to list task generator functions
funcs.append((task_name, ref, line))
return funcs
def load_doit_config(dodo_module):
"""
@param dodo_module (dict) dict with module members
"""
doit_config = dodo_module.get('DOIT_CONFIG', {})
if not isinstance(doit_config, dict):
msg = ("DOIT_CONFIG must be a dict. got:'%s'%s")
raise InvalidDodoFile(msg % (repr(doit_config), type(doit_config)))
return doit_config
def _generate_task_from_return(func_name, task_dict, gen_doc):
"""generate a single task from a dict return'ed by a task generator"""
if 'name' in task_dict:
raise InvalidTask("Task '%s'. Only subtasks use field name." %
func_name)
task_dict['name'] = task_dict.pop('basename', func_name)
# Use task generator docstring
# if no doc present in task dict
    if 'doc' not in task_dict:
task_dict['doc'] = gen_doc
return dict_to_task(task_dict)
def _generate_task_from_yield(tasks, func_name, task_dict, gen_doc):
"""generate a single task from a dict yield'ed by task generator
@param tasks: dictionary with created tasks
@return None: the created task is added to 'tasks' dict
"""
# check valid input
if not isinstance(task_dict, dict):
raise InvalidTask("Task '%s' must yield dictionaries" %
func_name)
msg_dup = "Task generation '%s' has duplicated definition of '%s'"
basename = task_dict.pop('basename', None)
# if has 'name' this is a sub-task
if 'name' in task_dict:
basename = basename or func_name
        # if subname is None, this dict defines attributes of the group task
if task_dict['name'] is None:
task_dict['name'] = basename
task_dict['actions'] = None
group_task = dict_to_task(task_dict)
group_task.has_subtask = True
tasks[basename] = group_task
return
        # name is '<task>:<subtask>'
full_name = "%s:%s"% (basename, task_dict['name'])
if full_name in tasks:
raise InvalidTask(msg_dup % (func_name, full_name))
task_dict['name'] = full_name
sub_task = dict_to_task(task_dict)
sub_task.is_subtask = True
# get/create task group
group_task = tasks.get(basename)
if group_task:
if not group_task.has_subtask:
raise InvalidTask(msg_dup % (func_name, basename))
else:
group_task = Task(basename, None, doc=gen_doc, has_subtask=True)
tasks[basename] = group_task
group_task.task_dep.append(sub_task.name)
tasks[sub_task.name] = sub_task
# NOT a sub-task
else:
if not basename:
raise InvalidTask(
"Task '%s' must contain field 'name' or 'basename'. %s"%
(func_name, task_dict))
if basename in tasks:
raise InvalidTask(msg_dup % (func_name, basename))
task_dict['name'] = basename
# Use task generator docstring if no doc present in task dict
        if 'doc' not in task_dict:
task_dict['doc'] = gen_doc
tasks[basename] = dict_to_task(task_dict)
def generate_tasks(func_name, gen_result, gen_doc=None):
"""Create tasks from a task generator result.
@param func_name: (string) name of taskgen function
@param gen_result: value returned by a task generator function
it can be a dict or generator (generating dicts)
@param gen_doc: (string/None) docstring from the task generator function
@return: (list - Task)
"""
# a task instance, just return it without any processing
if isinstance(gen_result, Task):
return (gen_result,)
# task described as a dictionary
if isinstance(gen_result, dict):
return [_generate_task_from_return(func_name, gen_result, gen_doc)]
# a generator
if inspect.isgenerator(gen_result):
tasks = OrderedDict() # task_name: task
        # the generator returns subtasks as dictionaries
for task_dict, x_doc in flat_generator(gen_result, gen_doc):
if isinstance(task_dict, Task):
tasks[task_dict.name] = task_dict
else:
_generate_task_from_yield(tasks, func_name, task_dict, x_doc)
if tasks:
return list(tasks.values())
else:
# special case task_generator did not generate any task
# create an empty group task
return [Task(func_name, None, doc=gen_doc, has_subtask=True)]
if gen_result is None:
return ()
raise InvalidTask(
"Task '%s'. Must return a dictionary or generator. Got %s" %
(func_name, type(gen_result)))
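# Hedged end-to-end sketch of generate_tasks with a yielding creator (module
# names are invented): each yielded dict carrying a 'name' becomes a sub-task
# 'compile:<name>', plus an implicit 'compile' group task.
def _example_generate_subtasks():
    def task_compile():
        """compile every module"""
        for mod in ('a', 'b'):
            yield {'name': mod, 'actions': ['cc -c %s.c' % mod]}
    # -> tasks named 'compile', 'compile:a' and 'compile:b'
    return generate_tasks('compile', task_compile(), task_compile.__doc__)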
|
|
import sys
import time
sys.stdout = sys.stderr
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
from language_model import LM
from common import CheckpointLoader, print_debug
def run_train(dataset, hps, logdir, ps_device, task=0, master=""):
with tf.variable_scope("model"):
print_debug('loading LM model')
model = LM(hps, "train", ps_device)
stime = time.time()
print("Current time: %s" % stime)
print("ALL VARIABLES")
for v in tf.all_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
print("TRAINABLE VARIABLES")
for v in tf.trainable_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
print("LOCAL VARIABLES")
for v in tf.local_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
sv = tf.train.Supervisor(is_chief=(task == 0),
logdir=logdir, # logdir=None, # logdir=logdir,
summary_op=None, # Automatic summaries don't work with placeholders.
global_step=model.global_step,
save_summaries_secs=60*hps.save_summary_every_min,
save_model_secs=60*hps.save_model_every_min)
#save_summaries_secs=30,
#save_model_secs=120 * 5)
#config = tf.ConfigProto(allow_soft_placement=True,
# intra_op_parallelism_threads=2,
# inter_op_parallelism_threads=20)
config = tf.ConfigProto(allow_soft_placement=True)
close_summary_writer = False
with sv.managed_session(master, config=config, start_standard_services=True, close_summary_writer=False) as sess:
        # Slowly increase the number of workers during the beginning of training.
#while not sv.should_stop() and (time.time() - stime) < hps.max_time:
# step = int(sess.run(model.global_step))
# waiting_until_step = task * hps.num_delayed_steps
# if step >= waiting_until_step:
# break
# else:
# print("Current step is %d. Waiting until: %d" % (step, waiting_until_step))
# time.sleep(20.0)
local_step = 0
prev_global_step = sess.run(model.global_step)
cur_global_step = 0
prev_time = time.time()
data_iterator = dataset.iterate_forever(hps.batch_size * hps.num_gpus, hps.num_steps)
print_debug('before looping model, sv.save_path=%s , sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' %(sv.save_path, sv.should_stop(), (time.time() - stime), hps.max_time))
while not sv.should_stop() and (time.time() - stime) < hps.max_time:
if (int(time.time()) - int(stime)) % 10 == 0:
print_debug('While In looping model, sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' %(sv.should_stop(), (time.time() - stime), hps.max_time))
fetches = [model.global_step, model.loss, model.train_op]
# Chief worker computes summaries every 100 steps.
should_compute_summary = (task == 0 and local_step % 100 == 0)
if should_compute_summary:
fetches += [model.summary_op]
#x, y, w = next(data_iterator)
x, y = next(data_iterator)
should_run_profiler = (hps.run_profiler and task == 0 and local_step % 1000 == 13)
if should_run_profiler:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
#fetched = sess.run(fetches, {model.x: x, model.y: y, model.w: w},
fetched = sess.run(fetches, {model.x: x, model.y: y},
options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
print("Running profiler")
with open(logdir + "/timeline.json", 'w') as f:
f.write(ctf)
print("Finished profiling!")
else:
#fetched = sess.run(fetches, {model.x: x, model.y: y, model.w: w})
fetched = sess.run(fetches, {model.x: x, model.y: y})
cur_global_step = fetched[0]
local_step += 1
if should_compute_summary:
#print_debug('should_compute_summary!!! BUT WE DROPED THIS MODE TO SAVE MEMORY SPACE sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' %(sv.should_stop(), (time.time() - stime), hps.max_time))
sv.summary_computed(sess, fetched[-1])
if local_step < 10 or local_step % 20 == 0:
cur_time = time.time()
num_words = hps.batch_size * hps.num_gpus * hps.num_steps
wps = (cur_global_step - prev_global_step) * num_words / (cur_time - prev_time)
prev_global_step = cur_global_step
print("Iteration %d, time = %.2fs, wps = %.0f, train loss = %.4f" % (
cur_global_step, cur_time - prev_time, wps, fetched[1]))
prev_time = cur_time
#save last model
print_debug('Supervisor Begin Save after training period')
sv._saver.save(sess, sv.save_path, cur_global_step)
print_debug('Supervisor DONE Save after training period')
        # close sv with the close_summary_writer flag
sv.stop(None, close_summary_writer)
sess.close()
tf.reset_default_graph()
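# Hedged helper mirroring the throughput bookkeeping in run_train above: each
# global step consumes batch_size * num_gpus * num_steps tokens, so words per
# second is the step delta times that token count over the wall-clock delta.
def _words_per_second(step_delta, elapsed_secs, batch_size, num_gpus, num_steps):
    num_words = batch_size * num_gpus * num_steps
    return step_delta * num_words / elapsed_secs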
def run_eval(dataset, hps, logdir, mode, num_eval_steps):
print_debug('run_eval logdir=%s ' % (logdir))
with tf.variable_scope("model"):
hps.num_sampled = 0 # Always using full softmax at evaluation.
hps.keep_prob = 1.0
#model = LM(hps, "eval", "/cpu:0")
model = LM(hps, "eval", "/gpu:0")
if hps.average_params:
print("Averaging parameters for evaluation.")
saver = tf.train.Saver(model.avg_dict)
else:
saver = tf.train.Saver()
# Use only 4 threads for the evaluation.
#config = tf.ConfigProto(allow_soft_placement=True,
# intra_op_parallelism_threads=20,
# inter_op_parallelism_threads=1)
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
sw = tf.summary.FileWriter(logdir + "/" + mode, sess.graph)
print_debug('run_eval tf.summary.FileWriter=%s ' % (logdir + "/" + mode))
ckpt_loader = CheckpointLoader(saver, model.global_step, logdir + "/train")
print_debug('run_eval ckpt_loader=%s ' % (ckpt_loader.logdir))
with sess.as_default():
print_debug('run_eval sess.as_default iteration')
while ckpt_loader.load_checkpoint():
print_debug('eval load_checkpoint chunk Loader done!')
global_step = ckpt_loader.last_global_step
if mode == "eval_full":
data_iterator = dataset.iterate_forever(hps.batch_size*hps.num_gpus,hps.num_steps)
else:
data_iterator = dataset.iterate_once(hps.batch_size*hps.num_gpus,hps.num_steps)
            print_debug('eval run local variables initializer')
#tf.initialize_local_variables().run()
tf.local_variables_initializer().run()
loss_nom = 0.0
loss_den = 0.0
print_debug('eval run for loop of enumerated data iterator mode='+mode+' eval_steps='+str(num_eval_steps))
#for i, (x, y, w) in enumerate(data_iterator):
for i, (x, y) in enumerate(data_iterator):
if i >= num_eval_steps and mode!="eval_full":
break
#loss = sess.run(model.loss, {model.x: x, model.y: y, model.w: w})
loss = sess.run(model.loss, {model.x: x, model.y: y})
loss_nom += loss
loss_den += 1 # ???
#loss_den += w.mean()
loss = loss_nom / loss_den
#sys.stdout.write("%d: %.3f (%.3f) ... " % (i, loss, np.exp(loss)))
#sys.stdout.flush()
#sys.stdout.write("\n")
log_perplexity = loss_nom / loss_den
print("Results at %d: log_perplexity = %.3f perplexity = %.3f" % (
global_step, log_perplexity, np.exp(log_perplexity)))
summary = tf.Summary()
summary.value.add(tag='eval/log_perplexity', simple_value=log_perplexity)
summary.value.add(tag='eval/perplexity', simple_value=np.exp(log_perplexity))
sw.add_summary(summary, global_step)
sw.flush()
#if mode == "eval_full":
# break #we don't need to wait for other checkpoints in this mode
break #we always break
print_debug('run_eval END OF WHILE loader loop')
print_debug('run_eval END OF WHILE session loop')
sess.close()
tf.reset_default_graph()
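# Hedged helper mirroring the perplexity bookkeeping in run_eval above: the
# mean of the per-batch cross-entropy losses is the log-perplexity, and
# perplexity is its exponential.
def _summarize_perplexity(step_losses):
    log_perplexity = float(sum(step_losses)) / len(step_losses)
    return log_perplexity, np.exp(log_perplexity)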
def run_statistic(dataset, hps, logdir, ps_device, task=0, master=""):
with tf.variable_scope("model"):
print_debug('loading LM model')
model = LM(hps, "train", ps_device)
stime = time.time()
print("Current time: %s" % stime)
print("ALL VARIABLES")
for v in tf.all_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
print("TRAINABLE VARIABLES")
for v in tf.trainable_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
print("LOCAL VARIABLES")
for v in tf.local_variables():
print("%s %s %s %s" % (v.name, v.get_shape(), v.dtype, v.device))
sv = tf.train.Supervisor(is_chief=(task == 0),
logdir=logdir, # logdir=None, # logdir=logdir,
summary_op=None, # Automatic summaries don't work with placeholders.
#global_step=model.global_step,
save_summaries_secs=60 * hps.save_summary_every_min,
save_model_secs=60 * hps.save_model_every_min)
# save_summaries_secs=30,
# save_model_secs=120 * 5)
# config = tf.ConfigProto(allow_soft_placement=True,
# intra_op_parallelism_threads=2,
# inter_op_parallelism_threads=20)
config = tf.ConfigProto(allow_soft_placement=True)
close_summary_writer = False
with sv.managed_session(master, config=config, start_standard_services=True, close_summary_writer=False) as sess:
        # Slowly increase the number of workers during the beginning of training.
# while not sv.should_stop() and (time.time() - stime) < hps.max_time:
# step = int(sess.run(model.global_step))
# waiting_until_step = task * hps.num_delayed_steps
# if step >= waiting_until_step:
# break
# else:
# print("Current step is %d. Waiting until: %d" % (step, waiting_until_step))
# time.sleep(20.0)
local_step = 0
prev_global_step = sess.run(model.global_step)
cur_global_step = 0
prev_time = time.time()
data_iterator = dataset.iterate_forever(hps.batch_size * hps.num_gpus, hps.num_steps)
print_debug(
'before looping model, sv.save_path=%s , sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' % (
sv.save_path, sv.should_stop(), (time.time() - stime), hps.max_time))
while not sv.should_stop() and (time.time() - stime) < hps.max_time:
if (int(time.time()) - int(stime)) % 10 == 0:
print_debug(
'While In looping model, sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' % (
sv.should_stop(), (time.time() - stime), hps.max_time))
fetches = [model.global_step, model.loss, model.train_op]
# Chief worker computes summaries every 100 steps.
should_compute_summary = (task == 0 and local_step % 100 == 0)
if should_compute_summary:
fetches += [model.summary_op]
# x, y, w = next(data_iterator)
x, y = next(data_iterator)
should_run_profiler = (hps.run_profiler and task == 0 and local_step % 1000 == 13)
if should_run_profiler:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# fetched = sess.run(fetches, {model.x: x, model.y: y, model.w: w},
fetched = sess.run(fetches, {model.x: x, model.y: y},
options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
print("Running profiler")
with open(logdir + "/timeline.json", 'w') as f:
f.write(ctf)
print("Finished profiling!")
else:
# fetched = sess.run(fetches, {model.x: x, model.y: y, model.w: w})
fetched = sess.run(fetches, {model.x: x, model.y: y})
cur_global_step = fetched[0]
local_step += 1
if should_compute_summary:
# print_debug('should_compute_summary!!! BUT WE DROPED THIS MODE TO SAVE MEMORY SPACE sv.should_stop()=%d, (time.time() - stime)=%.2fs, hps.max_time=%.2fs ' %(sv.should_stop(), (time.time() - stime), hps.max_time))
sv.summary_computed(sess, fetched[-1])
if local_step < 10 or local_step % 20 == 0:
cur_time = time.time()
num_words = hps.batch_size * hps.num_gpus * hps.num_steps
wps = (cur_global_step - prev_global_step) * num_words / (cur_time - prev_time)
prev_global_step = cur_global_step
print("Iteration %d, time = %.2fs, wps = %.0f, train loss = %.4f" % (
cur_global_step, cur_time - prev_time, wps, fetched[1]))
prev_time = cur_time
# save last model
print_debug('Supervisor Begin Save after training period')
sv._saver.save(sess, sv.save_path, cur_global_step)
print_debug('Supervisor DONE Save after training period')
        # close sv with the close_summary_writer flag
sv.stop(None, close_summary_writer)
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource info registry."""
from googlecloudsdk.core.resource import resource_exceptions
from googlecloudsdk.core.resource import resource_transform
class ResourceInfo(object):
"""collection => resource information mapping support.
Attributes:
async_collection: The operations collection when --async is set.
collection: Memoized collection name set by Get().
cache_command: The gcloud command string that updates the URI cache.
list_format: The default list format string for resource_printer.Print().
simple_format: The --simple-list format string for resource_printer.Print().
defaults: The resource projection transform defaults.
transforms: Memoized combined transform symbols dict set by GetTransforms().
Special format values:
None: Ignore this format.
'default': calliope.base.DEFAULT_FORMAT.
'error': Resource print using this format is an error.
'none': Do not print anything.
"""
def __init__(self, async_collection=None, cache_command=None,
list_format=None, simple_format=None, defaults=None,
transforms=None):
self.collection = None # memoized by Get().
self.async_collection = async_collection
self.cache_command = cache_command
self.list_format = list_format
self.simple_format = simple_format
self.defaults = defaults
self.transforms = transforms # memoized by GetTransforms().
def GetTransforms(self):
"""Returns the combined transform symbols dict.
Returns:
The builtin transforms combined with the collection specific transforms
if any.
"""
if self.transforms:
return self.transforms
# The builtin transforms are always available.
self.transforms = resource_transform.GetTransforms()
# Check if there are any collection specific transforms.
specific_transforms = resource_transform.GetTransforms(self.collection)
if not specific_transforms:
return self.transforms
builtin_transforms = self.transforms
self.transforms = {}
self.transforms.update(builtin_transforms)
self.transforms.update(specific_transforms)
return self.transforms
RESOURCE_REGISTRY = {
# apheleia
'apheleia.projects.regions.functions': ResourceInfo(
list_format="""
table(
name,
status,
triggers.len()
)
""",
),
# appengine
'app.module_versions': ResourceInfo(
list_format="""
table(
module,
version,
format("%.2f", traffic_split)
)
""",
),
'app.versions': ResourceInfo(
list_format="""
table(
service,
id:label=VERSION,
format("{0:.2f}", traffic_split):label=TRAFFIC_SPLIT,
last_deployed_time.date("%Y-%m-%dT%H:%M:%S"):label=LAST_DEPLOYED,
version.servingStatus
)
""",
),
'app.instances': ResourceInfo(
list_format="""
table(
service:sort=1,
version:sort=2,
id:sort=3,
instance.status
)
""",
),
'app.services': ResourceInfo(
list_format="""
table(
id:label=SERVICE:sort=1,
versions.len():label=NUM_VERSIONS
)
""",
),
# autoscaler
'autoscaler.instances': ResourceInfo(
list_format="""
table(
name,
description.yesno(no="-"),
state.yesno(no="-"),
state_details.yesno(no="-")
)
""",
),
# bigquery
'bigquery.datasets': ResourceInfo(
list_format="""
table(
datasetReference.datasetId
)
""",
),
'bigquery.jobs.describe': ResourceInfo(
list_format="""
table(
job_type,
state,
start_time,
duration,
bytes_processed
)
""",
),
'bigquery.jobs.list': ResourceInfo(
list_format="""
table(
job_id,
job_type,
state,
start_time,
duration
)
""",
),
'bigquery.jobs.wait': ResourceInfo(
list_format="""
table(
job_type,
state,
start_time,
duration,
bytes_processed
)
""",
),
'bigquery.projects': ResourceInfo(
list_format="""
table(
projectReference.projectId,
friendlyName
)
""",
),
'bigquery.tables.list': ResourceInfo(
list_format="""
table(
id,
type:label=TABLE_OR_VIEW
)
""",
),
# cloud billing
'cloudbilling.billingAccounts': ResourceInfo(
cache_command='billing accounts list',
list_format="""
table(
name[16:],
displayName,
open,
)
""",
),
'cloudbilling.billingAccounts_projects': ResourceInfo(
list_format="""
table(
projectId,
billingAccountName[16:],
billingEnabled,
)
""",
),
# cloud resource manager
'cloudresourcemanager.projects': ResourceInfo(
cache_command='projects list',
list_format="""
table(
projectId,
name,
projectNumber
)
""",
),
# Cloud SDK client side resources
# 'coudsdk.*': ...
# compute
'compute.addresses': ResourceInfo(
cache_command='compute addresses list',
list_format="""
table(
name,
region.basename(),
address,
status
)
""",
),
'compute.autoscalers': ResourceInfo(
async_collection='compute.operations',
cache_command='compute autoscaler list',
list_format="""
table(
name,
target.basename(),
autoscalingPolicy.policy():label=POLICY
)
""",
),
'compute.backendBuckets': ResourceInfo(
cache_command='compute backend-buckets list',
list_format="""
table(
name,
bucketName:label=GCS_BUCKET_NAME
)
""",
),
'compute.backendService': ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol
)
""",
),
'compute.disks': ResourceInfo(
cache_command='compute disks list',
list_format="""
table(
name,
zone.basename(),
sizeGb,
type.basename(),
status
)
""",
),
'compute.diskTypes': ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
zone.basename(),
validDiskSize:label=VALID_DISK_SIZES
)
""",
),
'compute.firewalls': ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
sourceRanges.list():label=SRC_RANGES,
allowed[].map().firewall_rule().list():label=RULES,
sourceTags.list():label=SRC_TAGS,
targetTags.list():label=TARGET_TAGS
)
""",
),
'compute.forwardingRules': ResourceInfo(
cache_command='compute forwarding-rules list',
list_format="""
table(
name,
region.basename(),
IPAddress,
IPProtocol,
target.scope()
)
""",
),
'compute.groups': ResourceInfo(
cache_command='compute groups list',
list_format="""
table(
name,
members.len():label=NUM_MEMBERS,
description
)
""",
),
'compute.httpHealthChecks': ResourceInfo(
cache_command='compute http-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",
),
'compute.httpsHealthChecks': ResourceInfo(
cache_command='compute https-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",
),
'compute.images': ResourceInfo(
cache_command='compute images list',
list_format="""
table(
name,
selfLink.map().scope(projects).segment(0):label=PROJECT,
image_alias():label=ALIAS,
deprecated.state:label=DEPRECATED,
status
)
""",
),
'compute.instanceGroups': ResourceInfo(
cache_command='compute instance-groups list',
list_format="""
table(
name,
zone.basename(),
network.basename(),
isManaged:label=MANAGED,
size:label=INSTANCES
)
""",
),
'compute.instanceGroupManagers': ResourceInfo(
cache_command='compute instance-groups managed list',
list_format="""
table(
name,
zone.basename(),
baseInstanceName,
size,
instanceGroup.basename():label=GROUP,
instanceTemplate.basename(),
autoscaled
)
""",
),
'compute.instances': ResourceInfo(
cache_command='compute instances list',
list_format="""
table(
name,
zone.basename(),
machineType.basename(),
scheduling.preemptible.yesno(yes=true, no=''),
networkInterfaces[0].networkIP:label=INTERNAL_IP,
networkInterfaces[0].accessConfigs[0].natIP:label=EXTERNAL_IP,
status
)
""",
),
'compute.instanceTemplates': ResourceInfo(
cache_command='compute instance-templates list',
list_format="""
table(
name,
properties.machineType,
properties.scheduling.preemptible.yesno(yes=true, no=''),
creationTimestamp
)
""",
),
'compute.machineTypes': ResourceInfo(
cache_command='compute machine-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUS,
memoryMb.size(units_in=MiB, units_out=GiB):label=MEMORY_GB,
deprecated.state:label=DEPRECATED
)
""",
),
'compute.networks': ResourceInfo(
cache_command='compute networks list',
list_format="""
table(
name,
IPv4Range:label=IPV4_RANGE,
gatewayIPv4
)
""",
),
'compute.operations': ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
targetLink.scope():label=TARGET,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",
),
'compute.projects': ResourceInfo(
list_format="""
value(
format("There is no API support yet.")
)
""",
),
'compute.regions': ResourceInfo(
cache_command='compute regions list',
list_format="""
table(
name,
quotas.metric.CPUS.quota():label=CPUS,
quotas.metric.DISKS_TOTAL_GB.quota():label=DISKS_GB,
quotas.metric.IN_USE_ADDRESSES.quota():label=ADDRESSES,
quotas.metric.STATIC_ADDRESSES.quota():label=RESERVED_ADDRESSES,
status():label=STATUS,
deprecated.deleted:label=TURNDOWN_DATE
)
""",
),
'compute.replicaPools': ResourceInfo(
list_format="""
table(
name,
currentNumReplicas
)
""",
),
'compute.replicaPoolsReplicas': ResourceInfo(
list_format="""
table(
name,
status.templateVersion,
status.state:label=STATUS
)
""",
),
'compute.resourceViews': ResourceInfo(
list_format="""
value(
uri()
)
""",
),
'compute.resourceViewsResources': ResourceInfo(
list_format="""
value(
uri()
)
""",
),
'compute.routes': ResourceInfo(
cache_command='compute routes list',
list_format="""
table(
name,
network.basename(),
destRange,
firstof(nextHopInstance, nextHopGateway, nextHopIp).scope()
:label=NEXT_HOP,
priority
)
""",
),
'compute.snapshots': ResourceInfo(
        cache_command='compute snapshots list',
list_format="""
table(
name,
diskSizeGb,
sourceDisk.scope():label=SRC_DISK,
status
)
""",
),
'compute.sslCertificates': ResourceInfo(
cache_command='compute ssl-certificates list',
list_format="""
table(
name,
creationTimestamp
)
""",
),
'compute.targetHttpProxies': ResourceInfo(
cache_command='compute target-http-proxies list',
list_format="""
table(
name,
urlMap.basename()
)
""",
),
'compute.targetHttpsProxies': ResourceInfo(
cache_command='compute target-https-proxies list',
list_format="""
table(
name,
sslCertificates.map().basename().list():label=SSL_CERTIFICATES,
urlMap.basename()
)
""",
),
'compute.targetInstances': ResourceInfo(
cache_command='compute target-instances list',
list_format="""
table(
name,
zone.basename(),
instance.basename(),
natPolicy
)
""",
),
'compute.targetPools': ResourceInfo(
        cache_command='compute target-pools list',
list_format="""
table(
name,
region.basename(),
sessionAffinity,
backupPool.basename():label=BACKUP,
healthChecks[].map().basename().list():label=HEALTH_CHECKS
)
""",
),
'compute.targetVpnGateways': ResourceInfo(
cache_command='compute vpn-gateways list',
list_format="""
table(
name,
network.basename(),
region.basename()
)
""",
),
'compute.urlMaps': ResourceInfo(
cache_command='compute url-maps list',
list_format="""
table(
name,
defaultService.basename()
)
""",
),
'compute.users': ResourceInfo(
cache_command='compute users list',
list_format="""
table(
name,
owner,
description
)
""",
),
'compute.vpnTunnels': ResourceInfo(
cache_command='compute vpn-tunnels list',
list_format="""
table(
name,
region.basename(),
targetVpnGateway.basename():label=GATEWAY,
peerIp:label=PEER_ADDRESS
)
""",
),
'compute.zones': ResourceInfo(
cache_command='compute zones list',
list_format="""
table(
name,
region.basename(),
status():label=STATUS,
maintenanceWindows.next_maintenance():label=NEXT_MAINTENANCE,
deprecated.deleted:label=TURNDOWN_DATE
)
""",
),
# container
'container.projects.zones.clusters': ResourceInfo(
list_format="""
table(
name,
zone,
clusterApiVersion,
endpoint:label=MASTER_IP,
machineType,
sourceImage,
numNodes:label=NODES,
status
)
""",
),
'container.projects.zones.nodePools': ResourceInfo(
list_format="""
table(
name,
zone,
machineType,
diskSizeGb,
version
)
""",
),
'container.projects.zones.operations': ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
zone,
target,
status,
errorMessage
)
""",
),
# dataflow
'dataflow.jobs': ResourceInfo(
list_format="""
table(
job_id:label=ID,
job_name:label=NAME,
job_type:label=TYPE,
creation_time.yesno(no="-"),
status
)
""",
),
# dataproc
'dataproc.clusters': ResourceInfo(
list_format="""
table(
clusterName:label=NAME,
configuration.numWorkers:label=WORKER_COUNT,
status.state:label=STATUS,
configuration.gceClusterConfiguration.zoneUri.zone()
)
""",
),
'dataproc.jobs': ResourceInfo(
async_collection='dataproc.operations',
list_format="""
table(
reference.jobId,
type.yesno(no="-"),
status.state:label=STATUS
)
""",
),
'dataproc.operations': ResourceInfo(
list_format="""
table(
name:label=OPERATION_NAME,
done
)
""",
),
# deployment manager v2
'deploymentmanager.deployments': ResourceInfo(
async_collection='deployments.operations',
list_format="""
table(
name,
operation.operationType:label=LAST_OPERATION_TYPE,
operation.status,
description,
manifest.basename(),
update.errors.group(code, message)
)
""",
simple_format="""
[legend-log=status,
empty-legend="No Deployments were found in your project!"]
value(
name
)
""",
),
'deploymentmanager.operations': ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
status,
targetLink.basename():label=TARGET,
error.errors.group(code, message)
)
""",
simple_format="""
[legend-log=status,
empty-legend="No Operations were found in your project!"]
value(
name
)
""",
),
'deploymentmanager.resources': ResourceInfo(
list_format="""
table(
name,
operationType,
status.yesno(no="COMPLETED"):label=UPDATE_STATE,
update.error.errors.group(code, message)
)
""",
simple_format="""
[legend-log=status,
empty-legend="No Resources were found in your deployment!"]
value(
name
)
""",
),
# dns
'dns.changes': ResourceInfo(
list_format="""
table(
id,
startTime,
status
)
""",
),
'dns.managedZones': ResourceInfo(
cache_command='dns managed-zones list',
list_format="""
table(
name,
dnsName,
description
)
""",
),
'dns.resourceRecordSets': ResourceInfo(
list_format="""
table(
name,
type,
ttl,
rrdatas.list():label=DATA
)
""",
),
# genomics
'genomics.datasets': ResourceInfo(
list_format="""
table(
id,
name
)
""",
),
# logging
'logging.logs': ResourceInfo(
list_format="""
table(
name
)
""",
),
'logging.sinks': ResourceInfo(
list_format="""
table(
name,
destination
)
""",
),
'logging.metrics': ResourceInfo(
list_format="""
table(
name,
description,
filter
)
""",
),
'logging.typedSinks': ResourceInfo(
list_format="""
table(
name,
destination,
type
)
""",
),
# projects
'developerprojects.projects': ResourceInfo(
list_format="""
table(
projectId,
title,
projectNumber
)
""",
),
# service management (inception)
'servicemanagement-v1.services': ResourceInfo(
list_format="""
table(
serviceName:label=NAME,
serviceConfig.title
)
""",
simple_format="""
value(
serviceName
)
""",
),
# source
'source.jobs.list': ResourceInfo(
list_format="""
table(
          name.yesno(no="default"):label=REPO_NAME,
projectId,
vcs,
state,
createTime
)
""",
),
# sql
'sql.backupRuns': ResourceInfo(
list_format="""
table(
dueTime.iso(),
error.code.yesno(no="-"):label=ERROR,
status
)
""",
),
'sql.backupRuns.v1beta4': ResourceInfo(
list_format="""
table(
id,
windowStartTime.iso(),
error.code.yesno(no="-"):label=ERROR,
status
)
""",
),
'sql.flags': ResourceInfo(
list_format="""
table(
name,
type,
allowedStringValues.list():label=ALLOWED_VALUES
)
""",
),
'sql.instances': ResourceInfo(
async_collection='sql.operations',
cache_command='sql instances list',
list_format="""
table(
instance:label=NAME,
region,
settings.tier,
ipAddresses[0].ipAddress.yesno(no="-"):label=ADDRESS,
state:label=STATUS
)
""",
),
'sql.instances.v1beta4': ResourceInfo(
async_collection='sql.operations.v1beta4',
cache_command='sql instances list',
list_format="""
table(
name,
region,
settings.tier,
ipAddresses[0].ipAddress.yesno(no="-"):label=ADDRESS,
state:label=STATUS
)
""",
),
'sql.operations': ResourceInfo(
list_format="""
table(
operation,
operationType:label=TYPE,
startTime.iso():label=START,
endTime.iso():label=END,
error[0].code.yesno(no="-"):label=ERROR,
state:label=STATUS
)
""",
),
'sql.operations.v1beta4': ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
startTime.iso():label=START,
endTime.iso():label=END,
error[0].code.yesno(no="-"):label=ERROR,
state:label=STATUS
)
""",
),
'sql.sslCerts': ResourceInfo(
async_collection='sql.operations',
list_format="""
table(
commonName:label=NAME,
sha1Fingerprint,
expirationTime.yesno(no="-"):label=EXPIRATION
)
""",
),
'sql.tiers': ResourceInfo(
list_format="""
table(
tier,
region.list():label=AVAILABLE_REGIONS,
RAM.size(),
DiskQuota.size():label=DISK
)
""",
),
# test
'test.android.devices': ResourceInfo(
list_format="""
table[box](
id:label=DEVICE_ID,
manufacturer:label=MAKE,
name:label=MODEL_NAME,
form.color(blue=VIRTUAL,yellow=PHYSICAL),
format("{0:4} x {1}", screenY, screenX):label=RESOLUTION,
supportedVersionIds.list("none"):label=OS_VERSION_IDS,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",
),
'test.android.versions': ResourceInfo(
list_format="""
table[box](
id:label=OS_VERSION_ID:align=center,
versionString:label=VERSION:align=center,
codeName,
apiLevel:align=center,
releaseDate.date(format='%Y-%m-%d'):align=center,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",
),
'test.android.locales': ResourceInfo(
list_format="""
table[box](
id:label=LOCALE,
name,
region,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",
),
'test.android.run.outcomes': ResourceInfo(
async_collection='test.android.run.url',
list_format="""
table[box](
outcome.color(red=Fail, green=Pass, yellow=Inconclusive),
axis_value:label=TEST_AXIS_VALUE,
test_details:label=TEST_DETAILS
)
""",
),
'test.android.run.url': ResourceInfo(
list_format="""
value(format(
'Final test results will be available at [{0}].', [])
)
""",
),
'test.web.browsers': ResourceInfo(
list_format="""
table[box](
id:label=BROWSER_ID,
name,
release,
versionString:label=VERSION,
androidCatalog.yesno("*", "-"),
linuxCatalog.yesno("*", "-"),
windowsCatalog.yesno("*", "-")
)
""",
),
# updater
'replicapoolupdater.rollingUpdates': ResourceInfo(
list_format="""
table(
id,
instanceGroupManager.basename():label=GROUP_NAME,
instanceTemplate.basename("-"):label=TEMPLATE_NAME,
status,
statusMessage
)
""",
),
'replicapoolupdater.rollingUpdates.instanceUpdates': ResourceInfo(
list_format="""
table(
instance.basename():label=INSTANCE_NAME,
status
)
""",
),
# generic
'uri': ResourceInfo(
list_format="""
table(
uri():sort=101:label=""
)
""",
),
}
def Get(collection, must_be_registered=True):
"""Returns the ResourceInfo for collection or None if not registered.
Args:
collection: The resource collection.
    must_be_registered: If True, raise UnregisteredCollectionError for an
      unregistered collection; otherwise return None.
Raises:
UnregisteredCollectionError: If collection is not registered and
must_be_registered is True.
Returns:
The ResourceInfo for collection or None if not registered.
"""
info = RESOURCE_REGISTRY.get(collection, None)
if not info:
if not must_be_registered:
return None
raise resource_exceptions.UnregisteredCollectionError(
'Collection [{0}] is not registered.'.format(collection))
info.collection = collection
return info
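# Example usage (hypothetical collection name; it must exist in RESOURCE_REGISTRY):
#   info = Get('sql.instances')
#   print(info.list_format)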
|
|
# -*- coding: utf-8 -*-
# This code is adapted from
# https://github.com/dfm/corner.py
# git hash 5c2cd63 on May 25
# Modifications by John Baker NASA-GSFC (2016-18)
#Copyright (c) 2013-2016 Daniel Foreman-Mackey
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
from __future__ import print_function, absolute_import
import logging
import math
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
from matplotlib.patches import Ellipse
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
__all__ = ["corner", "hist2d", "quantile"]
def corner(xs, bins=20, range=None, weights=None, cov=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like[nsamples, ndim]
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like[ndim,]
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like[nsamples,]
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,)
        A list of names for the dimensions. If ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str
        A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool
If true, print the values of the computed quantiles.
plot_contours : bool
Draw contours for dense regions of the plot.
use_math_text : bool
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int
Maximum number of ticks to try to use
top_ticks : bool
If true, label the top ticks of each axis
fig : matplotlib.Figure
Overplot onto the provided figure object.
hist_kwargs : dict
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
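                # E.g. range[i] = 0.9 becomes q = [0.05, 0.95], i.e. an
                # equal-tailed 90% interval of that dimension's samples.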
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [int(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
        except ValueError:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))
    # The idea is to pass in a covariance; if an empty one is given, concoct
    # something from the 1-sigma ranges.
    if cov is not None and len(cov) == 0:
        print("concocting covar elements from 1-sigma ranges")
        cov = np.zeros((K, K))
for k in np.arange(K):
q_16, q_50, q_84 = quantile(xs[k], [0.16, 0.5, 0.84],weights=weights)
deltax=(q_84-q_16)/2.0
cov[k,k]=deltax**2
print("cov=",cov)
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
#This is to normalize the histogram so that different data can be compared
        if weights is None:
            hist1d_wts = [1.0 / len(x) for _ in x]
        else:
            hist1d_wts = [w * 1.0 / len(x) for w in weights]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=hist1d_wts,
range=np.sort(range[i]), **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=hist1d_wts,
range=np.sort(range[i]))
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
            # Add covariance ellipses, centered on the truth values.
            # Both cov and truths must be available.
            if cov is not None and truths is not None:
                cx = truths[j]
                cy = truths[i]
#ang=math.acos(cov[0,1]/math.sqrt(cov[0,0]*cov[1,1]))*180/math.pi
#print (j,i,labels[j],labels[i],"center=",cx,cy)
#add an error ellipse
N_thetas=60
dtheta=2.0*math.pi/(N_thetas-1)
thetas=np.arange(0,(2.0*math.pi+dtheta),dtheta)
#Cplus=(cov[i,i]+cov[j,j])/2.0
#Cminus=(-cov[i,i]+cov[j,j])/2.0
#print("cov[ii],cov[ij],cov[jj],Cplus,Cminus:",cov[i,i],cov[i,j],cov[j,j],Cplus,Cminus)
ang=-math.pi/4.
root=cov[i,j]/math.sqrt(cov[i,i]*cov[j,j])
if(root>1):root=1
if(root<-1):root=-1
acoeff=math.sqrt(1-root)
bcoeff=math.sqrt(1+root)
xcoeff=math.sqrt(cov[j,j])
ycoeff=math.sqrt(cov[i,i])
#print("a2,b2",acoeff*acoeff,bcoeff*bcoeff)
#print("a,b,ang, xcoeff,ycoeff, root=",acoeff,bcoeff,ang,xcoeff,ycoeff,root)
if "levels" in hist2d_kwargs:
levels= hist2d_kwargs["levels"]
else:
levels== 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
for xlev in levels:
#in the next line we convert the credibility limit
#to a "sigma" limit for a 2-d normal
#this becomes a scale-factor for the error ellipse
#1-exp(x^2/(-2)=y
#-2*log(1-y)=x^2
lev_fac = math.sqrt( -2 * math.log( 1 - xlev ) )
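                    # E.g. the default 1-sigma level xlev = 1 - exp(-0.5) ~= 0.3935
                    # gives lev_fac = sqrt(-2*ln(0.6065)) = 1, i.e. the 1-sigma ellipse.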
#print ("scales for quantile level = ",xlev," -> ",lev_fac,": (",xcoeff*lev_fac,",",ycoeff*lev_fac,")")
elxs=[cx+lev_fac*xcoeff*(acoeff*math.cos(th)*math.cos(ang)-bcoeff*math.sin(th)*math.sin(ang)) for th in thetas]
elys=[cy+lev_fac*ycoeff*(acoeff*math.cos(th)*math.sin(ang)+bcoeff*math.sin(th)*math.cos(ang)) for th in thetas]
ax.plot(elxs,elys,color='r')
ax.grid()
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig
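# Minimal usage sketch (hypothetical data, not part of the original module):
#   samples = np.random.randn(10000, 3)
#   fig = corner(samples, labels=["a", "b", "c"], show_titles=True)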
def quantile(x, q, weights=None):
"""
Compute sample quantiles with support for weighted samples.
Note
----
When ``weights`` is ``None``, this method simply calls numpy's percentile
function with the values of ``q`` multiplied by 100.
Parameters
----------
x : array_like[nsamples,]
The samples.
q : array_like[nquantiles,]
The list of quantiles to compute. These should all be in the range
``[0, 1]``.
weights : Optional[array_like[nsamples,]]
        An optional weight corresponding to each sample. These weights must
        have the same length as ``x``.
Returns
-------
quantiles : array_like[nquantiles,]
The sample quantiles computed at ``q``.
Raises
------
ValueError
For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch
between ``x`` and ``weights``.
"""
x = np.atleast_1d(x)
q = np.atleast_1d(q)
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0 and 1")
if weights is None:
return np.percentile(x, 100.0 * q)
else:
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x)")
idx = np.argsort(x)
sw = weights[idx]
cdf = np.cumsum(sw)[:-1]
cdf /= cdf[-1]
cdf = np.append(0, cdf)
return np.interp(q, cdf, x[idx]).tolist()
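# Quick sanity check (hypothetical values): with equal weights the result should
# match the interpolated unweighted quantile, e.g.
#   quantile([1.0, 2.0, 3.0, 4.0], [0.5], weights=[1, 1, 1, 1])  # -> [2.5]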
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, no_fill_contours=False, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
Parameters
----------
x : array_like[nsamples,]
The samples.
y : array_like[nsamples,]
The samples.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes
        An axes instance on which to add the 2-D histogram.
plot_datapoints : bool
Draw the individual data points.
plot_density : bool
Draw the density colormap.
plot_contours : bool
Draw the contours.
no_fill_contours : bool
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool
Fill the contours.
contour_kwargs : dict
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
"""
if ax is None:
ax = pl.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=list(map(np.sort, range)),
weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "has no dynamic range. You could try using the "
                         "'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
        except IndexError:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m):
logging.warning("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
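    # V now holds histogram-density thresholds: bins with density above V[i]
    # enclose roughly the fraction of the total mass given by levels[i].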
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1])
|
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr20"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("position", axis=1)  # the column restored by reset_index()
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("total_chrom20.phy", header=None, index=None)
print(tott.shape)
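# Sketch of the resulting PHYLIP-style output (hypothetical values): each line of
# total_chrom20.phy is "<sample_name> <joined 0/1 calls with '?' for missing>", e.g.
#   RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC 01?10...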
|
|
#bjShuffleStart.py
#mcvan
#16/Sept/14
from random import choice, randrange
import pygame
DECKS = 1
playerHand = []
dealerHand = []
dealerScore = 0
playerScore = 0
doReset = False
CARDPOINTS = {'AD': 11, 'AH': 11, 'AC': 11, 'AS': 11,
'KD': 10, 'KH': 10, 'KC': 10, 'KS': 10,
'QD': 10, 'QH': 10, 'QC': 10, 'QS': 10,
'JD': 10, 'JH': 10, 'JC': 10, 'JS': 10,
'2D': 2, '2H': 2, '2C': 2, '2S': 2,
'3D': 3, '3H': 3, '3C': 3, '3S': 3,
'4D': 4, '4H': 4, '4C': 4, '4S': 4,
'5D': 5, '5H': 5, '5C': 5, '5S': 5,
'6D': 6, '6H': 6, '6C': 6, '6S': 6,
'7D': 7, '7H': 7, '7C': 7, '7S': 7,
'8D': 8, '8H': 8, '8C': 8, '8S': 8,
'9D': 9, '9H': 9, '9C': 9, '9S': 9,
'10D': 10, '10H': 10, '10C': 10, '10S':10}
def cardList():
cards = ['AD', 'AH', 'AC', 'AS',
'KD', 'KH', 'KC', 'KS',
'QD', 'QH', 'QC', 'QS',
'JD', 'JH', 'JC', 'JS',
'2D', '2H', '2C', '2S',
'3D', '3H', '3C', '3S',
'4D', '4H', '4C', '4S',
'5D', '5H', '5C', '5S',
'6D', '6H', '6C', '6S',
'7D', '7H', '7C', '7S',
'8D', '8H', '8C', '8S',
'9D', '9H', '9C', '9S',
'10D', '10H', '10C', '10S'] * DECKS
NUMCARDS = len(cards)
shuffled_cards = []
for i in range(NUMCARDS):
cardCount = randrange(0,(NUMCARDS - i))
shuffled_cards.append(cards[cardCount])
del cards[cardCount]
return shuffled_cards
shuffledCards = cardList()
def startHandPlayer(shuffledDeck):
playersCards = []
playersScore = 0
newCard = shuffledDeck.pop(0)
playersCards.append(newCard)
playersScore += CARDPOINTS[newCard]
newCard = shuffledDeck.pop(0)
playersCards.append(newCard)
playersScore += CARDPOINTS[newCard]
#playersScore = checkAces(playersScore, playersCards)
print("Player's cards: ", playersCards, " Player's hand score ", playersScore)
return (playersScore, playersCards)
def ace(score):
    # Count an ace as 11 unless that would bust the hand (score already above 10).
    if score > 10:
        return 1
    return 11
def score(hand):
score = 0
for card in hand:
cardpoint = CARDPOINTS.get(card)
if cardpoint == 11:
score += ace(score)
else:
score += cardpoint
return score
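# Worked example: score(['AD', '9H', 'AC']) counts the first ace as 11 and the
# second as 1 (11 would bust), giving 11 + 9 + 1 = 21.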
def startPlayer(deck):
hit(deck)
hit(deck)
addCard(dealerHand,shuffledCards)
addCard(dealerHand,shuffledCards)
def getCard(tempDeck):
    # Draw a random card and remove it from the deck so it cannot be dealt twice.
    card = choice(tempDeck)
    tempDeck.remove(card)
    return card
def addCard(hand, tempDeck):
hand.append(getCard(tempDeck))
def hit(deck):
addCard(playerHand,deck)
score(playerHand)
score(dealerHand)
def dealerTurn(deck):
addCard(dealerHand,deck)
def checkWin(screen):
    global doReset
    dealerScore = score(dealerHand)
    playerScore = score(playerHand)
    doReset = True
    if playerScore > 21:
        screen.blit(lose, (100,300))
        print("You lose")
    elif dealerScore > 21:
        screen.blit(win, (100,300))
        print("You win")
    elif dealerScore > playerScore and dealerScore < 21:
        screen.blit(lose, (100,300))
        print("You lose")
    elif dealerScore == 21:
        screen.blit(win, (100,300))
        print("Everyone Wins")
#graphics
pygame.init()
pygame.display.set_caption("Blackjack")
screen = pygame.display.set_mode((1000, 600))
startPlayer(shuffledCards)
running = True
while running:
gerg = pygame.font.SysFont("comicsansms", 72)
small = pygame.font.SysFont("comicsansms", 14)
win = gerg.render("You Win!", 1, (255,255,255))
lose = gerg.render("You Lose",1, (255,255,255))
ins = small.render("'h' to hold and 't' to take/hit. Once you lose or win hit 'r' to reset", 1, (255,255,255))
reset = gerg.render("Please press r to reset",1,(255,255,255))
screen.fill((255, 0, 0))
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
running = False
if (e.type==pygame.KEYDOWN):
            if e.key==pygame.K_h:
                if score(dealerHand) < 17:
                    addCard(dealerHand,shuffledCards)
                print(dealerHand)
                print(playerHand)
                checkWin(screen)
            if e.key==pygame.K_t:
                hit(shuffledCards)
                playerScore = score(playerHand)
                if playerScore < 21:
                    addCard(dealerHand,shuffledCards)
                print(playerHand)
                print(dealerHand)
                checkWin(screen)
            if e.key==pygame.K_r:
                playerScore = 0
                dealerScore = 0
                playerHand = []
                dealerHand = []
                doReset = False
                startPlayer(shuffledCards)
#if e.key==pygame.K_s:
# startPlayer(shuffledCards)
# print playerHand
# print dealerHand
# print score(playerHand)
# if playerScore == 21
    if doReset:
screen.blit(reset,(100,300))
# cardPlayer = pygame.image.load(("card/"+dealerHand[0]+"png"),10,10)
# cardDealer = pygame.image.load("dealer.jpg")
# screen.blit(lose,(400,400))
# screen.blit(cardPlayer, (10,10))
#sets up where the cards are displayed on screen
screen.blit(ins, (10,500))
x=0
for card in playerHand:
x+=1
temp = pygame.image.load("card/"+card+".png")
screen.blit(temp, (72*x,10))
y=0
for card in dealerHand:
y+=1
temp = pygame.image.load("card/"+card+".png")
screen.blit(temp, (72*y,200))
# Draw the scene
pygame.display.flip()
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import time
import zipfile
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self, dag, pickle_id=None):
"""
:param dag: the DAG
:type dag: DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
        :return: if the given DAG ID exists in the bag, return the BaseDag
            corresponding to that ID. Otherwise, raise an AirflowException
:rtype: SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
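# Usage sketch (hypothetical SimpleDag objects and DAG ID):
#   bag = SimpleDagBag(simple_dags)
#   bag.get_dag('example_dag_id').task_ids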
def list_py_file_paths(directory, safe_mode=True):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
patterns = patterns + [p for p in f.read().split('\n') if p]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(re.search(p, os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as f:
content = f.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
return file_paths
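# The safe-mode heuristic in list_py_file_paths() keeps a .py file only if its
# raw bytes contain both b'DAG' and b'airflow'. A minimal sketch of that check
# (the helper name is illustrative, not part of Airflow's API):
def _example_might_contain_dag(content):
    """Return True if ``content`` (bytes) passes the safe-mode heuristic."""
    return all(s in content for s in (b'DAG', b'airflow'))
# e.g. _example_might_contain_dag(b"from airflow import DAG")  -> True
#      _example_might_contain_dag(b"print('no dags here')")    -> False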
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: list[SimpleDag]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
        :return: the time when the processor started processing the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime]
"""
def __init__(self,
dag_directory,
file_paths,
parallelism,
process_file_interval,
max_runs,
processor_factory):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
        :param parallelism: maximum number of processes to run simultaneously
:type parallelism: int
:param process_file_interval: process a file at most once every this
many seconds
:type process_file_interval: float
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
        :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path)
        :type processor_factory: (unicode) -> (AbstractDagFileProcessor)
"""
self._file_paths = file_paths
self._file_path_queue = []
self._parallelism = parallelism
self._dag_directory = dag_directory
self._max_runs = max_runs
self._process_file_interval = process_file_interval
self._processor_factory = processor_factory
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Scheduler heartbeat key.
self._heart_beat_key = 'heart-beat'
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (timezone.utcnow() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors
def processing_count(self):
"""
:return: the number of files currently being processed
:rtype: int
"""
return len(self._processors)
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def heartbeat(self):
"""
This should be periodically called by the scheduler. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[SimpleDag]
"""
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s scheduler processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._process_file_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update scheduler heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] < self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
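# A minimal driving-loop sketch for DagFileProcessorManager; the directory,
# factory and sleep interval below are illustrative assumptions, not values
# prescribed by Airflow:
#
#     manager = DagFileProcessorManager(
#         dag_directory=dag_dir,
#         file_paths=list_py_file_paths(dag_dir),
#         parallelism=2,
#         process_file_interval=30.0,
#         max_runs=3,
#         processor_factory=my_processor_factory)  # -> AbstractDagFileProcessor
#     while not manager.max_runs_reached():
#         simple_dags = manager.heartbeat()  # launches processors, collects results
#         time.sleep(1.0)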
|
|
from __future__ import print_function
import os
import subprocess
import tempfile
import shutil
import logging
from nose.tools import assert_equal, assert_true, assert_false
ENVIRONMENT_CORRECT = False
try:
from dls_ade import vcs_git
from dls_ade import Server
from dls_ade import bytes_to_string
except ImportError as e:
print(e)
vcs_git = None
Server = None
raise ImportError("PYTHONPATH must contain the dls_ade package")
def check_environment():
"""Checks that the environment has been set up correctly for testing."""
    # Make sure env var is set. (PYTHONPATH must also be set, but cannot
# easily test it is correct)
global ENVIRONMENT_CORRECT
if ENVIRONMENT_CORRECT:
return
try:
os.environ['GIT_ROOT_DIR']
except KeyError:
raise EnvironmentError("GIT_ROOT_DIR must be set")
ENVIRONMENT_CORRECT = True
class SystemTestingError(Exception):
"""Class for exceptions relating to system_testing module."""
pass
class SettingsError(SystemTestingError):
"""Class for exceptions relating to invalid settings"""
pass
class TempdirError(SystemTestingError):
"""Class for exceptions relating to issues with temporary directories."""
pass
class GitRootDirError(SystemTestingError):
"""Class for exceptions raised when GIT_ROOT_DIR is not set."""
def __init__(self):
err_message = "Cannot call functions if GIT_ROOT_DIR not set."
super(GitRootDirError, self).__init__(err_message)
def get_local_temp_clone(server_repo_path):
"""Obtain the root directory for a temporary clone of the given repository.
Args:
server_repo_path: The repository path for the server.
Returns:
str: The root directory of the cloned server repository.
This will always be located in a temporary folder.
Raises:
:class:`~dls_ade.exceptions.VCSGitError`: From \
        :func:`dls_ade.vcs_git.temp_clone`.
"""
logging.debug("Cloning server repo path: " + server_repo_path)
server = Server()
repo = server.temp_clone(server_repo_path).repo
tempdir = repo.working_tree_dir
logging.debug("Server repo path cloned to: " + tempdir)
return tempdir
def delete_temp_repo(local_repo_path):
"""Delete a repository in a temporary directory.
Args:
local_repo_path: The path to the temporary directory.
Raises:
:class:`.TempdirError`: If the path given is not a temporary folder.
:class:`.TempdirError`: If the path given is not for a git repository.
"""
if not os.path.realpath(local_repo_path).startswith(tempfile.gettempdir()):
err_message = ("{local_repo_path:s} is not a temporary folder, cannot "
"delete.")
raise TempdirError(err_message.format(
local_repo_path=local_repo_path))
if not vcs_git.is_local_repo_root(local_repo_path):
err_message = ("{local_repo_path:s} is not a git root directory, "
"cannot delete.")
raise TempdirError(err_message.format(
local_repo_path=local_repo_path))
shutil.rmtree(local_repo_path)
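# A sketch of how the two helpers above are typically paired (the server repo
# path is hypothetical):
#
#     clone_path = get_local_temp_clone("controls/support/some_module")
#     try:
#         ...  # inspect the cloned working tree
#     finally:
#         delete_temp_repo(clone_path)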
def check_if_repos_equal(path_1, path_2):
"""Check if the two local paths given are equivalent.
    This requires all files and subfolders (including their names) to be
    identical; the names of the two top-level folders being compared are
    ignored.
The `.git` folder is ignored, as it is different even for a cloned
repository. The `.gitattributes` file is also ignored.
Args:
path_1: The first path for comparison.
path_2: The second path for comparison.
Returns:
bool: True if the directories are equal, False otherwise.
Raises:
:class:`.SettingsError`: If either of the two paths are blank.
:class:`subprocess.CalledProcessError`: If there is an unexpected \
error in :func:`subprocess.check_output`.
"""
if not (path_1 and path_2):
err_message = ("Two paths must be given to compare folders.\n"
"path 1: {path_1:s}, path 2: {path_2:s}.")
raise SettingsError(err_message.format(path_1=path_1, path_2=path_2))
# .keep files allow git to store otherwise empty folders.
command_format = ("diff -rq --exclude=.git --exclude=.gitattributes "
"--exclude=.keep {path1:s} {path2:s}")
call_args = command_format.format(path1=path_1, path2=path_2).split()
logging.debug("Testing folders for equality.")
logging.debug("Comparison path one: " + path_1)
logging.debug("Comparison path two: " + path_2)
try:
subprocess.check_output(call_args)
except subprocess.CalledProcessError as e:
logging.debug(e.output)
if e.returncode == 1: # Indicates files are different.
return False
else:
raise
return True
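# Sketch of the comparison performed above, with illustrative paths: for
# path_1='/tmp/clone_a' and path_2='/tmp/clone_b' the command run is
#     diff -rq --exclude=.git --exclude=.gitattributes --exclude=.keep \
#         /tmp/clone_a /tmp/clone_b
# diff exits with 0 when the trees match (-> True), 1 when they differ
# (-> False) and 2 on error, which is re-raised as CalledProcessError.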
class SystemTest(object):
"""Class for the automatic generation of system tests using nosetests.
Attributes:
_script: The script to be tested.
description: The test description as used by nosetests.
_std_out: The standard output of the script called.
_std_err: The standard error of the script called.
_return_code: The return code of the script called.
_server_repo_clone_path: The path to a clone of `_server_repo_path`.
_default_server_repo_path: A server path pointing to a default repo.
This is used to set `_server_repo_path` to a default state.
_exception_type: The exception type to test for in standard error.
_exception_string: The exception string to test for in standard error.
_std_out_compare_string: The string for standard output comparisons.
_std_out_starts_with_string: Standard output 'startswith' check string.
_std_out_ends_with_string: Standard output 'endswith' check string.
_arguments: A string containing the arguments for the given script.
_input: The input to be sent to the script while it's running.
_attributes_dict: A dictionary of all git attributes to check for.
_local_repo_path: A local path, used for attribute checking.
_repo_comp_method: Specifies how standard output is tested.
This can be: 'local_comp' to test `_local_comp_path_one` against
`_local_comp_path_two`. 'server_comp' to test
`_local_comp_path_one` against `_server_repo_clone_path` (cloned
from `_server_repo_path`) or 'all_comp' to compare all three paths
against each other.
_local_comp_path_one: A local path used for directory comparisons.
_local_comp_path_two: A local path used for directory comparisons.
_server_repo_path: The remote repository path.
This is used for both git attribute checking as well as directory
comparisons (after being cloned to `_server_repo_clone_path`)
_branch_name: The name of the repository branch.
This is used for checking that the given `_local_repo_path` is on
the given branch, as well as changing `_server_repo_clone_path`'s
branch.
_settings_list: A list of all attributes that may be changed.
Raises:
:class:`.SettingsError`: Indicates issues with given settings.
:class:`.TempdirError`: Indicates problem when acting on a temporary \
directory.
:class:`dls_ade.exceptions.VCSGitError`: Indicates error in this \
class or in the settings dict.
:class:`AssertionError`: Indicates a failure of the script being \
tested.
:class:`.GitRootDirError`: Indicates if GIT_ROOT_DIR is not set.
"""
def __init__(self, script, description):
check_environment()
self.server = Server()
self._script = script
self.description = description
self._std_out = ""
self._std_err = ""
self._return_code = None
# Used for attribute checking and comparisons
self._server_repo_clone_path = ""
# Used to initialise a server repo to a default state.
self._default_server_repo_path = ""
        # Used to test exceptions
self._exception_type = ""
self._exception_string = ""
# Used to test output
self._std_out_compare_string = None
self._std_out_starts_with_string = None
self._std_out_ends_with_string = None
# Used to alter script interaction
self._arguments = ""
self._input = None
# Used for attribute checking
self._attributes_dict = {}
self._local_repo_path = ""
# Used for comparisons
self._repo_comp_method = ""
self._local_comp_path_one = ""
self._local_comp_path_two = ""
# Used for attribute checking and comparisons
self._server_repo_path = ""
# Used to check for branch names, and also changes the branch of the
# local `_server_repo_clone_path` repo.
self._branch_name = ""
self._settings_list = [ # List of valid variables to update.
'default_server_repo_path',
'exception_type',
'exception_string',
'std_out_compare_string',
'std_out_starts_with_string',
'std_out_ends_with_string',
'arguments',
'input',
'attributes_dict',
'server_repo_path',
'local_repo_path',
'repo_comp_method',
'local_comp_path_one',
'local_comp_path_two',
'branch_name',
]
def load_settings(self, settings):
"""Loads the given settings dictionary into the relevant variables.
Note: This will only load the following variables:
- default_server_repo_path
- exception_type
- exception_string
- std_out_compare_string
- std_out_starts_with_string
- std_out_ends_with_string
- arguments
- input
- attributes_dict
- server_repo_path
- local_repo_path
- repo_comp_method
- local_comp_path_one
- local_comp_path_two
- branch_name
Args:
settings: The dictionary of settings used to set up the test.
"""
self.__dict__.update({("_" + key): value for (key, value)
in settings.items()
if key in self._settings_list})
logging.debug("The test's local variables are:")
for key, value in self.__dict__.items():
logging.debug(str(key) + ": " + str(value))
logging.debug("End of local variables.")
def __call__(self):
"""Defined for the use of nosetests.
This is considered the test function.
Raises:
:class:`.SettingsError`: From :meth:`.run_tests`.
:class:`.SettingsError`: From :meth:`.set_server_repo_to_default`.
:class:`.TempdirError`: From :meth:`.run_tests`
:class:`AssertionError`: From :meth:`.run_tests`.
:class:`dls_ade.exceptions.VCSGitError`: From :meth:`.run_tests`.
"""
self.set_server_repo_to_default()
self.call_script()
self.run_tests()
def set_server_repo_to_default(self):
"""Sets the given server repository to a default state.
Note:
If used on an existing server repository, all commit history will
be overwritten.
Raises:
:class:`.SettingsError`: If default given but no server repo.
:class:`dls_ade.exceptions.VCSGitError`: From \
:mod:`~dls_ade.vcs_git` functions.
"""
if not self._default_server_repo_path:
return
if not self._server_repo_path:
raise SettingsError("If 'default_server_repo_path is set, then "
"'server_repo_path' must also be set.")
logging.debug("Setting server repo to default.")
logging.debug("'Default' server repo path: " +
self._default_server_repo_path)
vcs = self.server.temp_clone(self._default_server_repo_path)
temp_repo = vcs.repo
vcs_git.delete_remote(temp_repo, "origin")
if self.server.is_server_repo(self._server_repo_path):
temp_repo.create_remote(
"origin",
os.path.join(self.server.url, self._server_repo_path)
)
temp_repo.git.push("origin", temp_repo.active_branch, "-f")
else:
vcs.add_new_remote_and_push(self._server_repo_path)
def call_script(self):
"""Call the script and store output, error and return code.
If `input` is set, this will pass the input to the child process.
Raises:
:class:`ValueError`: From Popen().
"""
call_args = (self._script + " " + self._arguments).split()
logging.debug("About to call script with: " + " ".join(call_args))
# If no input is given, this will prevent communicate() from sending
# data to the child process.
if self._input is not None:
stdin_pipe = subprocess.PIPE
else:
stdin_pipe = None
process = subprocess.Popen(call_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=stdin_pipe)
self._std_out, self._std_err = process.communicate(self._input)
self._std_out = bytes_to_string(self._std_out)
self._std_err = bytes_to_string(self._std_err)
logging.debug("standard out:\n" + self._std_out)
logging.debug("standard error:\n" + self._std_err)
self._return_code = process.returncode
logging.debug("return code: " + str(self._return_code))
def run_tests(self):
"""Performs the entire test suite.
Raises:
:class:`.SettingsError`: From the tests.
:class:`.TempdirError`: From :meth:`delete_cloned_server_repo`
:class:`AssertionError`: From the tests.
:class:`dls_ade.exceptions.VCSGitError`: From the tests.
"""
logging.debug("Performing tests.")
self.check_std_out_and_exceptions()
self.check_for_and_clone_remote_repo()
self.run_git_repo_tests()
self.run_comparison_tests()
# Filesystem equality checks
self.delete_cloned_server_repo()
def check_std_out_and_exceptions(self):
"""Performs all the standard out and error comparisons.
This includes exception testing.
Raises:
:class:`.SettingsError`: From the comparison tests.
:class:`AssertionError`: From the comparison tests.
:class:`dls_ade.exceptions.VCSGitError`: From the comparison tests.
"""
self.check_std_out_for_exception_string()
self.compare_std_out_to_string()
self.check_std_out_starts_with_string()
self.check_std_out_ends_with_string()
def check_std_out_for_exception_string(self):
"""Check the standard out and error for the exception information.
Raises:
:class:`.SettingsError`: If either the exception type or string \
is blank while the other is not.
:class:`AssertionError`: If the test does not pass.
"""
if not self._exception_type or not self._exception_string:
if not self._exception_type and not self._exception_string:
return
raise SettingsError("Both exception_type and exception_string "
"must be provided.")
if isinstance(self._exception_string, list):
expected_string_components = self._exception_string
else:
expected_string_components = [self._exception_string]
expected_string_components.append(self._exception_type)
logging.debug("Expected error string components: " +
",".join(expected_string_components))
assert_true(all(elem in self._std_out for elem in expected_string_components) or
all(elem in self._std_err for elem in expected_string_components))
def compare_std_out_to_string(self):
"""Compare the standard output to std_out_compare_string.
Raises:
:class:`AssertionError`: If the test does not pass.
"""
if self._std_out_compare_string is None:
return
logging.debug("Comparing the standard output to comparison string.")
if isinstance(self._std_out_compare_string, list):
expected_string_components = self._std_out_compare_string
else:
expected_string_components = [self._std_out_compare_string]
assert_true(all(elem in self._std_out
for elem in expected_string_components))
def check_std_out_starts_with_string(self):
"""Check if the standard output starts with std_out_starts_with_string.
Raises:
:class:`AssertionError`: If the test does not pass.
"""
if self._std_out_starts_with_string is None:
return
logging.debug("Checking if the standard output starts with the given "
"string.")
assert_true(self._std_out.startswith(self._std_out_starts_with_string))
def check_std_out_ends_with_string(self):
"""Check if the standard output ends with std_out_ends_with_string.
Raises:
:class:`AssertionError`: If the test does not pass.
"""
if self._std_out_ends_with_string is None:
return
logging.debug("Checking if the standard output ends with the given "
"string.")
assert_true(self._std_out.endswith(self._std_out_ends_with_string))
def check_for_and_clone_remote_repo(self):
"""Checks server repo path exists and clones it.
Raises:
:class:`AssertionError`: From check_remote_repo_exists
:class:`dls_ade.exceptions.VCSGitError`: from clone_server_repo
"""
if not self._server_repo_path:
return
self.check_remote_repo_exists()
self.clone_server_repo()
def check_remote_repo_exists(self):
"""Check that the server_repo_path exists on the server.
Raises:
:class:`AssertionError`: If the test does not pass.
"""
logging.debug("Checking server repo path given exists.")
assert_true(self.server.is_server_repo(self._server_repo_path))
def clone_server_repo(self):
"""Clone the server_repo_path to a temp dir and return the path.
If a branch name is set, then the remote branch will be checked out.
Raises:
VCSGitError: From vcs_git.temp_clone()
"""
logging.debug("Cloning the server repository to temporary directory.")
repo = self.server.temp_clone(self._server_repo_path).repo
if self._branch_name:
vcs_git.checkout_remote_branch(self._branch_name, repo)
self._server_repo_clone_path = repo.working_tree_dir
logging.debug("The cloned directory is: " +
self._server_repo_clone_path)
def run_git_repo_tests(self):
"""Perform all repository-related tests.
These are the tests that require a git repository to be given.
Raises:
:class:`.SettingsError`: From :meth:`.run_git_attributes_tests`
:class:`.AssertionError`: From :meth:`.run_git_attributes_tests`
"""
self.check_local_repo_active_branch()
self.run_git_attributes_tests()
# This should check local_repo and server_repo_path for attributes_dict
def check_local_repo_active_branch(self):
"""This checks if the local repository's active branch is correct."""
if not self._branch_name:
return
if not self._local_repo_path:
# The branch_name may still be used when cloning the server repo.
return
logging.debug("Checking that local repository active branch is "
"correct.")
current_active_branch = vcs_git.get_active_branch(
vcs_git.init_repo(self._local_repo_path))
logging.debug("Actual branch: " + current_active_branch)
assert_equal(self._branch_name, current_active_branch)
def run_git_attributes_tests(self):
"""Perform the git attributes tests.
Raises:
:class:`.SettingsError`: If no path is provided given an \
attributes dictionary.
:class:`.AssertionError`: If the test does not pass.
"""
if not self._attributes_dict:
return
if not (self._server_repo_clone_path or self._local_repo_path):
raise SettingsError("As an attributes dict has been provided, "
"either the local_repo_path or "
"server_repo_clone_path must be provided.")
if self._server_repo_clone_path:
logging.debug("Testing server clone's attributes.")
return_value = vcs_git.check_git_attributes(
vcs_git.init_repo(self._server_repo_clone_path),
self._attributes_dict
)
assert_true(return_value)
if self._local_repo_path:
logging.debug("Testing local repo's attributes.")
return_value = vcs_git.check_git_attributes(
vcs_git.init_repo(self._local_repo_path),
self._attributes_dict)
assert_true(return_value)
def run_comparison_tests(self):
"""Run the local path comparison tests.
The repo_comp_method must be one of the following:
- `local_comp`: Compares the two local paths.
Paths are local_comp_path_one and local_comp_path_two.
- `server_comp`: Compares a local path with the cloned server repo.
Paths are local_comp_path_one and server_repo_clone_path.
- `all_comp`: Compares all three paths against one another.
Paths are local_comp_path_one, local_comp_path_two and
server_repo_clone_path.
Raises:
:class:`.SettingsError`: From :func:`.check_if_repos_equal`.
:class:`.SettingsError`: If the `repo_comp_method` has an \
unexpected value.
:class:`AssertionError`: If the test does not pass.
:class:`subprocess.CalledProcessError`: From \
:func:`.check_if_repos_equal`.
"""
if not self._repo_comp_method:
return
if self._repo_comp_method == "local_comp":
logging.debug("Performing 'local' comparison.")
equal = check_if_repos_equal(self._local_comp_path_one,
self._local_comp_path_two)
assert_true(equal)
elif self._repo_comp_method == "server_comp":
logging.debug("Performing 'server' comparison.")
equal = check_if_repos_equal(self._local_comp_path_one,
self._server_repo_clone_path)
assert_true(equal)
elif self._repo_comp_method == "all_comp":
logging.debug("Performing 'all' comparison.")
equal_1 = check_if_repos_equal(self._local_comp_path_one,
self._local_comp_path_two)
equal_2 = check_if_repos_equal(self._local_comp_path_one,
self._server_repo_clone_path)
assert_true(equal_1 and equal_2)
else:
err_message = ("The repo_comp_method must be called using one of "
"the following:"
"\nlocal_comp, server_comp, all_comp."
"\nGot: {repo_comp_method:s}")
raise SettingsError(err_message.format(
repo_comp_method=self._repo_comp_method)
)
def delete_cloned_server_repo(self):
"""Deletes the clone of the server repository.
Raises:
:class:`.TempdirError`: If the clone path is not a directory.
:class:`.TempdirError`: If the clone path is not a git repository.
"""
if not self._server_repo_clone_path:
return
logging.debug("About to delete temporary repository: " +
self._server_repo_clone_path)
delete_temp_repo(self._server_repo_clone_path)
self._server_repo_clone_path = ""
def generate_tests_from_dicts(script, test_settings):
"""Generator for the automatic construction of system tests.
Args:
script: The script to be tested.
test_settings: The settings for each individual test.
Raises:
:class:`.SettingsError`: From :class:`.SystemTest`
:class:`.TempdirError`: From :class:`.SystemTest`
:class:`dls_ade.exceptions.VCSGitError`: From :class:`.SystemTest`
:class:`AssertionError`: From :class:`.SystemTest`
:class:`.GitRootDirError`: Indicates if GIT_ROOT_DIR is not set.
"""
check_environment()
for settings in test_settings:
if 'script' in settings:
script = settings.pop('script')
description = settings.pop('description')
system_test = SystemTest(script, description)
system_test.load_settings(settings)
yield system_test
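# A minimal sketch of the settings consumed by generate_tests_from_dicts();
# the script name, description and expected output are hypothetical:
#
#     settings_list = [{
#         'description': "script_prints_usage_when_given_help_flag",
#         'arguments': "--help",
#         'std_out_starts_with_string': "usage:",
#     }]
#
#     def test_generator():  # picked up by nosetests
#         for test in generate_tests_from_dicts("dls_some_script.py",
#                                               settings_list):
#             yield test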
|
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_equal,
assert_allclose, assert_raises, assert_)
from scipy._lib._numpy_compat import suppress_warnings
import pytest
from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,
sproot, splint, insert)
import scipy.linalg as sl
from scipy.interpolate._bsplines import _not_a_knot, _augknt
import scipy.interpolate._fitpack_impl as _impl
class TestBSpline(object):
def test_ctor(self):
# knots should be an ordered 1D array of finite real numbers
assert_raises((TypeError, ValueError), BSpline,
**dict(t=[1, 1.j], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
# for n+k+1 knots and degree k need at least n coefficients
assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
assert_raises(ValueError, BSpline,
**dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
# non-integer orders
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
        # basic interval cannot have measure zero (here: [1..1])
assert_raises(ValueError, BSpline,
**dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
# tck vs self.tck
n, k = 11, 3
t = np.arange(n+k+1)
c = np.random.random(n)
b = BSpline(t, c, k)
assert_allclose(t, b.t)
assert_allclose(c, b.c)
assert_equal(k, b.k)
def test_tck(self):
b = _make_random_spline()
tck = b.tck
assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
assert_equal(b.k, tck[2])
# b.tck is read-only
try:
b.tck = 'foo'
except AttributeError:
pass
except:
raise AssertionError("AttributeError not raised.")
def test_degree_0(self):
xx = np.linspace(0, 1, 10)
b = BSpline(t=[0, 1], c=[3.], k=0)
assert_allclose(b(xx), 3)
b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
def test_degree_1(self):
t = [0, 1, 2, 3, 4]
c = [1, 2, 3]
k = 1
b = BSpline(t, c, k)
x = np.linspace(1, 3, 50)
assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
b(x), atol=1e-14)
assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
def test_bernstein(self):
# a special knot vector: Bernstein polynomials
k = 3
t = np.asarray([0]*(k+1) + [1]*(k+1))
c = np.asarray([1., 2., 3., 4.])
bp = BPoly(c.reshape(-1, 1), [0, 1])
bspl = BSpline(t, c, k)
xx = np.linspace(-1., 2., 10)
assert_allclose(bp(xx, extrapolate=True),
bspl(xx, extrapolate=True), atol=1e-14)
assert_allclose(splev(xx, (t, c, k)),
bspl(xx), atol=1e-14)
def test_rndm_naive_eval(self):
# test random coefficient spline *on the base interval*,
# t[k] <= x < t[-k-1]
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
y_b = b(xx)
y_n = [_naive_eval(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n, atol=1e-14)
y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n2, atol=1e-14)
def test_rndm_splev(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
def test_rndm_splrep(self):
np.random.seed(1234)
x = np.sort(np.random.random(20))
y = np.random.random(20)
tck = splrep(x, y)
b = BSpline(*tck)
t, k = b.t, b.k
xx = np.linspace(t[k], t[-k-1], 80)
assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
def test_rndm_unity(self):
b = _make_random_spline()
b.c = np.ones_like(b.c)
xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
assert_allclose(b(xx), 1.)
def test_vectorization(self):
n, k = 22, 3
t = np.sort(np.random.random(n))
c = np.random.random(size=(n, 6, 7))
b = BSpline(t, c, k)
tm, tp = t[k], t[-k-1]
xx = tm + (tp - tm) * np.random.random((3, 4, 5))
assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
def test_len_c(self):
# for n+k+1 knots, only first n coefs are used.
# and BTW this is consistent with FITPACK
n, k = 33, 3
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
# pad coefficients with random garbage
c_pad = np.r_[c, np.random.random(k+1)]
b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
dt = t[-1] - t[0]
xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
assert_allclose(b(xx), b_pad(xx), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
def test_endpoints(self):
# base interval is closed
b = _make_random_spline()
t, _, k = b.tck
tm, tp = t[k], t[-k-1]
for extrap in (True, False):
assert_allclose(b([tm, tp], extrap),
b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
def test_continuity(self):
# assert continuity at internal knots
b = _make_random_spline()
t, _, k = b.tck
assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
atol=1e-9)
def test_extrap(self):
b = _make_random_spline()
t, c, k = b.tck
dt = t[-1] - t[0]
xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
mask = (t[k] < xx) & (xx < t[-k-1])
# extrap has no effect within the base interval
assert_allclose(b(xx[mask], extrapolate=True),
b(xx[mask], extrapolate=False))
# extrapolated values agree with FITPACK
assert_allclose(b(xx, extrapolate=True),
splev(xx, (t, c, k), ext=0))
def test_default_extrap(self):
# BSpline defaults to extrapolate=True
b = _make_random_spline()
t, _, k = b.tck
xx = [t[0] - 1, t[-1] + 1]
yy = b(xx)
assert_(not np.all(np.isnan(yy)))
def test_ppoly(self):
b = _make_random_spline()
t, c, k = b.tck
pp = PPoly.from_spline((t, c, k))
xx = np.linspace(t[k], t[-k], 100)
assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
def test_derivative_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[0], t[-1], 50)
xx = np.r_[xx, t]
for der in range(1, k+1):
yd = splev(xx, (t, c, k), der=der)
assert_allclose(yd, b(xx, nu=der), atol=1e-14)
# higher derivatives all vanish
assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
def test_derivative_jumps(self):
# example from de Boor, Chap IX, example (24)
# NB: knots augmented & corresp coefs are zeroed out
# in agreement with the convention (29)
k = 2
t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
np.random.seed(1234)
c = np.r_[0, 0, np.random.random(5), 0, 0]
b = BSpline(t, c, k)
# b is continuous at x != 6 (triple knot)
x = np.asarray([1, 3, 4, 6])
assert_allclose(b(x[x != 6] - 1e-10),
b(x[x != 6] + 1e-10))
assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
# 1st derivative jumps at double knots, 1 & 6:
x0 = np.asarray([3, 4])
assert_allclose(b(x0 - 1e-10, nu=1),
b(x0 + 1e-10, nu=1))
x1 = np.asarray([1, 6])
assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
b(x1 + 1e-10, nu=1))))
# 2nd derivative is not guaranteed to be continuous either
assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
b(x + 1e-10, nu=2))))
def test_basis_element_quadratic(self):
xx = np.linspace(-1, 4, 20)
b = BSpline.basis_element(t=[0, 1, 2, 3])
assert_allclose(b(xx),
splev(xx, (b.t, b.c, b.k)), atol=1e-14)
assert_allclose(b(xx),
B_0123(xx), atol=1e-14)
b = BSpline.basis_element(t=[0, 1, 1, 2])
xx = np.linspace(0, 2, 10)
assert_allclose(b(xx),
np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
def test_basis_element_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
def test_cmplx(self):
b = _make_random_spline()
t, c, k = b.tck
cc = c * (1. + 3.j)
b = BSpline(t, cc, k)
b_re = BSpline(t, b.c.real, k)
b_im = BSpline(t, b.c.imag, k)
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
def test_nan(self):
# nan in, nan out.
b = BSpline.basis_element([0, 1, 1, 2])
assert_(np.isnan(b(np.nan)))
def test_derivative_method(self):
b = _make_random_spline(k=5)
t, c, k = b.tck
b0 = BSpline(t, c, k)
xx = np.linspace(t[k], t[-k-1], 20)
for j in range(1, k):
b = b.derivative()
assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
def test_antiderivative_method(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
# repeat with n-D array for c
c = np.c_[c, c, c]
c = np.dstack((c, c))
b = BSpline(t, c, k)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
def test_integral(self):
b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x
assert_allclose(b.integrate(0, 1), 0.5)
assert_allclose(b.integrate(1, 0), -0.5)
        # outside of [0, 2], integrate() either extrapolates or returns zeros;
        # the default is to extrapolate
assert_allclose(b.integrate(-1, 1), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
def test_subclassing(self):
# classmethods should not decay to the base class
class B(BSpline):
pass
b = B.basis_element([0, 1, 2, 2])
assert_equal(b.__class__, B)
assert_equal(b.derivative().__class__, B)
assert_equal(b.antiderivative().__class__, B)
def test_axis(self):
n, k = 22, 3
t = np.linspace(0, 1, n + k + 1)
sh0 = [6, 7, 8]
for axis in range(4):
sh = sh0[:]
sh.insert(axis, n) # [22, 6, 7, 8] etc
c = np.random.random(size=sh)
b = BSpline(t, c, k, axis=axis)
assert_equal(b.c.shape,
[sh[axis],] + sh[:axis] + sh[axis+1:])
xp = np.random.random((3, 4, 5))
assert_equal(b(xp).shape,
sh[:axis] + list(xp.shape) + sh[axis+1:])
        # 0 <= axis < c.ndim
for ax in [-1, len(sh)+1]:
assert_raises(ValueError, BSpline, **dict(t=t, c=c, k=k, axis=ax))
# derivative, antiderivative keeps the axis
for b1 in [BSpline(t, c, k, axis=axis).derivative(),
BSpline(t, c, k, axis=axis).derivative(2),
BSpline(t, c, k, axis=axis).antiderivative(),
BSpline(t, c, k, axis=axis).antiderivative(2)]:
assert_equal(b1.axis, b.axis)
def test_knots_multiplicity():
# Take a spline w/ random coefficients, throw in knots of varying
# multiplicity.
def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
# check evaluations against FITPACK, incl extrapolations
t, c, k = b.tck
x = np.unique(t)
x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:1]), t[-1]+0.1]
assert_allclose(splev(x, (t, c, k), der), b(x, der),
atol=atol, rtol=rtol, err_msg='der = %s k = %s' % (der, b.k))
# test loop itself
# [the index `j` is for interpreting the traceback in case of a failure]
for k in [1, 2, 3, 4, 5]:
b = _make_random_spline(k=k)
for j, b1 in enumerate(_make_multiples(b)):
yield check_splev, b1, j
for der in range(1, k+1):
yield check_splev, b1, j, der, 1e-12, 1e-12
### stolen from @pv, verbatim
def _naive_B(x, k, i, t):
"""
Naive way to compute B-spline basis functions. Useful only for testing!
computes B(x; t[i],..., t[i+k+1])
"""
if k == 0:
return 1.0 if t[i] <= x < t[i+1] else 0.0
if t[i+k] == t[i]:
c1 = 0.0
else:
c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
if t[i+k+1] == t[i+1]:
c2 = 0.0
else:
c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
return (c1 + c2)
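# _naive_B above is a direct transcription of the Cox-de Boor recursion:
#   B[i, 0](x) = 1 if t[i] <= x < t[i+1] else 0
#   B[i, k](x) = (x - t[i]) / (t[i+k] - t[i]) * B[i, k-1](x)
#              + (t[i+k+1] - x) / (t[i+k+1] - t[i+1]) * B[i+1, k-1](x)
# with any 0/0 term taken to be 0 (the c1/c2 guards).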
### stolen from @pv, verbatim
def _naive_eval(x, t, c, k):
"""
Naive B-spline evaluation. Useful only for testing!
"""
if x == t[k]:
i = k
else:
i = np.searchsorted(t, x) - 1
assert t[i] <= x <= t[i+1]
assert i >= k and i < len(t) - k
return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
def _naive_eval_2(x, t, c, k):
"""Naive B-spline evaluation, another way."""
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
assert t[k] <= x <= t[n]
return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
def _sum_basis_elements(x, t, c, k):
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
s = 0.
for i in range(n):
b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
return s
def B_012(x):
""" A linear B-spline function B(x | 0, 1, 2)."""
x = np.atleast_1d(x)
return np.piecewise(x, [(x < 0) | (x > 2),
(x >= 0) & (x < 1),
(x >= 1) & (x <= 2)],
[lambda x: 0., lambda x: x, lambda x: 2.-x])
def B_0123(x, der=0):
"""A quadratic B-spline function B(x | 0, 1, 2, 3)."""
x = np.atleast_1d(x)
conds = [x < 1, (x > 1) & (x < 2), x > 2]
if der == 0:
funcs = [lambda x: x*x/2.,
lambda x: 3./4 - (x-3./2)**2,
lambda x: (3.-x)**2 / 2]
elif der == 2:
funcs = [lambda x: 1.,
lambda x: -2.,
lambda x: 1.]
else:
raise ValueError('never be here: der=%s' % der)
pieces = np.piecewise(x, conds, funcs)
return pieces
def _make_random_spline(n=35, k=3):
np.random.seed(123)
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
return BSpline.construct_fast(t, c, k)
def _make_multiples(b):
"""Increase knot multiplicity."""
c, k = b.c, b.k
t1 = b.t.copy()
t1[17:19] = t1[17]
t1[22] = t1[21]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[:k+1] = t1[0]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[-k-1:] = t1[-1]
yield BSpline(t1, c, k)
class TestInterop(object):
#
# Test that FITPACK-based spl* functions can deal with BSpline objects
#
def setup_method(self):
xx = np.linspace(0, 4.*np.pi, 41)
yy = np.cos(xx)
b = make_interp_spline(xx, yy)
self.tck = (b.t, b.c, b.k)
self.xx, self.yy, self.b = xx, yy, b
self.xnew = np.linspace(0, 4.*np.pi, 21)
c2 = np.c_[b.c, b.c, b.c]
self.c2 = np.dstack((c2, c2))
self.b2 = BSpline(b.t, self.c2, b.k)
def test_splev(self):
xnew, b, b2 = self.xnew, self.b, self.b2
# check that splev works with 1D array of coefficients
# for array and scalar `x`
assert_allclose(splev(xnew, b),
b(xnew), atol=1e-15, rtol=1e-15)
assert_allclose(splev(xnew, b.tck),
b(xnew), atol=1e-15, rtol=1e-15)
assert_allclose([splev(x, b) for x in xnew],
b(xnew), atol=1e-15, rtol=1e-15)
        # With n-D coefficients, there's a quirk:
# splev(x, BSpline) is equivalent to BSpline(x)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.")
assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)
# However, splev(x, BSpline.tck) needs some transposes. This is because
# BSpline interpolates along the first axis, while the legacy FITPACK
# wrapper does list(map(...)) which effectively interpolates along the
# last axis. Like so:
sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)
cc = b2.c.transpose(sh)
tck = (b2.t, cc, b2.k)
assert_allclose(splev(xnew, tck),
b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
def test_splrep(self):
x, y = self.xx, self.yy
# test that "new" splrep is equivalent to _impl.splrep
tck = splrep(x, y)
t, c, k = _impl.splrep(x, y)
assert_allclose(tck[0], t, atol=1e-15)
assert_allclose(tck[1], c, atol=1e-15)
assert_equal(tck[2], k)
# also cover the `full_output=True` branch
tck_f, _, _, _ = splrep(x, y, full_output=True)
assert_allclose(tck_f[0], t, atol=1e-15)
assert_allclose(tck_f[1], c, atol=1e-15)
assert_equal(tck_f[2], k)
# test that the result of splrep roundtrips with splev:
# evaluate the spline on the original `x` points
yy = splev(x, tck)
assert_allclose(y, yy, atol=1e-15)
# ... and also it roundtrips if wrapped in a BSpline
b = BSpline(*tck)
assert_allclose(y, b(x), atol=1e-15)
# test that both "old" and "new" splrep raise for an n-D ``y`` array
# with n > 1
y2 = np.c_[y, y]
assert_raises(Exception, splrep, x, y2)
assert_raises(Exception, _impl.splrep, x, y2)
def test_splprep(self):
x = np.arange(15).reshape((3, 5))
b, u = splprep(x)
tck, u1 = _impl.splprep(x)
# test the roundtrip with splev for both "old" and "new" output
assert_allclose(u, u1, atol=1e-15)
assert_allclose(splev(u, b), x, atol=1e-15)
assert_allclose(splev(u, tck), x, atol=1e-15)
# cover the ``full_output=True`` branch
(b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
assert_allclose(u, u_f, atol=1e-15)
assert_allclose(splev(u_f, b_f), x, atol=1e-15)
# test that both "old" and "new" code paths raise for x.ndim > 2
x1 = np.arange(3*4*5).reshape((3, 4, 5))
assert_raises(ValueError, splprep, x1)
assert_raises(ValueError, _impl.splprep, x1)
def test_sproot(self):
b, b2 = self.b, self.b2
roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
# sproot accepts a BSpline obj w/ 1D coef array
assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
# ... and deals with trailing dimensions if coef array is n-D
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling sproot.. with BSpline objects with c.ndim > 1 is not recommended.")
r = sproot(b2, mest=50)
r = np.asarray(r)
assert_equal(r.shape, (3, 2, 4))
assert_allclose(r - roots, 0, atol=1e-12)
# and legacy behavior is preserved for a tck tuple w/ n-D coef
c2r = b2.c.transpose(1, 2, 0)
rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
assert_equal(rr.shape, (3, 2, 4))
assert_allclose(rr - roots, 0, atol=1e-12)
def test_splint(self):
# test that splint accepts BSpline objects
b, b2 = self.b, self.b2
assert_allclose(splint(0, 1, b),
splint(0, 1, b.tck), atol=1e-14)
assert_allclose(splint(0, 1, b),
b.integrate(0, 1), atol=1e-14)
# ... and deals with n-D arrays of coefficients
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling splint.. with BSpline objects with c.ndim > 1 is not recommended.")
assert_allclose(splint(0, 1, b2), b2.integrate(0, 1), atol=1e-14)
# and the legacy behavior is preserved for a tck tuple w/ n-D coef
c2r = b2.c.transpose(1, 2, 0)
integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
assert_equal(integr.shape, (3, 2))
assert_allclose(integr,
splint(0, 1, b), atol=1e-14)
def test_splder(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
if ct > 0:
b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
for n in [1, 2, 3]:
bd = splder(b)
tck_d = _impl.splder((b.t, b.c, b.k))
assert_allclose(bd.t, tck_d[0], atol=1e-15)
assert_allclose(bd.c, tck_d[1], atol=1e-15)
assert_equal(bd.k, tck_d[2])
assert_(isinstance(bd, BSpline))
assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
def test_splantider(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
if ct > 0:
b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
for n in [1, 2, 3]:
bd = splantider(b)
tck_d = _impl.splantider((b.t, b.c, b.k))
assert_allclose(bd.t, tck_d[0], atol=1e-15)
assert_allclose(bd.c, tck_d[1], atol=1e-15)
assert_equal(bd.k, tck_d[2])
assert_(isinstance(bd, BSpline))
assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
def test_insert(self):
b, b2, xx = self.b, self.b2, self.xx
j = b.t.size // 2
tn = 0.5*(b.t[j] + b.t[j+1])
bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
assert_allclose(splev(xx, bn),
splev(xx, tck_n), atol=1e-15)
assert_(isinstance(bn, BSpline))
assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out
# for n-D array of coefficients, BSpline.c needs to be transposed
# after that, the results are equivalent.
sh = tuple(range(b2.c.ndim))
c_ = b2.c.transpose(sh[1:] + (0,))
tck_n2 = insert(tn, (b2.t, c_, b2.k))
bn2 = insert(tn, b2)
# need a transpose for comparing the results, cf test_splev
assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
bn2(xx), atol=1e-15)
assert_(isinstance(bn2, BSpline))
assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out
class TestInterp(object):
#
# Test basic ways of constructing interpolating splines.
#
xx = np.linspace(0., 2.*np.pi)
yy = np.sin(xx)
def test_order_0(self):
b = make_interp_spline(self.xx, self.yy, k=0)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_linear(self):
b = make_interp_spline(self.xx, self.yy, k=1)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_not_a_knot(self):
for k in [3, 5]:
b = make_interp_spline(self.xx, self.yy, k)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_quadratic_deriv(self):
der = [(1, 8.)] # order, value: f'(x) = 8.
# derivative at right-hand edge
b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)
# derivative at left-hand edge
b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)
def test_cubic_deriv(self):
k = 3
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.)], [(1, 4.)]
b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
[der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
# 'natural' cubic spline, zero out 2nd derivatives at the boundaries
der_l, der_r = [(2, 0)], [(2, 0)]
b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_quintic_derivs(self):
k, n = 5, 7
x = np.arange(n).astype(np.float_)
y = np.sin(x)
der_l = [(1, -12.), (2, 1)]
der_r = [(1, 8.), (2, 3.)]
b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
assert_allclose([b(x[0], 1), b(x[0], 2)],
[val for (nu, val) in der_l])
assert_allclose([b(x[-1], 1), b(x[-1], 2)],
[val for (nu, val) in der_r])
@pytest.mark.xfail(reason='unstable')
def test_cubic_deriv_unstable(self):
# 1st and 2nd derivative at x[0], no derivative information at x[-1]
# The problem is not that it fails [who would use this anyway],
# the problem is that it fails *silently*, and I've no idea
# how to detect this sort of instability.
# In this particular case: it's OK for len(t) < 20, goes haywire
# at larger `len(t)`.
k = 3
t = _augknt(self.xx, k)
der_l = [(1, 3.), (2, 4.)]
b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_knots_not_data_sites(self):
# Knots need not coincide with the data sites.
# use a quadratic spline, knots are at data averages,
# two additional constraints are zero 2nd derivs at edges
k = 2
t = np.r_[(self.xx[0],)*(k+1),
(self.xx[1:] + self.xx[:-1]) / 2.,
(self.xx[-1],)*(k+1)]
b = make_interp_spline(self.xx, self.yy, k, t,
bc_type=([(2, 0)], [(2, 0)]))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
atol=1e-14)
def test_minimum_points_and_deriv(self):
        # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * x**2 and
# f'(0) = 0, f'(1) = 3.
k = 3
x = [0., 1.]
y = [0., 1.]
b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
xx = np.linspace(0., 1.)
yy = xx**3
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
# If one of the derivatives is omitted, the spline definition is
# incomplete:
assert_raises(ValueError, make_interp_spline, x, y, k,
**dict(bc_type=([(1, 0.)], None)))
def test_complex(self):
k = 3
xx = self.xx
yy = self.yy + 1.j*self.yy
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
[der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
# also test zero and first order
for k in (0, 1):
b = make_interp_spline(xx, yy, k=k)
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_int_xy(self):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
# cython chokes on "buffer type mismatch" (construction) or
# "no matching signature found" (evaluation)
for k in (0, 1, 2, 3):
b = make_interp_spline(x, y, k=k)
b(x)
def test_sliced_input(self):
# cython code chokes on non C contiguous arrays
xx = np.linspace(-1, 1, 100)
x = xx[::5]
y = xx[::5]
for k in (0, 1, 2, 3):
make_interp_spline(x, y, k=k)
def test_check_finite(self):
# check_finite defaults to True; nans and such trigger a ValueError
x = np.arange(10).astype(float)
y = x**2
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, make_interp_spline, x, y)
def test_multiple_rhs(self):
yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
der_l = [(1, [1., 2.])]
der_r = [(1, [3., 4.])]
b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)
def test_shapes(self):
np.random.seed(1234)
k, n = 3, 22
x = np.sort(np.random.random(size=n))
y = np.random.random(size=(n, 5, 6, 7))
b = make_interp_spline(x, y, k)
assert_equal(b.c.shape, (n, 5, 6, 7))
# now throw in some derivatives
d_l = [(1, np.random.random((5, 6, 7)))]
d_r = [(1, np.random.random((5, 6, 7)))]
b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
def test_full_matrix(self):
np.random.seed(1234)
k, n = 3, 7
x = np.sort(np.random.random(size=n))
y = np.random.random(size=n)
t = _not_a_knot(x, k)
b = make_interp_spline(x, y, k, t)
cf = make_interp_full_matr(x, y, t, k)
assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
def make_interp_full_matr(x, y, t, k):
"""Assemble an spline order k with knots t to interpolate
y(x) using full matrices.
Not-a-knot BC only.
This routine is here for testing only (even though it's functional).
"""
assert x.size == y.size
assert t.size == x.size + k + 1
n = x.size
A = np.zeros((n, n), dtype=np.float_)
for j in range(n):
xval = x[j]
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
c = sl.solve(A, y)
return c
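# In the routine above, row j of the collocation matrix A holds the k+1
# B-splines that are nonzero at x[j] (columns left-k .. left), so solving
# A @ c = y yields coefficients with sum_i c[i] * B[i, k](x[j]) = y[j].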
### XXX: 'periodic' interp spline using full matrices
def make_interp_per_full_matr(x, y, t, k):
x, y, t = map(np.asarray, (x, y, t))
n = x.size
nt = t.size - k - 1
# have `n` conditions for `nt` coefficients; need nt-n derivatives
assert nt - n == k - 1
# LHS: the collocation matrix + derivatives at edges
A = np.zeros((nt, nt), dtype=np.float_)
# derivatives at x[0]:
offset = 0
if x[0] == t[k]:
left = k
else:
left = np.searchsorted(t, x[0]) - 1
if x[-1] == t[k]:
left2 = k
else:
left2 = np.searchsorted(t, x[-1]) - 1
for i in range(k-1):
bb = _bspl.evaluate_all_bspl(t, k, x[0], left, nu=i+1)
A[i, left-k:left+1] = bb
bb = _bspl.evaluate_all_bspl(t, k, x[-1], left2, nu=i+1)
A[i, left2-k:left2+1] = -bb
offset += 1
# RHS
y = np.r_[[0]*(k-1), y]
# collocation matrix
for j in range(n):
xval = x[j]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j + offset, left-k:left+1] = bb
c = sl.solve(A, y)
return c
def make_lsq_full_matrix(x, y, t, k=3):
"""Make the least-square spline, full matrices."""
x, y, t = map(np.asarray, (x, y, t))
m = x.size
n = t.size - k - 1
A = np.zeros((m, n), dtype=np.float_)
for j in range(m):
xval = x[j]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
# have observation matrix, can solve the LSQ problem
B = np.dot(A.T, A)
Y = np.dot(A.T, y)
c = sl.solve(B, Y)
return c, (A, Y)
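# Hedged sketch (not a test): the normal-equations solution assembled by
# make_lsq_full_matrix should agree with make_lsq_spline on the same data,
# knots and order. `_demo_lsq_full_matrix` is an illustrative name only and
# is never called by the tests below.
def _demo_lsq_full_matrix(m=20, k=3, seed=1):
    rng = np.random.RandomState(seed)
    x = np.sort(rng.random_sample(m))
    y = np.cos(2 * np.pi * x)
    t = _augknt(np.linspace(x[0], x[-1], 7), k)
    c_full, _ = make_lsq_full_matrix(x, y, t, k)
    b = make_lsq_spline(x, y, t, k)
    return np.allclose(b.c, c_full, atol=1e-10)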
class TestLSQ(object):
#
# Test make_lsq_spline
#
np.random.seed(1234)
n, k = 13, 3
x = np.sort(np.random.random(n))
y = np.random.random(n)
t = _augknt(np.linspace(x[0], x[-1], 7), k)
def test_lstsq(self):
# check LSQ construction vs a full matrix version
x, y, t, k = self.x, self.y, self.t, self.k
c0, AY = make_lsq_full_matrix(x, y, t, k)
b = make_lsq_spline(x, y, t, k)
assert_allclose(b.c, c0)
assert_equal(b.c.shape, (t.size - k - 1,))
# also check against numpy.lstsq
aa, yy = AY
c1, _, _, _ = np.linalg.lstsq(aa, y)
assert_allclose(b.c, c1)
def test_weights(self):
# weights = 1 is same as None
x, y, t, k = self.x, self.y, self.t, self.k
w = np.ones_like(x)
b = make_lsq_spline(x, y, t, k)
b_w = make_lsq_spline(x, y, t, k, w=w)
assert_allclose(b.t, b_w.t, atol=1e-14)
assert_allclose(b.c, b_w.c, atol=1e-14)
assert_equal(b.k, b_w.k)
def test_multiple_rhs(self):
x, t, k, n = self.x, self.t, self.k, self.n
y = np.random.random(size=(n, 5, 6, 7))
b = make_lsq_spline(x, y, t, k)
assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))
def test_complex(self):
        # complex-valued `y`
x, t, k = self.x, self.t, self.k
yc = self.y * (1. + 2.j)
b = make_lsq_spline(x, yc, t, k)
b_re = make_lsq_spline(x, yc.real, t, k)
b_im = make_lsq_spline(x, yc.imag, t, k)
assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
def test_int_xy(self):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
t = _augknt(x, k=1)
# cython chokes on "buffer type mismatch"
make_lsq_spline(x, y, t, k=1)
def test_sliced_input(self):
# cython code chokes on non C contiguous arrays
xx = np.linspace(-1, 1, 100)
x = xx[::3]
y = xx[::3]
t = _augknt(x, 1)
make_lsq_spline(x, y, t, k=1)
def test_checkfinite(self):
# check_finite defaults to True; nans and such trigger a ValueError
x = np.arange(12).astype(float)
y = x**2
t = _augknt(x, 3)
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, make_lsq_spline, x, y, t)
|
|
import logging
import os
import shutil
import signal
import sys
import threading
import time
from distutils.dir_util import copy_tree
from subprocess import Popen
import plac
import pymysql
import psycopg2
from elasticsearch import Elasticsearch
from scrapy.utils.log import configure_logging
cur_path = os.path.dirname(os.path.realpath(__file__))
par_path = os.path.dirname(cur_path)
sys.path.append(cur_path)
sys.path.append(par_path)
from newsplease.helper_classes.savepath_parser import SavepathParser
from newsplease.config import JsonConfig
from newsplease.config import CrawlerConfig
try:
import builtins
except ImportError:
from future import builtins
if sys.version_info[0] < 3:
ConnectionError = OSError
class NewsPleaseLauncher(object):
"""
This class is supposed to be called initially to start all processes. It
sets up and manages all crawlers.
"""
python_command = None
crawlers = []
cfg = None
log = None
cfg_directory_path = None
cfg_file_path = None
json_file_path = None
shall_resume = False
no_confirm = False
threads = []
threads_daemonized = []
crawler_list = None
daemon_list = None
shutdown = False
thread_event = None
mysql = None
postgresql = None
elasticsearch = None
number_of_active_crawlers = 0
config_directory_default_path = "~/news-please-repo/config/"
config_file_default_name = "config.cfg"
library_mode = None
__single_crawler = False
def __init__(self, cfg_directory_path, is_resume, is_reset_elasticsearch,
is_reset_json, is_reset_mysql, is_reset_postgresql, is_no_confirm, library_mode=False):
"""
The constructor of the main class, thus the real entry point to the tool.
        :param cfg_directory_path:
:param is_resume:
:param is_reset_elasticsearch:
:param is_reset_json:
:param is_reset_mysql:
:param is_reset_postgresql:
:param is_no_confirm:
"""
configure_logging({"LOG_LEVEL": "ERROR"})
self.log = logging.getLogger(__name__)
# other parameters
self.shall_resume = is_resume
self.no_confirm = is_no_confirm
self.library_mode = library_mode
        # Sets an environment variable called 'CColon', so scripts can import
        # modules of this project relative to this script's directory.
        # Example: sitemap_crawler can import UrlExtractor via
        # from newsplease.helper_classes.url_extractor import UrlExtractor
os.environ['CColon'] = os.path.abspath(os.path.dirname(__file__))
# set stop handlers
self.set_stop_handler()
# threading
self.thread_event = threading.Event()
# Get & set CFG and JSON locally.
if cfg_directory_path:
# if a path was given by the user
self.cfg_directory_path = self.get_expanded_path(cfg_directory_path)
else:
# if no path was given by the user, use default
self.cfg_directory_path = self.get_expanded_path(self.config_directory_default_path)
# init cfg path if empty
self.init_config_file_path_if_empty()
self.cfg_file_path = self.cfg_directory_path + self.config_file_default_name
# config
self.cfg = CrawlerConfig.get_instance()
self.cfg.setup(self.cfg_file_path)
self.mysql = self.cfg.section("MySQL")
self.postgresql = self.cfg.section("Postgresql")
self.elasticsearch = self.cfg.section("Elasticsearch")
# perform reset if given as parameter
if is_reset_mysql:
self.reset_mysql()
if is_reset_postgresql:
self.reset_postgresql()
if is_reset_json:
self.reset_files()
if is_reset_elasticsearch:
self.reset_elasticsearch()
# close the process
if is_reset_elasticsearch or is_reset_json or is_reset_mysql or is_reset_postgresql:
sys.exit(0)
self.json_file_path = self.cfg_directory_path + self.cfg.section('Files')['url_input_file_name']
self.json = JsonConfig.get_instance()
self.json.setup(self.json_file_path)
self.crawler_list = self.CrawlerList()
self.daemon_list = self.DaemonList()
self.__single_crawler = self.get_abs_file_path("./single_crawler.py", True, False)
self.manage_crawlers()
def set_stop_handler(self):
"""
Initializes functions that are invoked when the user or OS wants to kill this process.
:return:
"""
signal.signal(signal.SIGTERM, self.graceful_stop)
signal.signal(signal.SIGABRT, self.graceful_stop)
signal.signal(signal.SIGINT, self.graceful_stop)
@staticmethod
def has_arg(string):
"""
Determines if the string passed to this method was passed to the
script.
:param str string: string to test
:rtype: bool
"""
        return string in sys.argv
def manage_crawlers(self):
"""
        Manages all crawlers and threads and limits the number of parallel
running threads.
"""
sites = self.json.get_site_objects()
for index, site in enumerate(sites):
if "daemonize" in site:
self.daemon_list.add_daemon(index, site["daemonize"])
elif "additional_rss_daemon" in site:
self.daemon_list.add_daemon(index,
site["additional_rss_daemon"])
self.crawler_list.append_item(index)
else:
self.crawler_list.append_item(index)
num_threads = self.cfg.section('Crawler')[
'number_of_parallel_crawlers']
if self.crawler_list.len() < num_threads:
num_threads = self.crawler_list.len()
for _ in range(num_threads):
thread = threading.Thread(target=self.manage_crawler,
args=(),
kwargs={})
self.threads.append(thread)
thread.start()
num_daemons = self.cfg.section('Crawler')['number_of_parallel_daemons']
if self.daemon_list.len() < num_daemons:
num_daemons = self.daemon_list.len()
for _ in range(num_daemons):
thread_daemonized = threading.Thread(target=self.manage_daemon,
args=(),
kwargs={})
self.threads_daemonized.append(thread_daemonized)
thread_daemonized.start()
while not self.shutdown:
try:
time.sleep(10)
# if we are not in daemon mode and no crawler is running any longer,
# all articles have been crawled and the tool can shut down
if self.daemon_list.len() == 0 and self.number_of_active_crawlers == 0:
self.graceful_stop()
break
except IOError:
# This exception will only occur on kill-process on windows.
# The process should be killed, thus this exception is
# irrelevant.
pass
def manage_crawler(self):
"""
Manages a normal crawler thread.
        When a crawler has finished, it loads another one if there are still sites
to crawl.
"""
        index = True  # non-None sentinel so the loop below runs at least once
self.number_of_active_crawlers += 1
while not self.shutdown and index is not None:
index = self.crawler_list.get_next_item()
if index is None:
self.number_of_active_crawlers -= 1
break
self.start_crawler(index)
def manage_daemon(self):
"""
Manages a daemonized crawler thread.
        Once a crawler has finished, it loads the next one.
"""
while not self.shutdown:
# next scheduled daemon, tuple (time, index)
item = self.daemon_list.get_next_item()
cur = time.time()
pajama_time = item[0] - cur
if pajama_time > 0:
self.thread_event.wait(pajama_time)
if not self.shutdown:
self.start_crawler(item[1], daemonize=True)
def start_crawler(self, index, daemonize=False):
"""
Starts a crawler from the input-array.
:param int index: The array-index of the site
        :param bool daemonize: True if the crawler is supposed to be daemonized
(to delete the JOBDIR)
"""
call_process = [sys.executable,
self.__single_crawler,
self.cfg_file_path,
self.json_file_path,
"%s" % index,
"%s" % self.shall_resume,
"%s" % daemonize]
self.log.debug("Calling Process: %s", call_process)
crawler = Popen(call_process,
stderr=None,
stdout=None)
crawler.communicate()
self.crawlers.append(crawler)
def graceful_stop(self, signal_number=None, stack_frame=None):
"""
This function will be called when a graceful-stop is initiated.
"""
stop_msg = "Hard" if self.shutdown else "Graceful"
if signal_number is None:
self.log.info("%s stop called manually. "
"Shutting down.", stop_msg)
else:
self.log.info("%s stop called by signal #%s. Shutting down."
"Stack Frame: %s",
stop_msg, signal_number, stack_frame)
self.shutdown = True
self.crawler_list.stop()
self.daemon_list.stop()
self.thread_event.set()
return True
def init_config_file_path_if_empty(self):
"""
        If the config directory does not exist, this function will initialize
        the path with a default config file.
        :return:
"""
if os.path.exists(self.cfg_directory_path):
return
user_choice = 'n'
if self.no_confirm:
user_choice = 'y'
else:
sys.stdout.write(
"Config directory does not exist at '" + os.path.abspath(self.cfg_directory_path) + "'. "
+ "Should a default configuration be created at this path? [Y/n] ")
if sys.version_info[0] < 3:
user_choice = raw_input()
else:
user_choice = input()
user_choice = user_choice.lower().replace("yes", "y").replace("no", "n")
            if not user_choice:  # the default is yes
user_choice = "y"
if "y" not in user_choice and "n" not in user_choice:
sys.stderr.write("Wrong input, aborting.")
sys.exit(1)
if "n" in user_choice:
sys.stdout.write("Config file will not be created. Terminating.")
sys.exit(1)
# copy the default config file to the new path
copy_tree(os.environ['CColon'] + os.path.sep + 'config', self.cfg_directory_path)
return
def get_expanded_path(self, path):
"""
        Expands a path that starts with '~' to an absolute path.
:param path:
:return:
"""
if path.startswith('~'):
return os.path.expanduser('~') + path[1:]
else:
return path
def get_abs_file_path(self, rel_file_path,
quit_on_error=None, check_relative_to_path=True):
"""
Returns the absolute file path of the given [relative] file path
to either this script or to the config file.
May throw a RuntimeError if quit_on_error is True.
:param str rel_file_path: relative file path
:param bool quit_on_error: determines if the script may throw an
exception
:return str: absolute file path of the given relative file path
:raises RuntimeError: if the file path does not exist and
quit_on_error is True
"""
if self.cfg_file_path is not None and \
check_relative_to_path and \
not self.cfg.section('Files')['relative_to_start_processes_file']:
script_dir = os.path.dirname(self.cfg_file_path)
else:
# absolute dir this script is in
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.abspath(
os.path.join(script_dir, rel_file_path))
if not os.path.exists(abs_file_path):
self.log.error(abs_file_path + " does not exist.")
if quit_on_error is True:
raise RuntimeError("Imported file not found. Quit.")
return abs_file_path
def reset_mysql(self):
"""
Resets the MySQL database.
"""
confirm = self.no_confirm
print("""
Cleanup MySQL database:
This will truncate all tables and reset the whole database.
""")
if not confirm:
confirm = 'yes' in builtins.input(
"""
Do you really want to do this? Write 'yes' to confirm: {yes}"""
.format(yes='yes' if confirm else ''))
if not confirm:
print("Did not type yes. Thus aborting.")
return
print("Resetting database...")
try:
# initialize DB connection
self.conn = pymysql.connect(host=self.mysql["host"],
port=self.mysql["port"],
db=self.mysql["db"],
user=self.mysql["username"],
passwd=self.mysql["password"])
self.cursor = self.conn.cursor()
self.cursor.execute("TRUNCATE TABLE CurrentVersions")
self.cursor.execute("TRUNCATE TABLE ArchiveVersions")
self.conn.close()
except (pymysql.err.OperationalError, pymysql.ProgrammingError, pymysql.InternalError,
pymysql.IntegrityError, TypeError) as error:
self.log.error("Database reset error: %s", error)
def reset_postgresql(self):
"""
Resets the Postgresql database.
"""
confirm = self.no_confirm
print("""
Cleanup Postgresql database:
This will truncate all tables and reset the whole database.
""")
if not confirm:
confirm = 'yes' in builtins.input(
"""
Do you really want to do this? Write 'yes' to confirm: {yes}"""
.format(yes='yes' if confirm else ''))
if not confirm:
print("Did not type yes. Thus aborting.")
return
print("Resetting database...")
try:
# initialize DB connection
self.conn = psycopg2.connect(host=self.postgresql["host"],
port=self.postgresql["port"],
database=self.postgresql["database"],
user=self.postgresql["user"],
password=self.postgresql["password"])
self.cursor = self.conn.cursor()
self.cursor.execute("TRUNCATE TABLE CurrentVersions RESTART IDENTITY")
self.cursor.execute("TRUNCATE TABLE ArchiveVersions RESTART IDENTITY")
self.conn.commit()
self.cursor.close()
except psycopg2.DatabaseError as error:
self.log.error("Database reset error: %s", error)
finally:
if self.conn is not None:
self.conn.close()
def reset_elasticsearch(self):
"""
Resets the Elasticsearch Database.
"""
print("""
Cleanup Elasticsearch database:
This will truncate all tables and reset the whole Elasticsearch database.
""")
confirm = self.no_confirm
if not confirm:
confirm = 'yes' in builtins.input(
"""
Do you really want to do this? Write 'yes' to confirm: {yes}"""
.format(yes='yes' if confirm else ''))
if not confirm:
print("Did not type yes. Thus aborting.")
return
try:
# initialize DB connection
es = Elasticsearch([self.elasticsearch["host"]],
http_auth=(self.elasticsearch["username"], self.elasticsearch["secret"]),
port=self.elasticsearch["port"],
use_ssl=self.elasticsearch["use_ca_certificates"],
verify_certs=self.elasticsearch["use_ca_certificates"],
ca_certs=self.elasticsearch["ca_cert_path"],
client_cert=self.elasticsearch["client_cert_path"],
client_key=self.elasticsearch["client_key_path"])
print("Resetting Elasticsearch database...")
es.indices.delete(index=self.elasticsearch["index_current"], ignore=[400, 404])
es.indices.delete(index=self.elasticsearch["index_archive"], ignore=[400, 404])
except ConnectionError as error:
self.log.error("Failed to connect to Elasticsearch. "
"Please check if the database is running and the config is correct: %s" % error)
def reset_files(self):
"""
Resets the local data directory.
"""
confirm = self.no_confirm
path = SavepathParser.get_base_path(
SavepathParser.get_abs_path_static(
self.cfg.section("Files")["local_data_directory"],
os.path.dirname(self.cfg_file_path)
)
)
print("""
Cleanup files:
This will delete {path} and all its contents.
""".format(path=path))
if not confirm:
confirm = 'yes' in builtins.input(
"""
Do you really want to do this? Write 'yes' to confirm: {yes}"""
.format(yes='yes' if confirm else ''))
if not confirm:
print("Did not type yes. Thus aborting.")
return
print("Removing: {}".format(path))
try:
shutil.rmtree(path)
except OSError as error:
if not os.path.exists(path):
self.log.error("%s does not exist.", path)
self.log.error(error)
class CrawlerList(object):
"""
Class that manages all crawlers that aren't supposed to be daemonized.
Exists to be able to use threading.Lock().
"""
lock = None
crawler_list = []
graceful_stop = False
def __init__(self):
self.lock = threading.Lock()
def append_item(self, item):
"""
Appends the given item to the crawler_list.
            :param item: the item to append to the crawler_list.
"""
self.lock.acquire()
try:
self.crawler_list.append(item)
finally:
self.lock.release()
def len(self):
"""
            Determines the number of crawlers in the list.
:return int: crawler_list's length
"""
return len(self.crawler_list)
def get_next_item(self):
"""
Pops the first crawler in the list.
:return: crawler_list's first item
"""
if self.graceful_stop:
return None
self.lock.acquire()
try:
if len(self.crawler_list) > 0:
item = self.crawler_list.pop(0)
else:
item = None
finally:
self.lock.release()
return item
def stop(self):
self.graceful_stop = True
class DaemonList(object):
"""
Class that manages all crawlers that are supposed to be daemonized.
Exists to be able to use threading.Lock().
"""
lock = None
daemons = {}
queue = []
queue_times = []
graceful_stop = False
def __init__(self):
self.queue = []
self.lock = threading.Lock()
def sort_queue(self):
"""
            Sorts the queue so that the tuple with the lowest scheduled time
            (the first value) is the first element in the list.
"""
self.queue = sorted(self.queue, key=lambda t: t[0])
self.queue_times = sorted(self.queue_times)
def len(self):
"""
Determines the number of daemonized crawlers in the list.
            :return int: the number of registered daemons
"""
return len(self.daemons)
def add_daemon(self, index, _time):
"""
Adds a daemon to the queue.
:param index: The index, usually the index of the site-object
            :param _time: The repetition time (every _time seconds the
                crawler starts again).
"""
self.lock.acquire()
try:
self.daemons[index] = _time
self.add_execution(time.time(), index)
finally:
self.lock.release()
def add_execution(self, _time, index):
"""
Adds an execution to the queue.
            If an execution is already scheduled for this particular _time,
            the time is bumped by one second until a free slot is found
            (see the hedged _demo_daemon_scheduling sketch at the end of
            this module).
:param _time: The (unix)-timestamp when the crawler should be
executed.
:param index: The index, usually the index of the site-object
"""
_time = int(_time)
while _time in self.queue_times:
_time += 1
self.queue_times.append(_time)
self.queue.append((_time, index))
def get_next_item(self):
"""
Gets the next daemon-item and adds the daemon to the queue again.
(With the new scheduled time)
"""
if self.graceful_stop:
return None
self.lock.acquire()
self.sort_queue()
try:
item = self.queue.pop(0)
prev_time = self.queue_times.pop(0)
self.add_execution(
                # next run = previous time + interval if on schedule, else now + interval
max(prev_time, time.time()) + self.daemons[item[1]], item[1]
)
finally:
self.lock.release()
return item
def stop(self):
self.graceful_stop = True
@plac.annotations(
cfg_file_path=plac.Annotation('path to the config file', 'option', 'c'),
resume=plac.Annotation('resume crawling from last process', 'flag'),
reset_elasticsearch=plac.Annotation('reset Elasticsearch indexes', 'flag'),
reset_json=plac.Annotation('reset JSON files', 'flag'),
reset_mysql=plac.Annotation('reset MySQL database', 'flag'),
reset_postgresql=plac.Annotation('reset Postgresql database', 'flag'),
reset_all=plac.Annotation('combines all reset options', 'flag'),
no_confirm=plac.Annotation('skip confirm dialogs', 'flag')
)
def cli(cfg_file_path, resume, reset_elasticsearch, reset_mysql, reset_postgresql, reset_json, reset_all, no_confirm):
"A generic news crawler and extractor."
if reset_all:
reset_elasticsearch = True
reset_json = True
reset_mysql = True
reset_postgresql = True
if cfg_file_path and not cfg_file_path.endswith(os.path.sep):
cfg_file_path += os.path.sep
NewsPleaseLauncher(cfg_file_path, resume, reset_elasticsearch, reset_json, reset_mysql, reset_postgresql, no_confirm)
def main():
plac.call(cli)
if __name__ == "__main__":
main()
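# Hedged illustration (never called by the launcher): how DaemonList schedules
# repeated crawls. Execution times that collide are pushed forward one second
# at a time by add_execution(), and get_next_item() re-queues the daemon with
# its next run time. The site indices 0 and 1 and the intervals are arbitrary
# example values.
def _demo_daemon_scheduling():
    daemons = NewsPleaseLauncher.DaemonList()
    daemons.add_daemon(0, 60)    # re-crawl site 0 every 60 seconds
    daemons.add_daemon(1, 300)   # re-crawl site 1 every 5 minutes
    first = daemons.get_next_item()   # (timestamp, index) that is due next
    second = daemons.get_next_item()
    return first, second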
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import doctest
from distutils.core import setup
from distutils.core import Command
from unittest import TextTestRunner, TestLoader
from glob import glob
from subprocess import call
from os.path import splitext, basename, join as pjoin
try:
import epydoc
has_epydoc = True
except ImportError:
has_epydoc = False
import libcloud.utils.misc
from libcloud.utils.dist import get_packages, get_data_files
libcloud.utils.misc.SHOW_DEPRECATION_WARNING = False
HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/libcloud/trunk'
PROJECT_BASE_DIR = 'http://libcloud.apache.org'
TEST_PATHS = ['libcloud/test', 'libcloud/test/common', 'libcloud/test/compute',
'libcloud/test/storage', 'libcloud/test/loadbalancer',
'libcloud/test/dns']
DOC_TEST_MODULES = ['libcloud.compute.drivers.dummy',
'libcloud.storage.drivers.dummy',
'libcloud.dns.drivers.dummy']
SUPPORTED_VERSIONS = ['2.5', '2.6', '2.7', 'PyPy', '3.x']
if sys.version_info < (2, 5):
version = '.'.join([str(x) for x in sys.version_info[:3]])
print('Version ' + version + ' is not supported. Supported versions are ' +
', '.join(SUPPORTED_VERSIONS))
sys.exit(1)
def read_version_string():
version = None
sys.path.insert(0, pjoin(os.getcwd()))
from libcloud import __version__
version = __version__
sys.path.pop(0)
return version
class TestCommand(Command):
description = "run test suite"
user_options = []
def initialize_options(self):
THIS_DIR = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, THIS_DIR)
for test_path in TEST_PATHS:
sys.path.insert(0, pjoin(THIS_DIR, test_path))
self._dir = os.getcwd()
def finalize_options(self):
pass
def run(self):
try:
import mock
mock
except ImportError:
print('Missing "mock" library. mock is library is needed '
'to run the tests. You can install it using pip: '
'pip install mock')
sys.exit(1)
status = self._run_tests()
sys.exit(status)
def _run_tests(self):
secrets_current = pjoin(self._dir, 'libcloud/test', 'secrets.py')
secrets_dist = pjoin(self._dir, 'libcloud/test', 'secrets.py-dist')
if not os.path.isfile(secrets_current):
print("Missing " + secrets_current)
print("Maybe you forgot to copy it from -dist:")
print("cp libcloud/test/secrets.py-dist libcloud/test/secrets.py")
sys.exit(1)
mtime_current = os.path.getmtime(secrets_current)
mtime_dist = os.path.getmtime(secrets_dist)
if mtime_dist > mtime_current:
print("It looks like test/secrets.py file is out of date.")
print("Please copy the new secret.py-dist file over otherwise" +
" tests might fail")
pre_python26 = (sys.version_info[0] == 2
and sys.version_info[1] < 6)
if pre_python26:
missing = []
# test for dependencies
try:
import simplejson
simplejson # silence pyflakes
except ImportError:
missing.append("simplejson")
try:
import ssl
ssl # silence pyflakes
except ImportError:
missing.append("ssl")
if missing:
print("Missing dependencies: " + ", ".join(missing))
sys.exit(1)
testfiles = []
for test_path in TEST_PATHS:
for t in glob(pjoin(self._dir, test_path, 'test_*.py')):
testfiles.append('.'.join(
[test_path.replace('/', '.'), splitext(basename(t))[0]]))
tests = TestLoader().loadTestsFromNames(testfiles)
for test_module in DOC_TEST_MODULES:
tests.addTests(doctest.DocTestSuite(test_module))
t = TextTestRunner(verbosity=2)
res = t.run(tests)
return not res.wasSuccessful()
class Pep8Command(Command):
description = "run pep8 script"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import pep8
pep8
except ImportError:
print ('Missing "pep8" library. You can install it using pip: '
'pip install pep8')
sys.exit(1)
cwd = os.getcwd()
retcode = call(('pep8 %s/libcloud/' %
(cwd)).split(' '))
sys.exit(retcode)
class ApiDocsCommand(Command):
description = "generate API documentation"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if not has_epydoc:
raise RuntimeError('Missing "epydoc" package!')
os.system(
'pydoctor'
' --add-package=libcloud'
' --project-name=libcloud'
' --make-html'
' --html-viewsource-base="%s"'
' --project-base-dir=`pwd`'
' --project-url="%s"'
% (HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR))
class CoverageCommand(Command):
description = "run test suite and generate coverage report"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import coverage
cov = coverage.coverage(config_file='.coveragerc')
cov.start()
tc = TestCommand(self.distribution)
tc._run_tests()
cov.stop()
cov.save()
cov.html_report()
# pre-2.6 will need the ssl PyPI package
pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6)
setup(
name='apache-libcloud',
version=read_version_string(),
description='A standard Python library that abstracts away differences' +
' among multiple cloud provider APIs. For more information' +
' and documentation, please see http://libcloud.apache.org',
author='Apache Software Foundation',
author_email='dev@libcloud.apache.org',
requires=([], ['ssl', 'simplejson'],)[pre_python26],
packages=get_packages('libcloud'),
package_dir={
'libcloud': 'libcloud',
},
package_data={'libcloud': get_data_files('libcloud', parent='libcloud')},
license='Apache License (2.0)',
url='http://libcloud.apache.org/',
cmdclass={
'test': TestCommand,
'pep8': Pep8Command,
'apidocs': ApiDocsCommand,
'coverage': CoverageCommand
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: Implementation :: PyPy'])
|
|
""" Functions for moksha-ctl """
import decorator
import subprocess
import os
import sys
import shutil
import psutil
# Local imports
import config
import colors as c
import utils
ctl_config = config.load_config()
pid_files = ['paster.pid', 'orbited.pid', 'moksha-hub.pid']
PRETTY_PREFIX = "[" + c.magenta("moksha-ctl") + "] "
@decorator.decorator
def _with_virtualenv(func, *args, **kwargs):
# If sys has the 'real_prefix' attribute, then we are most likely already
# running in a virtualenv, in which case we do not need to switch it up.
# http://groups.google.com/group/python-virtualenv/browse_thread/thread/e30029b2e50ae17a?pli=1
if hasattr(sys, 'real_prefix'):
return func(*args, **kwargs)
# Otherwise, we'll use the handy virtualenvcontext module to switch it up
# for us.
import virtualenvcontext
with virtualenvcontext.VirtualenvContext(ctl_config['venv']):
return func(*args, **kwargs)
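# Hedged usage sketch: any helper decorated with @_with_virtualenv runs inside
# the virtualenv named in ctl_config['venv'] unless the interpreter is already
# inside one. `_demo_which_python` is illustrative only and is never called.
@_with_virtualenv
def _demo_which_python():
    return sys.executable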
@decorator.decorator
def _in_srcdir(func, *args, **kwargs):
with utils.DirectoryContext(ctl_config['moksha-src-dir']):
return func(*args, **kwargs)
@decorator.decorator
def _reporter(func, *args, **kwargs):
descriptor = ":".join([func.__name__] + [a for a in args if a])
print PRETTY_PREFIX, "Running:", descriptor
output = None
try:
output = func(*args, **kwargs)
if not output:
raise Exception
print PRETTY_PREFIX, "[ " + c.green('OK') + " ]", descriptor
except Exception as e:
print PRETTY_PREFIX, "[ " + c.red('FAIL') + " ]", descriptor,
print ' -- ', str(e)
return output
@_reporter
def bootstrap():
""" Should only be run once. First-time moksha setup. """
ret = True
if os.path.exists('/etc/redhat-release'):
reqs = [
'python-setuptools', 'python-qpid', 'qpid-cpp-server',
'python-psutil', 'ccze', # ccze is awesome
'openssl-devel',
]
ret = ret and not os.system(
'sudo yum install -q -y ' + ' '.join(reqs))
else:
# No orbited or qpid on ubuntu as far as I can tell
# TODO -- how should we work this?
ret = ret and not os.system(
'sudo apt-get install -y python-setuptools')
ret = ret and not os.system('sudo easy_install -q pip')
ret = ret and not os.system('sudo pip -q install virtualenv')
ret = ret and not os.system('sudo pip -q install virtualenvwrapper')
ret = ret and not os.system('sudo pip -q install virtualenvcontext')
ret = ret and not os.system('sudo pip -q install fabulous')
try:
os.mkdir(os.path.expanduser('~/.virtualenvs'))
except OSError as e:
if "File exists" in str(e):
pass
else:
raise e
if not os.getenv('WORKON_HOME', None):
# TODO -- auto-insert virtualenv snippet into ~/.bashrc if
        # it's not already there.
shellrc_snippet = """
# virtualenv stuff
export WORKON_HOME=$HOME/.virtualenvs;
source /usr/bin/virtualenvwrapper.sh;
"""
print PRETTY_PREFIX, "Ok... but,"
print "You should definitely add the following to your ~/.bashrc."
print
print "*" * 60
print shellrc_snippet
print "*" * 60
print
print "Then please re-run './moksha-ctl.py bootstrap'."
return False
else:
print PRETTY_PREFIX, "Great. Done-ski."
print "Please run './moksha-ctl.py rebuild' to continue."
return ret
def _do_virtualenvwrapper_command(cmd):
""" This is tricky, because all virtualenwrapper commands are
actually bash functions, so we can't call them like we would
other executables.
"""
print "Trying '%s'" % cmd
out, err = subprocess.Popen(
['bash', '-c', '. /usr/bin/virtualenvwrapper.sh; %s' % cmd],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
).communicate()
print out
print err
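# Hedged example (never called here): listing the existing virtualenvs has to
# go through the same bash trampoline, because `lsvirtualenv` -- like the
# `mkvirtualenv` and `rmvirtualenv` calls below -- is a shell function provided
# by virtualenvwrapper rather than an executable on $PATH.
def _demo_list_virtualenvs():
    _do_virtualenvwrapper_command('lsvirtualenv -b')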
@_reporter
def rebuild():
""" Completely destroy and rebuild the virtualenv. """
try:
_do_virtualenvwrapper_command('rmvirtualenv %s' % ctl_config['venv'])
except Exception as e:
print str(e)
cmd = 'mkvirtualenv --no-site-packages %s' % ctl_config['venv']
_do_virtualenvwrapper_command(cmd)
# Do two things here:
# - remove all *.pyc that exist in srcdir.
# - remove all data/templates dirs that exist (mako caches).
for base, dirs, files in os.walk(ctl_config['moksha-src-dir']):
for fname in files:
if fname.endswith(".pyc"):
os.remove(os.path.sep.join([base, fname]))
if base.endswith('data/templates'):
shutil.rmtree(base)
return install()
@_reporter
@_with_virtualenv
def install():
""" Install moksha and all its dependencies. """
# Use a dict to track return values so we can summarize results
ret = {}
# Do the work
ret['install_hacks'] = install_hacks()
with utils.DirectoryContext(ctl_config['moksha-src-dir']):
# `install` instead of `develop` to avoid weird directory vs. egg
# namespace issues
ret['python setup.py install'] = \
not os.system('%s setup.py install' % sys.executable)
ret['install_apps'] = install_apps()
ret['link_qpid_libs'] = link_qpid_libs()
ret['develop'] = develop()
# Summarize what went wrong if anything
if not all(ret.values()):
print PRETTY_PREFIX, "Something went wrong for `install`"
for k, v in ret.iteritems():
if not v:
print PRETTY_PREFIX, " Failing because", c.yellow(k), "failed."
return all(ret.values())
@_reporter
@_with_virtualenv
def install_hacks():
""" Install dependencies with weird workarounds. """
distributions = [
'Extremes',
'tg.devtools',
'orbited',
]
# This automatically uses --use-mirrors
for dist in distributions:
print PRETTY_PREFIX, "pip installing", c.yellow(dist)
utils.install_distributions([dist])
# TODO -- test to see if the installs worked.
return True
@_reporter
@_with_virtualenv
@_in_srcdir
def install_apps():
""" Install *all* the moksha `apps`. """
with utils.DirectoryContext(ctl_config['apps-dir']):
dnames = [d for d in os.listdir('.') if os.path.isdir(d)]
for d in dnames:
install_app(app=d)
return True
@_reporter
@_with_virtualenv
def install_app(app):
""" Install a particular app. $ ./moksha-ctl.py install_app:metrics """
dirname = "/".join([ctl_config['moksha-src-dir'], ctl_config['apps-dir'], app])
with utils.DirectoryContext(dirname):
fnames = os.listdir('.')
        if 'pavement.py' not in fnames:
print "No `pavement.py` found for app '%s'. Skipping." % app
return False
try:
shutil.rmtree('dist')
except OSError as e:
pass # It's cool.
base = '/'.join(sys.executable.split('/')[:-1])
cmd = '%s/paver bdist_egg > /dev/null 2>&1' % base
if os.system(cmd):
return False
cmd = '%s/easy_install -Z dist/*.egg > /dev/null 2>&1' % base
if os.system(cmd):
return False
return True
@_reporter
@_with_virtualenv
@_in_srcdir
def link_qpid_libs():
""" Link qpid and mllib in from the system site-packages. """
location = 'lib/python{major}.{minor}/site-packages'.format(
major=sys.version_info.major, minor=sys.version_info.minor)
template = 'ln -s /usr/{location}/{lib} {workon}/{venv}/{location}/'
for lib in ['qpid', 'mllib']:
cmd = template.format(
location=location, venv=ctl_config['venv'], lib=lib,
workon=os.getenv("WORKON_HOME"))
out = os.system(cmd)
# TODO -- test for success
return True
@_reporter
@_with_virtualenv
@_in_srcdir
def start(service=None):
""" Start paster, orbited, and moksha-hub. """
def start_service(name):
print PRETTY_PREFIX, "Starting " + c.yellow(name)
return not os.system('.scripts/start-{name} {venv}'.format(
name=name, venv=ctl_config['venv']))
ret = True
if service:
pid_file = service + '.pid'
if os.path.exists(pid_file):
raise ValueError("%s file exists" % pid_file)
ret = ret and start_service(name=service)
else:
if any(map(os.path.exists, pid_files)):
raise ValueError("some .pid file exists")
ret = ret and start_service(name='paster')
ret = ret and start_service(name='orbited')
ret = ret and start_service(name='moksha-hub')
print " * Log files are in logs/<service>.log. Run:"
print " $ ./moksha-ctl.py logs"
return ret
@_reporter
@_with_virtualenv
@_in_srcdir
def stop(service=None):
""" Stop paster, orbited, and moksha-hub. """
def stopfail(msg):
print PRETTY_PREFIX + " [ " + c.red('FAIL') + " ]", msg
def stopwin(msg):
print PRETTY_PREFIX + " [ " + c.green('OK') + " ]", msg
_pid_files = pid_files
if service:
_pid_files = [service + '.pid']
ret = True
processes = psutil.get_process_list()
for fname in _pid_files:
if not os.path.exists(fname):
stopfail(fname + " does not exist.")
continue
pid = None
try:
with open(fname) as f:
pid = int(f.read())
except IOError:
stopfail(fname + " does not exist.")
ret = False
continue
except ValueError:
stopfail(fname + " is corrupt.")
ret = False
continue
instances = [p for p in processes if p.pid == pid]
if len(instances) == 0:
stopfail("No such process with pid: " + str(pid))
ret = False
os.remove(fname)
continue
proc = instances[0]
result = proc.kill()
stopwin("Killed %i %s" % (proc.pid, proc.name))
os.remove(fname)
return ret
@_reporter
@_with_virtualenv
@_in_srcdir
def develop():
""" `python setup.py develop` """
ret = True
ret = ret and not os.system('%s setup.py develop' % sys.executable)
ret = ret and not os.system('%s setup.py install' % sys.executable)
return ret
@_reporter
@_with_virtualenv
def restart():
""" Stop, `python setup.py develop`, start. """
    stop()  # We don't care if this fails.
develop()
return start()
@_reporter
@_with_virtualenv
def egg_info():
""" Rebuild egg_info. """
with utils.DirectoryContext(ctl_config['moksha-src-dir']):
os.system('%s setup.py egg_info' % sys.executable)
# No decorators here
def logs():
""" Watch colorized logs of paster, orbited, and moksha-hub """
log_location = 'logs'
log_files = ['paster.log', 'orbited.log', 'moksha-hub.log']
with utils.DirectoryContext(ctl_config['moksha-src-dir']):
cmd = 'tail -f %s | ccze' % ' '.join([
log_location + '/' + fname for fname in log_files
])
print PRETTY_PREFIX, "Running '", cmd, "'"
os.system(cmd)
# --
# Below here follows the *giant* 'wtf' block. Add things to it as necessary.
# --
WTF_PREFIX = PRETTY_PREFIX + "[" + c.magenta('wtf') + "]"
def _wtfwin(msg):
print WTF_PREFIX, "[ " + c.green('OK') + " ]", msg
def _wtffail(msg):
print WTF_PREFIX, "[ " + c.red('FAIL') + " ]", msg
@_in_srcdir
def wtf():
    """ Debug a busted moksha environment. """
    import virtualenvcontext
wtfwin, wtffail = _wtfwin, _wtffail
wtfwin(' venv is set to "%s"' % ctl_config['venv'])
workon = os.getenv('WORKON_HOME')
if not workon:
wtffail('$WORKON_HOME is not set.')
else:
wtfwin('$WORKON_HOME is set to ' + workon)
if os.path.exists(os.path.expanduser(workon)):
wtfwin(workon + ' exists.')
else:
wtffail(workon + ' does not exist.')
with virtualenvcontext.VirtualenvContext(ctl_config['venv']):
try:
import qpid
if not qpid.__file__.startswith(os.path.expanduser(workon)):
raise ImportError
wtfwin('virtualenv python-qpid is installed.')
except Exception as e:
wtffail('virtualenv python-qpid not installed.')
try:
import qpid
if not qpid.__file__.startswith('/usr/'):
raise ImportError
wtfwin('system-wide python-qpid is installed.')
except ImportError as e:
wtffail('system-wide python-qpid not installed.')
with virtualenvcontext.VirtualenvContext(ctl_config['venv']):
all_processes = psutil.get_process_list()
for pid_file in pid_files:
prog = pid_file[:-4]
instances = [p for p in all_processes if prog in p.name]
pid = None
try:
with open(pid_file) as f:
pid = int(f.read())
except IOError:
wtffail(pid_file + ' does not exist.')
except ValueError:
wtffail(pid_file + ' is corrupt.')
if not psutil.pid_exists(pid):
if pid and len(instances) == 0:
wtffail(prog + ' is not running BUT it has a pid file!')
elif len(instances) != 0:
wtffail(prog + " appears to be running, " +
"but pidfile doesn't match")
else:
wtffail(prog + ' is not running.')
else:
if len(instances) > 1:
wtffail(prog + ' has multiple instances running.')
elif len(instances) == 0:
wtffail(prog + ' not running. ' +
'But pidfile points to ANOTHER process!')
elif instances[0].pid != pid:
wtffail('pid of ' + prog + " doesn't match pid-file")
else:
wtfwin(prog + ' is running and healthy.')
|
|
import os
from django.utils import six
from datetime import datetime, date
import logging
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.core.cache import cache
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.files.base import File
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import Sum, Count
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.utils.functional import cached_property
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from treebeard.mp_tree import MP_Node
from oscar.core.decorators import deprecated
from oscar.core.utils import slugify
from oscar.core.validators import non_python_keyword
from oscar.core.loading import get_classes, get_model, get_class
from oscar.models.fields import NullCharField, AutoSlugField
ProductManager, BrowsableProductManager = get_classes(
'catalogue.managers', ['ProductManager', 'BrowsableProductManager'])
Selector = get_class('partner.strategy', 'Selector')
@python_2_unicode_compatible
class AbstractProductClass(models.Model):
"""
Used for defining options and attributes for a subset of products.
E.g. Books, DVDs and Toys. A product can only belong to one product class.
At least one product class must be created when setting up a new
Oscar deployment.
Not necessarily equivalent to top-level categories but usually will be.
"""
name = models.CharField(_('Name'), max_length=128)
slug = AutoSlugField(_('Slug'), max_length=128, unique=True,
populate_from='name')
    #: Some product types don't require shipping (e.g. digital products) - we use
#: this field to take some shortcuts in the checkout.
requires_shipping = models.BooleanField(_("Requires shipping?"),
default=True)
#: Digital products generally don't require their stock levels to be
#: tracked.
track_stock = models.BooleanField(_("Track stock levels?"), default=True)
#: These are the options (set by the user when they add to basket) for this
#: item class. For instance, a product class of "SMS message" would always
#: require a message to be specified before it could be bought.
#: Note that you can also set options on a per-product level.
options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Options"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['name']
verbose_name = _("Product class")
verbose_name_plural = _("Product classes")
def __str__(self):
return self.name
@property
def has_attributes(self):
return self.attributes.exists()
@python_2_unicode_compatible
class AbstractCategory(MP_Node):
"""
A product category. Merely used for navigational purposes; has no
    effect on business logic.
Uses django-treebeard.
"""
name = models.CharField(_('Name'), max_length=255, db_index=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'), upload_to='categories', blank=True,
null=True, max_length=255)
slug = models.SlugField(_('Slug'), max_length=255, db_index=True)
_slug_separator = '/'
_full_name_separator = ' > '
def __str__(self):
return self.full_name
@property
def full_name(self):
"""
        Returns a string representation of the category and its ancestors,
e.g. 'Books > Non-fiction > Essential programming'.
It's rarely used in Oscar's codebase, but used to be stored as a
CharField and is hence kept for backwards compatibility. It's also
sufficiently useful to keep around.
"""
names = [category.name for category in self.get_ancestors_and_self()]
return self._full_name_separator.join(names)
@property
def full_slug(self):
"""
Returns a string of this category's slug concatenated with the slugs
        of its ancestors, e.g. 'books/non-fiction/essential-programming'.
        Oscar used to store this in the 'slug' model field, but this field
        has been re-purposed to only store this category's slug and to not
        include its ancestors' slugs.
"""
slugs = [category.slug for category in self.get_ancestors_and_self()]
return self._slug_separator.join(slugs)
def generate_slug(self):
"""
Generates a slug for a category. This makes no attempt at generating
a unique slug.
"""
return slugify(self.name)
def ensure_slug_uniqueness(self):
"""
        Ensures that the category's slug is unique among its siblings.
This is inefficient and probably not thread-safe.
"""
unique_slug = self.slug
siblings = self.get_siblings().exclude(pk=self.pk)
next_num = 2
while siblings.filter(slug=unique_slug).exists():
unique_slug = '{slug}_{end}'.format(slug=self.slug, end=next_num)
next_num += 1
if unique_slug != self.slug:
self.slug = unique_slug
self.save()
def save(self, *args, **kwargs):
"""
Oscar traditionally auto-generated slugs from names. As that is
often convenient, we still do so if a slug is not supplied through
other means. If you want to control slug creation, just create
instances with a slug already set, or expose a field on the
appropriate forms.
"""
if self.slug:
# Slug was supplied. Hands off!
super(AbstractCategory, self).save(*args, **kwargs)
else:
self.slug = self.generate_slug()
super(AbstractCategory, self).save(*args, **kwargs)
# We auto-generated a slug, so we need to make sure that it's
# unique. As we need to be able to inspect the category's siblings
# for that, we need to wait until the instance is saved. We
# update the slug and save again if necessary.
self.ensure_slug_uniqueness()
def get_ancestors_and_self(self):
"""
Gets ancestors and includes itself. Use treebeard's get_ancestors
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_ancestors()) + [self]
def get_descendants_and_self(self):
"""
Gets descendants and includes itself. Use treebeard's get_descendants
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_descendants()) + [self]
def get_absolute_url(self):
"""
Our URL scheme means we have to look up the category's ancestors. As
that is a bit more expensive, we cache the generated URL. That is
safe even for a stale cache, as the default implementation of
ProductCategoryView does the lookup via primary key anyway. But if
you change that logic, you'll have to reconsider the caching
approach.
"""
cache_key = 'CATEGORY_URL_%s' % self.pk
url = cache.get(cache_key)
if not url:
url = reverse(
'catalogue:category',
kwargs={'category_slug': self.full_slug, 'pk': self.pk})
cache.set(cache_key, url)
return url
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['path']
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def has_children(self):
return self.get_num_children() > 0
def get_num_children(self):
return self.get_children().count()
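# Hedged sketch (not part of Oscar): how the slug helpers above behave on an
# unsaved category. generate_slug() only slugifies the name; full_slug and
# ensure_slug_uniqueness() need a saved tree node, so they are not exercised
# here. Assumes the concrete catalogue app is installed; never called by Oscar.
def _demo_category_slug():
    Category = get_model('catalogue', 'Category')
    category = Category(name='Essential programming')
    return category.generate_slug()   # expected: 'essential-programming'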
@python_2_unicode_compatible
class AbstractProductCategory(models.Model):
"""
Joining model between products and categories. Exists to allow customising.
"""
product = models.ForeignKey('catalogue.Product', verbose_name=_("Product"))
category = models.ForeignKey('catalogue.Category',
verbose_name=_("Category"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['product', 'category']
unique_together = ('product', 'category')
verbose_name = _('Product category')
verbose_name_plural = _('Product categories')
def __str__(self):
return u"<productcategory for product '%s'>" % self.product
@python_2_unicode_compatible
class AbstractProduct(models.Model):
"""
The base product object
    There are three kinds of products; they are distinguished by the
    structure field.
    - A stand-alone product. Regular product that lives by itself.
- A child product. All child products have a parent product. They're a
specific version of the parent.
- A parent product. It essentially represents a set of products.
An example could be a yoga course, which is a parent product. The different
times/locations of the courses would be associated with the child products.
"""
STANDALONE, PARENT, CHILD = 'standalone', 'parent', 'child'
STRUCTURE_CHOICES = (
(STANDALONE, _('Stand-alone product')),
(PARENT, _('Parent product')),
(CHILD, _('Child product'))
)
structure = models.CharField(
_("Product structure"), max_length=10, choices=STRUCTURE_CHOICES,
default=STANDALONE)
upc = NullCharField(
_("UPC"), max_length=64, blank=True, null=True, unique=True,
help_text=_("Universal Product Code (UPC) is an identifier for "
"a product which is not specific to a particular "
" supplier. Eg an ISBN for a book."))
parent = models.ForeignKey(
'self', null=True, blank=True, related_name='children',
verbose_name=_("Parent product"),
help_text=_("Only choose a parent product if you're creating a child "
"product. For example if this is a size "
"4 of a particular t-shirt. Leave blank if this is a "
"stand-alone product (i.e. there is only one version of"
" this product)."))
# Title is mandatory for canonical products but optional for child products
title = models.CharField(pgettext_lazy(u'Product title', u'Title'),
max_length=255, blank=True)
slug = models.SlugField(_('Slug'), max_length=255, unique=False)
description = models.TextField(_('Description'), blank=True)
#: "Kind" of product, e.g. T-Shirt, Book, etc.
#: None for child products, they inherit their parent's product class
product_class = models.ForeignKey(
'catalogue.ProductClass', null=True, blank=True, on_delete=models.PROTECT,
verbose_name=_('Product type'), related_name="products",
help_text=_("Choose what type of product this is"))
attributes = models.ManyToManyField(
'catalogue.ProductAttribute',
through='ProductAttributeValue',
verbose_name=_("Attributes"),
help_text=_("A product attribute is something that this product may "
"have, such as a size, as specified by its class"))
#: It's possible to have options product class-wide, and per product.
product_options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Product options"),
help_text=_("Options are values that can be associated with a item "
"when it is added to a customer's basket. This could be "
"something like a personalised message to be printed on "
"a T-shirt."))
recommended_products = models.ManyToManyField(
'catalogue.Product', through='ProductRecommendation', blank=True,
verbose_name=_("Recommended products"),
help_text=_("These are products that are recommended to accompany the "
"main product."))
# Denormalised product rating - used by reviews app.
# Product has no ratings if rating is None
rating = models.FloatField(_('Rating'), null=True, editable=False)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
# This field is used by Haystack to reindex search
date_updated = models.DateTimeField(
_("Date updated"), auto_now=True, db_index=True)
categories = models.ManyToManyField(
'catalogue.Category', through='ProductCategory',
verbose_name=_("Categories"))
#: Determines if a product may be used in an offer. It is illegal to
#: discount some types of product (e.g. ebooks) and this field helps
    #: merchants avoid discounting such products.
#: Note that this flag is ignored for child products; they inherit from
#: the parent product.
is_discountable = models.BooleanField(
_("Is discountable?"), default=True, help_text=_(
"This flag indicates if this product can be used in an offer "
"or not"))
objects = ProductManager()
browsable = BrowsableProductManager()
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['-date_created']
verbose_name = _('Product')
verbose_name_plural = _('Products')
def __init__(self, *args, **kwargs):
super(AbstractProduct, self).__init__(*args, **kwargs)
self.attr = ProductAttributesContainer(product=self)
def __str__(self):
if self.title:
return self.title
if self.attribute_summary:
return u"%s (%s)" % (self.get_title(), self.attribute_summary)
else:
return self.get_title()
def get_absolute_url(self):
"""
Return a product's absolute url
"""
return reverse('catalogue:detail',
kwargs={'product_slug': self.slug, 'pk': self.id})
def clean(self):
"""
        Validate a product. These are the rules:
+---------------+-------------+--------------+--------------+
| | stand alone | parent | child |
+---------------+-------------+--------------+--------------+
| title | required | required | optional |
+---------------+-------------+--------------+--------------+
| product class | required | required | must be None |
+---------------+-------------+--------------+--------------+
| parent | forbidden | forbidden | required |
+---------------+-------------+--------------+--------------+
| stockrecords | 0 or more | forbidden | 0 or more |
+---------------+-------------+--------------+--------------+
| categories | 1 or more | 1 or more | forbidden |
+---------------+-------------+--------------+--------------+
| attributes | optional | optional | optional |
+---------------+-------------+--------------+--------------+
| rec. products | optional | optional | unsupported |
+---------------+-------------+--------------+--------------+
| options | optional | optional | forbidden |
+---------------+-------------+--------------+--------------+
        Because the validation logic is quite complex, validation is delegated
        to the sub-method appropriate for the product's structure (a hedged
        usage sketch, _demo_structure_validation, follows this class).
"""
getattr(self, '_clean_%s' % self.structure)()
if not self.is_parent:
self.attr.validate_attributes()
def _clean_standalone(self):
"""
Validates a stand-alone product
"""
if not self.title:
raise ValidationError(_("Your product must have a title."))
if not self.product_class:
raise ValidationError(_("Your product must have a product class."))
if self.parent_id:
raise ValidationError(_("Only child products can have a parent."))
def _clean_child(self):
"""
Validates a child product
"""
if not self.parent_id:
raise ValidationError(_("A child product needs a parent."))
if self.parent_id and not self.parent.is_parent:
raise ValidationError(
_("You can only assign child products to parent products."))
if self.product_class:
raise ValidationError(
_("A child product can't have a product class."))
if self.pk and self.categories.exists():
raise ValidationError(
_("A child product can't have a category assigned."))
# Note that we only forbid options on product level
if self.pk and self.product_options.exists():
raise ValidationError(
_("A child product can't have options."))
def _clean_parent(self):
"""
Validates a parent product.
"""
self._clean_standalone()
if self.has_stockrecords:
raise ValidationError(
_("A parent product can't have stockrecords."))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.get_title())
super(AbstractProduct, self).save(*args, **kwargs)
self.attr.save()
# Properties
@property
def is_standalone(self):
return self.structure == self.STANDALONE
@property
def is_parent(self):
return self.structure == self.PARENT
@property
def is_child(self):
return self.structure == self.CHILD
def can_be_parent(self, give_reason=False):
"""
        Helps decide if the product can be turned into a parent product.
"""
reason = None
if self.is_child:
reason = _('The specified parent product is a child product.')
if self.has_stockrecords:
reason = _(
"One can't add a child product to a product with stock"
" records.")
is_valid = reason is None
if give_reason:
return is_valid, reason
else:
return is_valid
@property
def options(self):
"""
Returns a set of all valid options for this product.
It's possible to have options product class-wide, and per product.
"""
pclass_options = self.get_product_class().options.all()
return set(pclass_options) or set(self.product_options.all())
@property
def is_shipping_required(self):
return self.get_product_class().requires_shipping
@property
def has_stockrecords(self):
"""
Test if this product has any stockrecords
"""
return self.stockrecords.exists()
@property
def num_stockrecords(self):
return self.stockrecords.count()
@property
def attribute_summary(self):
"""
Return a string of all of a product's attributes
"""
attributes = self.attribute_values.all()
pairs = [attribute.summary() for attribute in attributes]
return ", ".join(pairs)
# The two properties below are deprecated because determining minimum
# price is not as trivial as it sounds considering multiple stockrecords,
# currencies, tax, etc.
# The current implementation is very naive and only works for a limited
# set of use cases.
# At the very least, we should pass in the request and
# user. Hence, it's best done as an extension to a Strategy class.
# Once that is accomplished, these properties should be removed.
@property
@deprecated
def min_child_price_incl_tax(self):
"""
Return minimum child product price including tax.
"""
return self._min_child_price('incl_tax')
@property
@deprecated
def min_child_price_excl_tax(self):
"""
Return minimum child product price excluding tax.
This is a very naive approach; see the deprecation notice above. And
only use it for display purposes (e.g. "new Oscar shirt, prices
starting from $9.50").
"""
return self._min_child_price('excl_tax')
def _min_child_price(self, prop):
"""
Return minimum child product price.
This is for visual purposes only. It ignores currencies, most of the
Strategy logic for selecting stockrecords, knows nothing about the
current user or request, etc. It's only here to ensure
backwards-compatibility; the previous implementation wasn't any
better.
"""
strategy = Selector().strategy()
children_stock = strategy.select_children_stockrecords(self)
prices = [
strategy.pricing_policy(child, stockrecord)
for child, stockrecord in children_stock]
raw_prices = sorted([getattr(price, prop) for price in prices])
return raw_prices[0] if raw_prices else None
# Wrappers for child products
def get_title(self):
"""
        Return a product's title, or its parent's title if it has no title.
"""
title = self.title
if not title and self.parent_id:
title = self.parent.title
return title
get_title.short_description = pgettext_lazy(u"Product title", u"Title")
def get_product_class(self):
"""
Return a product's item class. Child products inherit their parent's.
"""
if self.is_child:
return self.parent.product_class
else:
return self.product_class
get_product_class.short_description = _("Product class")
def get_is_discountable(self):
"""
At the moment, is_discountable can't be set individually for child
products; they inherit it from their parent.
"""
if self.is_child:
return self.parent.is_discountable
else:
return self.is_discountable
def get_categories(self):
"""
Return a product's categories, or its parent's categories if it is a child product.
"""
if self.is_child:
return self.parent.categories
else:
return self.categories
get_categories.short_description = _("Categories")
# Images
def get_missing_image(self):
"""
Returns a missing image object.
"""
# This class should have a 'name' property so it mimics the Django file
# field.
return MissingProductImage()
def primary_image(self):
"""
Returns the primary image for a product. Usually used when one can
only display one product image, e.g. in a list of products.
"""
images = self.images.all()
ordering = self.images.model.Meta.ordering
if not ordering or ordering[0] != 'display_order':
# Only apply order_by() if a custom model doesn't use default
# ordering. Applying order_by() busts the prefetch cache of
# the ProductManager
images = images.order_by('display_order')
try:
return images[0]
except IndexError:
# We return a dict with fields that mirror the key properties of
# the ProductImage class so this missing image can be used
# interchangeably in templates. Strategy pattern ftw!
return {
'original': self.get_missing_image(),
'caption': '',
'is_missing': True}
# Updating methods
def update_rating(self):
"""
Recalculate rating field
"""
self.rating = self.calculate_rating()
self.save()
update_rating.alters_data = True
def calculate_rating(self):
"""
Calculate rating value
"""
result = self.reviews.filter(
status=self.reviews.model.APPROVED
).aggregate(
sum=Sum('score'), count=Count('id'))
reviews_sum = result['sum'] or 0
reviews_count = result['count'] or 0
rating = None
if reviews_count > 0:
rating = float(reviews_sum) / reviews_count
return rating
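# Example (illustrative): three approved reviews scoring 4, 4 and 5 give
# sum=13 and count=3, so the rating becomes 13 / 3 = 4.33...; with no
# approved reviews the rating stays None.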
def has_review_by(self, user):
if user.is_anonymous():
return False
return self.reviews.filter(user=user).exists()
def is_review_permitted(self, user):
"""
Determines whether a user may add a review on this product.
Default implementation respects OSCAR_ALLOW_ANON_REVIEWS and only
allows leaving one review per user and product.
Override this if you want to alter the default behaviour; e.g. enforce
that a user purchased the product to be allowed to leave a review.
"""
if user.is_authenticated() or settings.OSCAR_ALLOW_ANON_REVIEWS:
return not self.has_review_by(user)
else:
return False
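# Override sketch (illustrative only; assumes the order app exposes a
# ``user.orders`` reverse relation and that order lines reference products
# via ``lines__product`` -- adjust the lookups to your project):
#
#   def is_review_permitted(self, user):
#       if not user.is_authenticated():
#           return False
#       has_purchased = user.orders.filter(lines__product=self).exists()
#       return has_purchased and not self.has_review_by(user)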
@cached_property
def num_approved_reviews(self):
return self.reviews.filter(
status=self.reviews.model.APPROVED).count()
class AbstractProductRecommendation(models.Model):
"""
'Through' model for product recommendations
"""
primary = models.ForeignKey(
'catalogue.Product', related_name='primary_recommendations',
verbose_name=_("Primary product"))
recommendation = models.ForeignKey(
'catalogue.Product', verbose_name=_("Recommended product"))
ranking = models.PositiveSmallIntegerField(
_('Ranking'), default=0,
help_text=_('Determines order of the products. A product with a higher'
' value will appear before one with a lower ranking.'))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['primary', '-ranking']
unique_together = ('primary', 'recommendation')
verbose_name = _('Product recommendation')
verbose_name_plural = _('Product recommendations')
class ProductAttributesContainer(object):
"""
Stolen liberally from django-eav, but simplified to be product-specific.
To set attributes on a product, use the `attr` attribute:
product.attr.weight = 125
"""
def __setstate__(self, state):
self.__dict__ = state
self.initialised = False
def __init__(self, product):
self.product = product
self.initialised = False
def __getattr__(self, name):
if not name.startswith('_') and not self.initialised:
values = self.get_values().select_related('attribute')
for v in values:
setattr(self, v.attribute.code, v.value)
self.initialised = True
return getattr(self, name)
raise AttributeError(
_("%(obj)s has no attribute named '%(attr)s'") % {
'obj': self.product.get_product_class(), 'attr': name})
def validate_attributes(self):
for attribute in self.get_all_attributes():
value = getattr(self, attribute.code, None)
if value is None:
if attribute.required:
raise ValidationError(
_("%(attr)s attribute cannot be blank") %
{'attr': attribute.code})
else:
try:
attribute.validate_value(value)
except ValidationError as e:
raise ValidationError(
_("%(attr)s attribute %(err)s") %
{'attr': attribute.code, 'err': e})
def get_values(self):
return self.product.attribute_values.all()
def get_value_by_attribute(self, attribute):
return self.get_values().get(attribute=attribute)
def get_all_attributes(self):
return self.product.get_product_class().attributes.all()
def get_attribute_by_code(self, code):
return self.get_all_attributes().get(code=code)
def __iter__(self):
return iter(self.get_values())
def save(self):
for attribute in self.get_all_attributes():
if hasattr(self, attribute.code):
value = getattr(self, attribute.code)
attribute.save_value(self.product, value)
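# Usage sketch (illustrative; assumes a product whose product class defines a
# ``weight`` attribute):
#
#   product.attr.weight = 125   # set the dynamic attribute
#   product.save()              # AbstractProduct.save() calls product.attr.save()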
@python_2_unicode_compatible
class AbstractProductAttribute(models.Model):
"""
Defines an attribute for a product class. (For example, number_of_pages for
a 'book' class)
"""
product_class = models.ForeignKey(
'catalogue.ProductClass', related_name='attributes', blank=True,
null=True, verbose_name=_("Product type"))
name = models.CharField(_('Name'), max_length=128)
code = models.SlugField(
_('Code'), max_length=128,
validators=[
RegexValidator(
regex=r'^[a-zA-Z_][0-9a-zA-Z_]*$',
message=_(
"Code can only contain the letters a-z, A-Z, digits, "
"and underscores, and can't start with a digit")),
non_python_keyword
])
# Attribute types
TEXT = "text"
INTEGER = "integer"
BOOLEAN = "boolean"
FLOAT = "float"
RICHTEXT = "richtext"
DATE = "date"
OPTION = "option"
ENTITY = "entity"
FILE = "file"
IMAGE = "image"
TYPE_CHOICES = (
(TEXT, _("Text")),
(INTEGER, _("Integer")),
(BOOLEAN, _("True / False")),
(FLOAT, _("Float")),
(RICHTEXT, _("Rich Text")),
(DATE, _("Date")),
(OPTION, _("Option")),
(ENTITY, _("Entity")),
(FILE, _("File")),
(IMAGE, _("Image")),
)
type = models.CharField(
choices=TYPE_CHOICES, default=TYPE_CHOICES[0][0],
max_length=20, verbose_name=_("Type"))
option_group = models.ForeignKey(
'catalogue.AttributeOptionGroup', blank=True, null=True,
verbose_name=_("Option Group"),
help_text=_('Select an option group if using type "Option"'))
required = models.BooleanField(_('Required'), default=False)
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['code']
verbose_name = _('Product attribute')
verbose_name_plural = _('Product attributes')
@property
def is_option(self):
return self.type == self.OPTION
@property
def is_file(self):
return self.type in [self.FILE, self.IMAGE]
def __str__(self):
return self.name
def save_value(self, product, value):
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
try:
value_obj = product.attribute_values.get(attribute=self)
except ProductAttributeValue.DoesNotExist:
# A FileField uses False to signal deletion of the existing file,
# not the creation of a new value
delete_file = self.is_file and value is False
if value is None or value == '' or delete_file:
return
value_obj = ProductAttributeValue.objects.create(
product=product, attribute=self)
if self.is_file:
# File fields in Django are treated differently, see
# django.db.models.fields.FileField and method save_form_data
if value is None:
# No change
return
elif value is False:
# Delete file
value_obj.delete()
else:
# New uploaded file
value_obj.value = value
value_obj.save()
else:
if value is None or value == '':
value_obj.delete()
return
if value != value_obj.value:
value_obj.value = value
value_obj.save()
def validate_value(self, value):
validator = getattr(self, '_validate_%s' % self.type)
validator(value)
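# Example (illustrative): an attribute with type "float" dispatches to
# _validate_float(), so validate_value("1.5") passes while
# validate_value("abc") raises ValidationError.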
# Validators
def _validate_text(self, value):
if not isinstance(value, six.string_types):
raise ValidationError(_("Must be str or unicode"))
_validate_richtext = _validate_text
def _validate_float(self, value):
try:
float(value)
except ValueError:
raise ValidationError(_("Must be a float"))
def _validate_integer(self, value):
try:
int(value)
except ValueError:
raise ValidationError(_("Must be an integer"))
def _validate_date(self, value):
if not (isinstance(value, datetime) or isinstance(value, date)):
raise ValidationError(_("Must be a date or datetime"))
def _validate_boolean(self, value):
if not isinstance(value, bool):
raise ValidationError(_("Must be a boolean"))
def _validate_entity(self, value):
if not isinstance(value, models.Model):
raise ValidationError(_("Must be a model instance"))
def _validate_option(self, value):
if not isinstance(value, get_model('catalogue', 'AttributeOption')):
raise ValidationError(
_("Must be an AttributeOption model object instance"))
if not value.pk:
raise ValidationError(_("AttributeOption has not been saved yet"))
valid_values = self.option_group.options.values_list(
'option', flat=True)
if value.option not in valid_values:
raise ValidationError(
_("%(enum)s is not a valid choice for %(attr)s") %
{'enum': value, 'attr': self})
def _validate_file(self, value):
if value and not isinstance(value, File):
raise ValidationError(_("Must be a file field"))
_validate_image = _validate_file
@python_2_unicode_compatible
class AbstractProductAttributeValue(models.Model):
"""
The "through" model for the m2m relationship between catalogue.Product and
catalogue.ProductAttribute. This specifies the value of the attribute for
a particular product.
For example: number_of_pages = 295
"""
attribute = models.ForeignKey(
'catalogue.ProductAttribute', verbose_name=_("Attribute"))
product = models.ForeignKey(
'catalogue.Product', related_name='attribute_values',
verbose_name=_("Product"))
value_text = models.TextField(_('Text'), blank=True, null=True)
value_integer = models.IntegerField(_('Integer'), blank=True, null=True)
value_boolean = models.NullBooleanField(_('Boolean'), blank=True)
value_float = models.FloatField(_('Float'), blank=True, null=True)
value_richtext = models.TextField(_('Richtext'), blank=True, null=True)
value_date = models.DateField(_('Date'), blank=True, null=True)
value_option = models.ForeignKey(
'catalogue.AttributeOption', blank=True, null=True,
verbose_name=_("Value option"))
value_file = models.FileField(
upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255,
blank=True, null=True)
value_image = models.ImageField(
upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255,
blank=True, null=True)
value_entity = GenericForeignKey(
'entity_content_type', 'entity_object_id')
entity_content_type = models.ForeignKey(
ContentType, null=True, blank=True, editable=False)
entity_object_id = models.PositiveIntegerField(
null=True, blank=True, editable=False)
def _get_value(self):
return getattr(self, 'value_%s' % self.attribute.type)
def _set_value(self, new_value):
if self.attribute.is_option and isinstance(new_value, six.string_types):
# Need to look up instance of AttributeOption
new_value = self.attribute.option_group.options.get(
option=new_value)
setattr(self, 'value_%s' % self.attribute.type, new_value)
value = property(_get_value, _set_value)
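# Example (illustrative): for an attribute of type "integer", reading
# ``self.value`` returns ``value_integer`` and assigning to it sets that
# field; for an "option" attribute, assigning a string looks up the matching
# AttributeOption within the attribute's option group.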
class Meta:
abstract = True
app_label = 'catalogue'
unique_together = ('attribute', 'product')
verbose_name = _('Product attribute value')
verbose_name_plural = _('Product attribute values')
def __str__(self):
return self.summary()
def summary(self):
"""
Gets a string representation of both the attribute and its value,
used e.g. in product summaries.
"""
return u"%s: %s" % (self.attribute.name, self.value_as_text)
@property
def value_as_text(self):
"""
Returns a string representation of the attribute's value. To customise
e.g. image attribute values, declare a _image_as_text property and
return something appropriate.
"""
property_name = '_%s_as_text' % self.attribute.type
return getattr(self, property_name, self.value)
@property
def _richtext_as_text(self):
return strip_tags(self.value)
@property
def _entity_as_text(self):
"""
Returns the unicode representation of the related model. You likely
want to customise this (and maybe _entity_as_html) if you use entities.
"""
return six.text_type(self.value)
@property
def value_as_html(self):
"""
Returns an HTML representation of the attribute's value. To customise
e.g. image attribute values, declare a _image_as_html property and
return e.g. an <img> tag. Defaults to the _as_text representation.
"""
property_name = '_%s_as_html' % self.attribute.type
return getattr(self, property_name, self.value_as_text)
@property
def _richtext_as_html(self):
return mark_safe(self.value)
@python_2_unicode_compatible
class AbstractAttributeOptionGroup(models.Model):
"""
Defines a group of options that collectively may be used as an
attribute type.
For example, Language
"""
name = models.CharField(_('Name'), max_length=128)
def __str__(self):
return self.name
class Meta:
abstract = True
app_label = 'catalogue'
verbose_name = _('Attribute option group')
verbose_name_plural = _('Attribute option groups')
@property
def option_summary(self):
options = [o.option for o in self.options.all()]
return ", ".join(options)
@python_2_unicode_compatible
class AbstractAttributeOption(models.Model):
"""
Provides an option within an option group for an attribute type.
Examples: In a Language group, English, Greek, French
"""
group = models.ForeignKey(
'catalogue.AttributeOptionGroup', related_name='options',
verbose_name=_("Group"))
option = models.CharField(_('Option'), max_length=255)
def __str__(self):
return self.option
class Meta:
abstract = True
app_label = 'catalogue'
verbose_name = _('Attribute option')
verbose_name_plural = _('Attribute options')
@python_2_unicode_compatible
class AbstractOption(models.Model):
"""
An option that can be selected for a particular item when the product
is added to the basket.
For example, a list ID for an SMS message send, or a personalised message
to print on a T-shirt.
This is not the same as an 'attribute' as options do not have a fixed value
for a particular item. Instead, options need to be specified by a customer
when they add the item to their basket.
"""
name = models.CharField(_("Name"), max_length=128)
code = AutoSlugField(_("Code"), max_length=128, unique=True,
populate_from='name')
REQUIRED, OPTIONAL = ('Required', 'Optional')
TYPE_CHOICES = (
(REQUIRED, _("Required - a value for this option must be specified")),
(OPTIONAL, _("Optional - a value for this option can be omitted")),
)
type = models.CharField(_("Status"), max_length=128, default=REQUIRED,
choices=TYPE_CHOICES)
class Meta:
abstract = True
app_label = 'catalogue'
verbose_name = _("Option")
verbose_name_plural = _("Options")
def __str__(self):
return self.name
@property
def is_required(self):
return self.type == self.REQUIRED
class MissingProductImage(object):
"""
Mimics a Django file field by having a name property.
sorl-thumbnail requires all its images to be in MEDIA_ROOT. This class
tries symlinking the default "missing image" image in STATIC_ROOT
into MEDIA_ROOT for convenience, as that is necessary every time an Oscar
project is set up. This avoids the less helpful NotFound IOError that would
be raised when sorl-thumbnail tries to access it.
"""
def __init__(self, name=None):
self.name = name if name else settings.OSCAR_MISSING_IMAGE_URL
media_file_path = os.path.join(settings.MEDIA_ROOT, self.name)
# don't try to symlink if MEDIA_ROOT is not set (e.g. running tests)
if settings.MEDIA_ROOT and not os.path.exists(media_file_path):
self.symlink_missing_image(media_file_path)
def symlink_missing_image(self, media_file_path):
static_file_path = find('oscar/img/%s' % self.name)
if static_file_path is not None:
try:
os.symlink(static_file_path, media_file_path)
except OSError:
raise ImproperlyConfigured((
"Please copy/symlink the "
"'missing image' image at %s into your MEDIA_ROOT at %s. "
"This exception was raised because Oscar was unable to "
"symlink it for you.") % (media_file_path,
settings.MEDIA_ROOT))
else:
logging.info((
"Symlinked the 'missing image' image at %s into your "
"MEDIA_ROOT at %s") % (media_file_path,
settings.MEDIA_ROOT))
@python_2_unicode_compatible
class AbstractProductImage(models.Model):
"""
An image of a product
"""
product = models.ForeignKey(
'catalogue.Product', related_name='images', verbose_name=_("Product"))
original = models.ImageField(
_("Original"), upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255)
caption = models.CharField(_("Caption"), max_length=200, blank=True)
#: Use display_order to determine which is the "primary" image
display_order = models.PositiveIntegerField(
_("Display order"), default=0,
help_text=_("An image with a display order of zero will be the primary"
" image for a product"))
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
class Meta:
abstract = True
app_label = 'catalogue'
# Any custom models should ensure that this ordering is unchanged, or
# your query count will explode. See AbstractProduct.primary_image.
ordering = ["display_order"]
unique_together = ("product", "display_order")
verbose_name = _('Product image')
verbose_name_plural = _('Product images')
def __str__(self):
return u"Image of '%s'" % self.product
def is_primary(self):
"""
Return whether this image's display order is 0 (i.e. it is the primary image).
"""
return self.display_order == 0
def delete(self, *args, **kwargs):
"""
Always keep the display_order as consecutive integers. This avoids
issue #855.
"""
super(AbstractProductImage, self).delete(*args, **kwargs)
for idx, image in enumerate(self.product.images.all()):
image.display_order = idx
image.save()
|
|
from contextlib import contextmanager
from .operations import Operations
from .migration import MigrationContext
from . import util
class EnvironmentContext(object):
"""Represent the state made available to an ``env.py`` script.
:class:`.EnvironmentContext` is normally instantiated
by the commands present in the :mod:`alembic.command`
module. From within an ``env.py`` script, the current
:class:`.EnvironmentContext` is available via the
``alembic.context`` datamember.
:class:`.EnvironmentContext` is also a Python context
manager, that is, it is intended to be used with the
``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
def my_function(rev, context):
'''do something with revision "rev", which
will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
with EnvironmentContext(
config,
script,
fn = my_function,
as_sql = False,
starting_rev = 'base',
destination_rev = 'head',
tag = "sometag"
):
script.run_env()
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
"""
_migration_context = None
_default_opts = None
config = None
"""An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script = None
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.
"""
def __init__(self, config, script, **kw):
"""Construct a new :class:`.EnvironmentContext`.
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
"""
self.config = config
self.script = script
self.context_opts = kw
if self._default_opts:
self.context_opts.update(self._default_opts)
def __enter__(self):
"""Establish a context which provides a
:class:`.EnvironmentContext` object to
env.py scripts.
The :class:`.EnvironmentContext` will
be made available as ``from alembic import context``.
"""
from .context import _install_proxy
_install_proxy(self)
return self
def __exit__(self, *arg, **kw):
from . import context, op
context._remove_proxy()
op._remove_proxy()
def is_offline_mode(self):
"""Return True if the current migrations environment
is running in "offline mode".
This is ``True`` or ``False`` depending
on whether the ``--sql`` flag was passed.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.context_opts.get('as_sql', False)
def is_transactional_ddl(self):
"""Return True if the context is configured to expect a
transactional DDL capable backend.
This defaults to the type of database in use, and
can be overridden by the ``transactional_ddl`` argument
to :meth:`.configure`
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().impl.transactional_ddl
def requires_connection(self):
return not self.is_offline_mode()
def get_head_revision(self):
"""Return the hex identifier of the 'head' revision.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script._as_rev_number("head")
def get_starting_revision_argument(self):
"""Return the 'starting revision' argument,
if the revision was passed using ``start:end``.
This is only meaningful in "offline" mode.
Returns ``None`` if no value is available
or was configured.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
if self._migration_context is not None:
return self.script._as_rev_number(
self.get_context()._start_from_rev)
elif 'starting_rev' in self.context_opts:
return self.script._as_rev_number(
self.context_opts['starting_rev'])
else:
raise util.CommandError(
"No starting revision argument is available.")
def get_revision_argument(self):
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script._as_rev_number(
self.context_opts['destination_rev'])
def get_tag_argument(self):
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get('tag', None)
def get_x_argument(self, as_dictionary=False):
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
then returned.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.6.0
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
value = self.config.cmd_opts.x or []
if as_dictionary:
value = dict(
arg.split('=', 1) for arg in value
)
return value
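# Example (illustrative): ``alembic -x dbname=devdb -x verbose=1 upgrade head``
# makes get_x_argument() return ['dbname=devdb', 'verbose=1'], while
# get_x_argument(as_dictionary=True) returns {'dbname': 'devdb', 'verbose': '1'}.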
def configure(self,
connection=None,
url=None,
dialect_name=None,
transactional_ddl=None,
output_buffer=None,
starting_rev=None,
tag=None,
template_args=None,
target_metadata=None,
include_symbol=None,
include_object=None,
include_schemas=False,
compare_type=False,
compare_server_default=False,
render_item=None,
upgrade_token="upgrades",
downgrade_token="downgrades",
alembic_module_prefix="op.",
sqlalchemy_module_prefix="sa.",
user_module_prefix=None,
**kw
):
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times within a single invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
.. versionadded:: 0.5.0
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
running the "revision" command. Note that the script environment
is only run within the "revision" command if the --autogenerate
option is used, or if the option "revision_environment=true"
is present in the alembic.ini file.
.. versionadded:: 0.3.3
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
:param version_table_schema: Optional schema to place version
table within.
.. versionadded:: 0.5.0
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:
:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object that
will be consulted during autogeneration. The tables present
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.Connection`
to produce candidate upgrade/downgrade operations.
:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``False`` which disables type
comparison. Set to
``True`` to turn on default type comparison, which has varied
accuracy depending on backend.
To customize type comparison behavior, a callable may be
specified which
can filter type comparisons during an autogenerate operation.
The format of this callable is::
def my_compare_type(context, inspected_column,
metadata_column, inspected_type, metadata_type):
# return True if the types are different,
# False if not, or None to allow the default implementation
# to compare these types
return None
context.configure(
# ...
compare_type = my_compare_type
)
``inspected_column`` is a :class:`sqlalchemy.schema.Column` as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.reflecttable`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default type
comparison to proceed.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_server_default`
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.
To customize server default comparison behavior, a callable may
be specified
which can filter server default comparisons during an
autogenerate operation. The format of this
callable is::
def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
# False if not, or None to allow the default implementation
# to compare these defaults
return None
context.configure(
# ...
compare_server_default = my_compare_server_default
)
``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
to proceed. Note that some backends such as Postgresql actually
execute
the two defaults on the database side to compare for equivalence.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_type`
:param include_object: A callable function which is given
the chance to return ``True`` or ``False`` for any object,
indicating if the given object should be considered in the
autogenerate sweep.
The function accepts the following positional arguments:
* ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such as a
:class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.Column`
object
* ``name``: the name of the object. This is typically available
via ``object.name``.
* ``type``: a string describing the type of object; currently
``"table"`` or ``"column"``
* ``reflected``: ``True`` if the given object was produced based on
table reflection, ``False`` if it's from a local :class:`.MetaData`
object.
* ``compare_to``: the object being compared against, if available,
else ``None``.
E.g.::
def include_object(object, name, type_, reflected, compare_to):
if (type_ == "column" and
not reflected and
object.info.get("skip_autogenerate", False)):
return False
else:
return True
context.configure(
# ...
include_object = include_object
)
:paramref:`.EnvironmentContext.configure.include_object` can also
be used to filter on specific schemas to include or omit, when
the :paramref:`.EnvironmentContext.configure.include_schemas`
flag is set to ``True``. The :attr:`.Table.schema` attribute
on each :class:`.Table` object reflected will indicate the name of the
schema from which the :class:`.Table` originates.
.. versionadded:: 0.6.0
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:param include_symbol: A callable function which, given a table name
and schema name (may be ``None``), returns ``True`` or ``False``, indicating
if the given table should be considered in the autogenerate sweep.
.. deprecated:: 0.6.0 :paramref:`.EnvironmentContext.configure.include_symbol`
is superseded by the more generic
:paramref:`.EnvironmentContext.configure.include_object`
parameter.
E.g.::
def include_symbol(tablename, schema):
return tablename not in ("skip_table_one", "skip_table_two")
context.configure(
# ...
include_symbol = include_symbol
)
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:paramref:`.EnvironmentContext.configure.include_object`
:param include_schemas: If True, autogenerate will scan across
all schemas located by the SQLAlchemy
:meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
method, and include all differences in tables found across all
those schemas. When using this option, you may want to also
use the :paramref:`.EnvironmentContext.configure.include_object`
option to specify a callable which
can filter the tables/schemas that get included.
.. versionadded:: 0.4.0
.. seealso::
:paramref:`.EnvironmentContext.configure.include_object`
:param render_item: Callable that can be used to override how
any schema item, i.e. column, constraint, type,
etc., is rendered for autogenerate. The callable receives a
string describing the type of object, the object, and
the autogen context. If it returns False, the
default rendering method will be used. If it returns None,
the item will not be rendered in the context of a Table
construct; that is, it can be used to skip columns or constraints
within op.create_table()::
def my_render_column(type_, col, autogen_context):
if type_ == "column" and isinstance(col, MySpecialCol):
return repr(col)
else:
return False
context.configure(
# ...
render_item = my_render_column
)
Available values for the type string include: ``"column"``,
``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
``"type"``, ``"server_default"``.
.. versionadded:: 0.5.0
.. seealso::
:ref:`autogen_render_types`
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``). Defaults to "``op.``".
Can be ``None`` to indicate no prefix.
:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``). Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.
Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.
:param user_module_prefix: When autogenerate refers to a SQLAlchemy
type (e.g. :class:`.TypeEngine`) where the module name is not
under the ``sqlalchemy`` namespace, this prefix will be used
within autogenerate, if non-``None``; if left at its default of
``None``, the
:paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`
is used instead.
.. versionadded:: 0.6.3 added
:paramref:`.EnvironmentContext.configure.user_module_prefix`
.. seealso::
:ref:`autogen_module_prefix`
Parameters specific to individual backends:
:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
Oracle migrations. Defaults to ``/``. Oracle doesn't add a
semicolon between statements like most other backends.
"""
opts = self.context_opts
if transactional_ddl is not None:
opts["transactional_ddl"] = transactional_ddl
if output_buffer is not None:
opts["output_buffer"] = output_buffer
elif self.config.output_buffer is not None:
opts["output_buffer"] = self.config.output_buffer
if starting_rev:
opts['starting_rev'] = starting_rev
if tag:
opts['tag'] = tag
if template_args and 'template_args' in opts:
opts['template_args'].update(template_args)
opts['target_metadata'] = target_metadata
opts['include_symbol'] = include_symbol
opts['include_object'] = include_object
opts['include_schemas'] = include_schemas
opts['upgrade_token'] = upgrade_token
opts['downgrade_token'] = downgrade_token
opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix
opts['alembic_module_prefix'] = alembic_module_prefix
opts['user_module_prefix'] = user_module_prefix
if render_item is not None:
opts['render_item'] = render_item
if compare_type is not None:
opts['compare_type'] = compare_type
if compare_server_default is not None:
opts['compare_server_default'] = compare_server_default
opts['script'] = self.script
opts.update(kw)
self._migration_context = MigrationContext.configure(
connection=connection,
url=url,
dialect_name=dialect_name,
opts=opts
)
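# Minimal online-mode sketch (illustrative; assumes env.py has built an
# ``engine`` via engine_from_config() and imported ``target_metadata`` from
# the application's model metadata):
#
#   with engine.connect() as connection:
#       context.configure(
#           connection=connection,
#           target_metadata=target_metadata,
#       )
#       with context.begin_transaction():
#           context.run_migrations()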
def run_migrations(self, **kw):
"""Run migrations as determined by the current command line
configuration
as well as versioning information present (or not) in the current
database connection (if one is present).
The function accepts optional ``**kw`` arguments. If these are
passed, they are sent directly to the ``upgrade()`` and
``downgrade()``
functions within each target revision file. By modifying the
``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
functions accept arguments, parameters can be passed here so that
contextual information, usually information to identify a particular
database in use, can be passed from a custom ``env.py`` script
to the migration functions.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
with Operations.context(self._migration_context):
self.get_context().run_migrations(**kw)
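# Sketch (illustrative): if script.py.mako is edited so that upgrade() and
# downgrade() accept an ``engine_name`` argument, env.py can forward it per
# database:
#
#   context.run_migrations(engine_name="billing")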
def execute(self, sql, execution_options=None):
"""Execute the given SQL using the current change context.
The behavior of :meth:`.execute` is the same
as that of :meth:`.Operations.execute`. Please see that
function's documentation for full detail including
caveats and limitations.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
self.get_context().execute(sql,
execution_options=execution_options)
def static_output(self, text):
"""Emit text directly to the "offline" SQL stream.
Typically this is for emitting comments that
start with --. The statement is not treated
as a SQL execution, no ; or batch separator
is added, etc.
"""
self.get_context().impl.static_output(text)
def begin_transaction(self):
"""Return a context manager that will
enclose an operation within a "transaction",
as defined by the environment's offline
and transactional DDL settings.
e.g.::
with context.begin_transaction():
context.run_migrations()
:meth:`.begin_transaction` is intended to
"do the right thing" regardless of
calling context:
* If :meth:`.is_transactional_ddl` is ``False``,
returns a "do nothing" context manager
which otherwise produces no transactional
state or directives.
* If :meth:`.is_offline_mode` is ``True``,
returns a context manager that will
invoke the :meth:`.DefaultImpl.emit_begin`
and :meth:`.DefaultImpl.emit_commit`
methods, which will produce the string
directives ``BEGIN`` and ``COMMIT`` on
the output stream, as rendered by the
target backend (e.g. SQL Server would
emit ``BEGIN TRANSACTION``).
* Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
on the current online connection, which
returns a :class:`sqlalchemy.engine.Transaction`
object. This object demarcates a real
transaction and is itself a context manager,
which will roll back if an exception
is raised.
Note that a custom ``env.py`` script which
has more specific transactional needs can of course
manipulate the :class:`~sqlalchemy.engine.Connection`
directly to produce transactional state in "online"
mode.
"""
if not self.is_transactional_ddl():
@contextmanager
def do_nothing():
yield
return do_nothing()
elif self.is_offline_mode():
@contextmanager
def begin_commit():
self.get_context().impl.emit_begin()
yield
self.get_context().impl.emit_commit()
return begin_commit()
else:
return self.get_bind().begin()
def get_context(self):
"""Return the current :class:`.MigrationContext` object.
If :meth:`.EnvironmentContext.configure` has not been
called yet, raises an exception.
"""
if self._migration_context is None:
raise Exception("No context has been configured yet.")
return self._migration_context
def get_bind(self):
"""Return the current 'bind'.
In "online" mode, this is the
:class:`sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().bind
def get_impl(self):
return self.get_context().impl
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._dns_forwarding_rulesets_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_resource_group_request, build_list_by_virtual_network_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DnsForwardingRulesetsOperations:
"""DnsForwardingRulesetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~dns_resolver_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
parameters: "_models.DnsForwardingRuleset",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.DnsForwardingRuleset"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DnsForwardingRuleset"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DnsForwardingRuleset')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
content_type=content_type,
json=_json,
if_match=if_match,
if_none_match=if_none_match,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
parameters: "_models.DnsForwardingRuleset",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.DnsForwardingRuleset"]:
"""Creates or updates a DNS forwarding ruleset.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param dns_forwarding_ruleset_name: The name of the DNS forwarding ruleset.
:type dns_forwarding_ruleset_name: str
:param parameters: Parameters supplied to the CreateOrUpdate operation.
:type parameters: ~dns_resolver_management_client.models.DnsForwardingRuleset
:param if_match: ETag of the resource. Omit this value to always overwrite the current
resource. Specify the last-seen ETag value to prevent accidentally overwriting any concurrent
changes.
:type if_match: str
:param if_none_match: Set to '*' to allow a new resource to be created, but to prevent updating
an existing resource. Other values will be ignored.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DnsForwardingRuleset or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~dns_resolver_management_client.models.DnsForwardingRuleset]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsForwardingRuleset"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
parameters=parameters,
if_match=if_match,
if_none_match=if_none_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
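# Usage sketch (illustrative; ``client`` and the ruleset construction are
# assumptions -- this operation group is normally reached through the
# generated management client, e.g. ``client.dns_forwarding_rulesets``):
#
#   poller = await client.dns_forwarding_rulesets.begin_create_or_update(
#       resource_group_name="rg",
#       dns_forwarding_ruleset_name="ruleset1",
#       parameters=ruleset,          # a _models.DnsForwardingRuleset instance
#   )
#   result = await poller.result()   # resolves to a DnsForwardingRuleset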
async def _update_initial(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
parameters: "_models.DnsForwardingRulesetPatch",
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.DnsForwardingRuleset"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DnsForwardingRuleset"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DnsForwardingRulesetPatch')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
parameters: "_models.DnsForwardingRulesetPatch",
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.DnsForwardingRuleset"]:
"""Updates a DNS forwarding ruleset.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param dns_forwarding_ruleset_name: The name of the DNS forwarding ruleset.
:type dns_forwarding_ruleset_name: str
:param parameters: Parameters supplied to the Update operation.
:type parameters: ~dns_resolver_management_client.models.DnsForwardingRulesetPatch
:param if_match: ETag of the resource. Omit this value to always overwrite the current
resource. Specify the last-seen ETag value to prevent accidentally overwriting any concurrent
changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DnsForwardingRuleset or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~dns_resolver_management_client.models.DnsForwardingRuleset]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsForwardingRuleset"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
parameters=parameters,
if_match=if_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
if_match: Optional[str] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
if_match=if_match,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a DNS forwarding ruleset. WARNING: This operation cannot be undone. All forwarding
rules within the ruleset will be deleted.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param dns_forwarding_ruleset_name: The name of the DNS forwarding ruleset.
:type dns_forwarding_ruleset_name: str
:param if_match: ETag of the resource. Omit this value to always overwrite the current
resource. Specify the last-seen ETag value to prevent accidentally overwriting any concurrent
changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
if_match=if_match,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
dns_forwarding_ruleset_name: str,
**kwargs: Any
) -> "_models.DnsForwardingRuleset":
"""Gets a DNS forwarding ruleset properties.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param dns_forwarding_ruleset_name: The name of the DNS forwarding ruleset.
:type dns_forwarding_ruleset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DnsForwardingRuleset, or the result of cls(response)
:rtype: ~dns_resolver_management_client.models.DnsForwardingRuleset
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsForwardingRuleset"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
dns_forwarding_ruleset_name=dns_forwarding_ruleset_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DnsForwardingRuleset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets/{dnsForwardingRulesetName}'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DnsForwardingRulesetListResult"]:
"""Lists DNS forwarding rulesets within a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param top: The maximum number of results to return. If not specified, returns up to 100
results.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DnsForwardingRulesetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~dns_resolver_management_client.models.DnsForwardingRulesetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsForwardingRulesetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DnsForwardingRulesetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsForwardingRulesets'} # type: ignore
@distributed_trace
def list(
self,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DnsForwardingRulesetListResult"]:
"""Lists DNS forwarding rulesets in all resource groups of a subscription.
:param top: The maximum number of results to return. If not specified, returns up to 100
results.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DnsForwardingRulesetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~dns_resolver_management_client.models.DnsForwardingRulesetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsForwardingRulesetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DnsForwardingRulesetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnsForwardingRulesets'} # type: ignore
@distributed_trace
def list_by_virtual_network(
self,
resource_group_name: str,
virtual_network_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkDnsForwardingRulesetListResult"]:
"""Lists DNS forwarding ruleset resource IDs attached to a virtual network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param top: The maximum number of results to return. If not specified, returns up to 100
results.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkDnsForwardingRulesetListResult or
the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~dns_resolver_management_client.models.VirtualNetworkDnsForwardingRulesetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkDnsForwardingRulesetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_virtual_network_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
top=top,
template_url=self.list_by_virtual_network.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_virtual_network_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualNetworkDnsForwardingRulesetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_virtual_network.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/listDnsForwardingRulesets'} # type: ignore
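# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client code). It only
# calls the operation methods defined above; how the operations object `ops`
# is obtained from the service client is an assumption that depends on the
# generated package.
async def _example_dns_forwarding_rulesets_usage(ops, resource_group_name, ruleset_name):
    # Page through every ruleset in the resource group (AsyncItemPaged is an
    # async iterator over the deserialized ruleset models).
    async for ruleset in ops.list_by_resource_group(resource_group_name):
        print(ruleset)
    # Fetch a single ruleset by name.
    ruleset = await ops.get(resource_group_name, ruleset_name)
    # Start the long-running delete and wait for the LRO to finish.
    poller = await ops.begin_delete(resource_group_name, ruleset_name)
    await poller.wait()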
|
|
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB-related work is implemented in the
loadbalancer_db.LoadBalancerPluginDb class.
"""
supported_extension_aliases = ["lbaas",
"lbaas_agent_scheduler",
"service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
qdbapi.register_models()
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
def _load_drivers(self):
"""Loads plugin-drivers specified in configuration."""
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCER, self)
# we're at the point when extensions are not loaded yet
# so prevent policy from being loaded
ctx = context.get_admin_context(load_admin_roles=False)
# stop service in case provider was removed, but resources were not
self._check_orphan_pool_associations(ctx, self.drivers.keys())
def _check_orphan_pool_associations(self, context, provider_names):
"""Checks remaining associations between pools and providers.
If the admin has not undeployed resources for a provider that was deleted
from the configuration, the neutron service is stopped. The admin must
delete resources prior to removing providers from the configuration.
"""
pools = self.get_pools(context)
lost_providers = set([pool['provider'] for pool in pools
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
msg = _("Delete associated loadbalancer pools before "
"removing providers %s") % list(lost_providers)
LOG.exception(msg)
raise SystemExit(msg)
def _get_driver_for_provider(self, provider):
if provider in self.drivers:
return self.drivers[provider]
# raise if not associated (should never be reached)
raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
provider)
def _get_driver_for_pool(self, context, pool_id):
pool = self.get_pool(context, pool_id)
try:
return self.drivers[pool['provider']]
except KeyError:
raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
pool_id)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
self.update_status(context, ldb.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.delete_vip(context, v)
def _get_provider_name(self, context, pool):
if ('provider' in pool and
pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
provider_name = pconf.normalize_provider_name(pool['provider'])
self.validate_provider(provider_name)
return provider_name
else:
if not self.default_provider:
raise pconf.DefaultServiceProviderNotFound(
service_type=constants.LOADBALANCER)
return self.default_provider
def create_pool(self, context, pool):
provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
self.service_type_manager.add_resource_association(
context,
constants.LOADBALANCER,
provider_name, p['id'])
#need to add provider name to pool dict,
#because provider was not known to db plugin at pool creation
p['provider'] = provider_name
driver = self.drivers[provider_name]
driver.create_pool(context, p)
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
driver = self._get_driver_for_provider(p['provider'])
driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
# rely on uuid uniqueness:
try:
with context.session.begin(subtransactions=True):
self.service_type_manager.del_resource_associations(
context, [id])
super(LoadBalancerPlugin, self).delete_pool(context, id)
except Exception:
# that should not happen
# if it still does, something went wrong;
# log the error and mark the pool as ERROR
LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
id)
with excutils.save_and_reraise_exception():
self.update_status(context, ldb.Pool,
id, constants.ERROR)
def delete_pool(self, context, id):
# check for delete conditions and update the status
# within a transaction to avoid a race
with context.session.begin(subtransactions=True):
self.update_status(context, ldb.Pool,
id, constants.PENDING_DELETE)
self._ensure_pool_delete_conditions(context, id)
p = self.get_pool(context, id)
driver = self._get_driver_for_provider(p['provider'])
driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.create_member(context, m)
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
self.update_status(context, ldb.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.delete_member(context, m)
def create_health_monitor(self, context, health_monitor):
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
return hm
def update_health_monitor(self, context, id, health_monitor):
old_hm = self.get_health_monitor(context, id)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=hm['id']).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.update_pool_health_monitor(context, old_hm,
hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
hm_id,
pool_id)
def _delete_db_health_monitor(self, context, id):
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
hm = self.get_health_monitor(context, id)
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=id).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.delete_pool_health_monitor(context,
hm,
assoc['pool_id'])
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
monitor_id = health_monitor['health_monitor']['id']
hm = self.get_health_monitor(context, monitor_id)
driver = self._get_driver_for_pool(context, pool_id)
driver.create_pool_health_monitor(context, hm, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
self.update_pool_health_monitor(context, id, pool_id,
constants.PENDING_DELETE)
hm = self.get_health_monitor(context, id)
driver = self._get_driver_for_pool(context, pool_id)
driver.delete_pool_health_monitor(context, hm, pool_id)
def stats(self, context, pool_id):
driver = self._get_driver_for_pool(context, pool_id)
stats_data = driver.stats(context, pool_id)
# if we get something from the driver -
# update the db and return the value from db
# else - return what we have in db
if stats_data:
super(LoadBalancerPlugin, self).update_pool_stats(
context,
pool_id,
stats_data
)
return super(LoadBalancerPlugin, self).stats(context,
pool_id)
def populate_vip_graph(self, context, vip):
"""Populate the vip with: pool, members, healthmonitors."""
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
vip['members'] = [self.get_member(context, member_id)
for member_id in pool['members']]
vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
for hm_id in pool['health_monitors']]
return vip
def validate_provider(self, provider):
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCER)
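# Illustrative driver sketch (an assumption inferred only from the calls made by
# LoadBalancerPlugin above; the real abstract driver interface is defined
# elsewhere in Neutron). A provider driver receives the DB objects created or
# updated by the plugin and is expected to move their status forward once the
# backend work is done.
class _NoopLoadBalancerDriverExample(object):
    def __init__(self, plugin):
        self.plugin = plugin

    def create_vip(self, context, vip):
        # a real driver would configure the backend here before activating
        self.plugin.update_status(context, ldb.Vip, vip['id'], constants.ACTIVE)

    def delete_vip(self, context, vip):
        # the plugin only marks PENDING_DELETE; the driver removes the DB row
        self.plugin._delete_db_vip(context, vip['id'])

    def stats(self, context, pool_id):
        # an empty dict is falsy, so the plugin falls back to the DB values
        return {}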
|
|
from migen import *
from migen.genlib.coding import Decoder as OneHotDecoder
from litex.soc.interconnect import stream
from litex.soc.interconnect.csr import AutoCSR, CSRStorage, CSRStatus, CSR
from litex.soc.integration.doc import AutoDoc, ModuleDoc
from litedram.frontend.dma import LiteDRAMDMAReader, LiteDRAMDMAWriter
class PatternMemory(Module):
"""Memory for storing access pattern
It consists of two separate memories: `data` and `addr`, each of `mem_depth`,
but with different word widths. BIST modules read corresponding pairs (`data`,
`addr`) during operation. BISTWriter writes `data` to the given `addr`,
BISTReader reads `addr` and compares the data read to `data` from the pattern.
"""
def __init__(self, data_width, mem_depth, addr_width=32, pattern_init=None):
addr_init, data_init = None, None
if pattern_init is not None:
addr_init, data_init = zip(*pattern_init)
self.data = Memory(data_width, mem_depth, init=data_init)
self.addr = Memory(addr_width, mem_depth, init=addr_init)
self.specials += self.data, self.addr
class AddressSelector(Module):
# Selects addresses given two masks, as done in:
# https://github.com/google/hammer-kit/blob/40f3988cac39e20ed0294d20bc886e17376ef47b/hammer.c#L270
def __init__(self, nbits):
self.address = Signal(nbits) # part of address used for selection
self.selected = Signal() # 1 if selection_mask matches
self.divisor_mask = Signal(nbits) # modulo division using provided mask
self.selection_mask = Signal(2**nbits) # used to select addresses after division
decoder = OneHotDecoder(len(self.selection_mask))
self.submodules += decoder
assert len(decoder.i) == len(self.divisor_mask)
assert len(decoder.o) == len(self.selection_mask)
self.comb += [
decoder.i.eq(self.address & self.divisor_mask),
self.selected.eq((self.selection_mask & decoder.o) != 0),
]
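# Illustrative software model of the selection logic above (not part of the
# gateware): `address & divisor_mask` keeps the masked row bits, the one-hot
# decoder turns that value into a single bit position, and `selection_mask`
# picks which of those positions count as selected.
def _address_selector_model(address, divisor_mask, selection_mask):
    return bool(selection_mask & (1 << (address & divisor_mask)))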
class RowDataInverter(Module, AutoCSR):
"""Inverts data for given range of row bits
Specify a small range, e.g. rowbits=5; keep in mind that
AddressSelector has to construct a one-hot encoded signal
of width 2**rowbits (1 bit per row), so it quickly
becomes huge.
"""
def __init__(self, addr, data_in, data_out, rowbits, row_shift):
nrows = 2**rowbits
assert rowbits <= 6, \
'High rowbits={} leads to {}-bit selection_mask, this is most likely not desired.'.format(rowbits, nrows) \
+ ' See:\n{}'.format(self.__doc__)
self.submodules.selector = selector = AddressSelector(nbits=rowbits)
self.comb += [
selector.address.eq(addr[row_shift:row_shift + rowbits]),
If(selector.selected,
data_out.eq(~data_in)
).Else(
data_out.eq(data_in)
)
]
def add_csrs(self):
self._divisor_mask = CSRStorage(len(self.selector.divisor_mask),
description="Divisor mask for selecting rows for which pattern data gets inverted")
self._selection_mask = CSRStorage(len(self.selector.selection_mask),
description="Selection mask for selecting rows for which pattern data gets inverted")
self.comb += [
self.selector.divisor_mask.eq(self._divisor_mask.storage),
self.selector.selection_mask.eq(self._selection_mask.storage),
]
class BISTModule(Module):
"""
Provides RAM to store access pattern: `mem_addr` and `mem_data`.
The pattern address space can be limited using the `data_mask`.
For example, having `mem_addr` filled with `[ 0x04, 0x02, 0x03, ... ]`
and `mem_data` filled with `[ 0xff, 0xaa, 0x55, ... ]` and setting
`data_mask = 0b01`, the pattern [(address, data), ...] written will be:
`[(0x04, 0xff), (0x02, 0xaa), (0x04, 0xff), ...]` (wraps due to masking).
DRAM memory range that is being accessed can be configured using `mem_mask`.
To use this module, make sure that `ready` is 1, then write the desired
number of transfers to `count`. Writing to the `start` CSR will initialize
the operation. When the operation is ongoing `ready` will be 0.
"""
def __init__(self, pattern_mem):
self.start = Signal()
self.ready = Signal()
self.count = Signal(32)
self.done = Signal(32)
self.mem_mask = Signal(32)
self.data_mask = Signal(32)
self.data_port = pattern_mem.data.get_port()
self.addr_port = pattern_mem.addr.get_port()
self.specials += self.data_port, self.addr_port
def add_csrs(self):
self._start = CSR()
self._start.description = 'Writing to this register starts the transfer (if ready=1)'
self._ready = CSRStatus(description='Indicates that the transfer is not ongoing')
self._count = CSRStorage(size=len(self.count), description='Desired number of DMA transfers')
self._done = CSRStatus(size=len(self.done), description='Number of completed DMA transfers')
self._mem_mask = CSRStorage(
size = len(self.mem_mask),
description = 'DRAM address mask for DMA transfers'
)
self._data_mask = CSRStorage(
size = len(self.data_mask),
description = 'Pattern memory address mask'
)
self.comb += [
self.start.eq(self._start.re),
self._ready.status.eq(self.ready),
self.count.eq(self._count.storage),
self._done.status.eq(self.done),
self.mem_mask.eq(self._mem_mask.storage),
self.data_mask.eq(self._data_mask.storage),
]
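# Illustrative host-side driver sketch (an assumption, not part of the gateware):
# it follows the CSR protocol described in the BISTModule docstring. `regs`
# stands for whatever CSR accessor the host uses (e.g. a litex_server register
# proxy); names are shown without the per-module prefix LiteX adds.
def _run_bist_transfer(regs, count, mem_mask, data_mask):
    while not regs.ready.read():       # wait until no transfer is ongoing
        pass
    regs.count.write(count)            # desired number of DMA transfers
    regs.mem_mask.write(mem_mask)      # DRAM address mask
    regs.data_mask.write(data_mask)    # pattern memory address mask
    regs.start.write(1)                # any write to `start` begins the transfer
    while not regs.ready.read():       # wait for completion
        pass
    return regs.done.read()            # number of completed DMA transfers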
class Writer(BISTModule, AutoCSR, AutoDoc):
def __init__(self, dram_port, pattern_mem, *, rowbits, row_shift):
super().__init__(pattern_mem)
self.doc = ModuleDoc("""
DMA DRAM writer.
Allows filling DRAM with a predefined pattern using DMA.
Pattern
-------
{common}
""".format(common=BISTModule.__doc__))
dma = LiteDRAMDMAWriter(dram_port, fifo_depth=4)
self.submodules += dma
cmd_counter = Signal(32)
self.comb += [
self.done.eq(cmd_counter),
# pattern
self.data_port.adr.eq(cmd_counter & self.data_mask),
self.addr_port.adr.eq(cmd_counter & self.data_mask),
# DMA
dma.sink.address.eq(self.addr_port.dat_r + (cmd_counter & self.mem_mask)),
]
# DMA data may be inverted using AddressSelector
self.submodules.inverter = RowDataInverter(
addr = dma.sink.address,
data_in = self.data_port.dat_r,
data_out = dma.sink.data,
rowbits = rowbits,
row_shift = row_shift,
)
self.submodules.fsm = fsm = FSM()
fsm.act("READY",
self.ready.eq(1),
If(self.start,
NextValue(cmd_counter, 0),
NextState("WAIT"),
)
)
fsm.act("WAIT", # TODO: we could pipeline the access
If(cmd_counter >= self.count,
NextState("READY")
).Else(
NextState("RUN")
)
)
fsm.act("RUN",
dma.sink.valid.eq(1),
If(dma.sink.ready,
NextValue(cmd_counter, cmd_counter + 1),
NextState("WAIT")
)
)
def add_csrs(self):
super().add_csrs()
self.inverter.add_csrs()
class Reader(BISTModule, AutoCSR, AutoDoc):
def __init__(self, dram_port, pattern_mem, *, rowbits, row_shift):
super().__init__(pattern_mem)
self.doc = ModuleDoc("""
DMA DRAM reader.
Allows checking DRAM contents against a predefined pattern using DMA.
Pattern
-------
{common}
Reading errors
--------------
This module allows checking the locations of errors in memory.
It scans the configured memory area and compares the values read to
the predefined pattern. If `skip_fifo` is 0, this module will stop
after each error encountered, so that it can be examined. Wait until
the `error_ready` CSR is 1. Then use the CSRs `error_offset`,
`error_data` and `error_expected` to examine the errors in the current
transfer. To continue reading, write 1 to `error_continue` CSR.
Setting `skip_fifo` to 1 will disable this behaviour entirely.
The final number of errors can be read from `error_count`.
NOTE: This value represents the number of erroneous *DMA transfers*.
The current progress can be read from the `done` CSR.
""".format(common=BISTModule.__doc__))
error_desc = [
('offset', 32),
('data', dram_port.data_width),
('expected', dram_port.data_width),
]
self.error_count = Signal(32)
self.skip_fifo = Signal()
self.error = stream.Endpoint(error_desc)
dma = LiteDRAMDMAReader(dram_port, fifo_depth=4)
self.submodules += dma
# pass addresses from address FSM (command producer) to pattern FSM (data consumer)
address_fifo = stream.SyncFIFO([('address', len(dma.sink.address))], depth=4)
self.submodules += address_fifo
# ----------------- Address FSM -----------------
counter_addr = Signal(32)
self.comb += [
self.addr_port.adr.eq(counter_addr & self.data_mask),
dma.sink.address.eq(self.addr_port.dat_r + (counter_addr & self.mem_mask)),
]
# Using temporary state 'WAIT' to obtain address offset from memory
self.submodules.fsm_addr = fsm_addr = FSM()
fsm_addr.act("READY",
If(self.start,
NextValue(counter_addr, 0),
NextState("WAIT"),
)
)
fsm_addr.act("WAIT",
# FIXME: should be possible to write the address in WR_ADDR
address_fifo.sink.valid.eq(counter_addr != 0),
If(address_fifo.sink.ready | (counter_addr == 0),
If(counter_addr >= self.count,
NextState("READY")
).Else(
NextState("WR_ADDR")
)
)
)
fsm_addr.act("WR_ADDR",
dma.sink.valid.eq(1),
If(dma.sink.ready,
# send the address in WAIT
NextValue(address_fifo.sink.address, dma.sink.address),
NextValue(counter_addr, counter_addr + 1),
NextState("WAIT")
)
)
# ------------- Pattern FSM ----------------
counter_gen = Signal(32)
# Unmatched memory offsets
error_fifo = stream.SyncFIFO(error_desc, depth=2, buffered=False)
self.submodules += error_fifo
# DMA data may be inverted using AddressSelector
data_expected = Signal.like(dma.source.data)
self.submodules.inverter = RowDataInverter(
addr = address_fifo.source.address,
data_in = self.data_port.dat_r,
data_out = data_expected,
rowbits = rowbits,
row_shift = row_shift,
)
self.comb += [
self.data_port.adr.eq(counter_gen & self.data_mask),
self.error.offset.eq(error_fifo.source.offset),
self.error.data.eq(error_fifo.source.data),
self.error.expected.eq(error_fifo.source.expected),
self.error.valid.eq(error_fifo.source.valid),
error_fifo.source.ready.eq(self.error.ready | self.skip_fifo),
self.done.eq(counter_gen),
]
self.submodules.fsm_pattern = fsm_pattern = FSM()
fsm_pattern.act("READY",
self.ready.eq(1),
If(self.start,
NextValue(counter_gen, 0),
NextValue(self.error_count, 0),
NextState("WAIT"),
)
)
fsm_pattern.act("WAIT", # TODO: we could pipeline the access
If(counter_gen >= self.count,
NextState("READY")
).Else(
NextState("RD_DATA")
)
)
fsm_pattern.act("RD_DATA",
If(dma.source.valid & address_fifo.source.valid,
# we must now change FSM state in a single cycle
dma.source.ready.eq(1),
address_fifo.source.ready.eq(1),
# count the command
NextValue(counter_gen, counter_gen + 1),
# next state depends on if there was an error
If(dma.source.data != data_expected,
NextValue(self.error_count, self.error_count + 1),
NextValue(error_fifo.sink.offset, address_fifo.source.address),
NextValue(error_fifo.sink.data, dma.source.data),
NextValue(error_fifo.sink.expected, data_expected),
If(self.skip_fifo,
NextState("WAIT")
).Else(
NextState("WR_ERR")
)
).Else(
NextState("WAIT")
)
)
)
fsm_pattern.act("WR_ERR",
error_fifo.sink.valid.eq(1),
If(error_fifo.sink.ready | self.skip_fifo,
NextState("WAIT")
)
)
def add_csrs(self):
super().add_csrs()
self.inverter.add_csrs()
self._error_count = CSRStatus(size=len(self.error_count), description='Number of errors detected')
self._skip_fifo = CSRStorage(description='Skip waiting for user to read the errors FIFO')
self._error_offset = CSRStatus(size=len(self.mem_mask), description='Current offset of the error')
self._error_data = CSRStatus(size=len(self.data_port.dat_r), description='Erroneous value read from DRAM memory')
self._error_expected = CSRStatus(size=len(self.data_port.dat_r), description='Value expected to be read from DRAM memory')
self._error_ready = CSRStatus(description='Error detected and ready to read')
self._error_continue = CSR()
self._error_continue.description = 'Continue reading until the next error'
self.comb += [
self._error_count.status.eq(self.error_count),
self.skip_fifo.eq(self._skip_fifo.storage),
self._error_offset.status.eq(self.error.offset),
self._error_data.status.eq(self.error.data),
self._error_expected.status.eq(self.error.expected),
self.error.ready.eq(self._error_continue.re),
self._error_ready.status.eq(self.error.valid),
]
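# Illustrative host-side error inspection sketch (an assumption, not part of the
# gateware): it follows the "Reading errors" protocol from the Reader docstring
# with skip_fifo = 0. `regs` stands for the host's CSR accessor; names are shown
# without the per-module prefix LiteX adds, and a real driver would also drain
# any error still pending after `ready` goes high.
def _collect_reader_errors(regs):
    errors = []
    while not regs.ready.read():                    # transfer still ongoing
        if regs.error_ready.read():                 # an error is waiting
            errors.append((regs.error_offset.read(),
                           regs.error_data.read(),
                           regs.error_expected.read()))
            regs.error_continue.write(1)            # resume reading
    # error_count holds the total number of erroneous DMA transfers
    return errors, regs.error_count.read()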
|
|
""" Module containing the definition of the generic interface of Kernel objects
"""
import numpy as np
from .utils import center_gram, center_rows, safe_len, mpmap
DTYPE = np.float64
class AbstractClassError(Exception):
pass
class GenericKernel(object):
""" Abstract class for kernel operations between vectors and matrices
Classes implementing this interface can define the following methods:
v2v(self, vec1, vec2): (*must* be implemented by subclasses)
kernel evaluation between two vectors
v2m(self, vec, mat):
array of all kernel evaluations between a vector and a matrix
m2m(self, mat1, mat2):
pairwise kernel evaluations between all vectors in mat1 and all
vectors in mat2
gram(self, mat):
kernel gram matrix between all vectors in mat
and set the following attributes:
use_rbf: boolean, indicates whether it is a (Gaussian) RBF kernel
gamma: float, optional: bandwidth parameter for RBF kernels
Parameters
----------
name: string,
the name of the kernel,
libsvm_fmt: boolean, optional, default: False,
whether to add an extra first column of numerical sample
index to comply with libsvm's format
center: boolean, optional, default: False,
whether to center the kernel in feature space
gamma: float, optional, default: None,
the bandwidth parameter for RBF kernels
constant_diag: boolean, optional, default: False,
fix diagonal of Gram to 1
num_threads: int, optional, default: 1,
number of threads to use for m2m and gram methods
(use 0 for 1 thread per core)
"""
# API methods
def __init__(self, name, libsvm_fmt=False, center=False, gamma=None,
use_rbf=False, constant_diag=False, num_threads=1):
self.name = name
self.libsvm_fmt = libsvm_fmt
self.center = center
self.gamma = gamma
self.use_rbf = use_rbf
self.constant_diag = constant_diag
self.num_threads = num_threads
self.cms_ = None # array of column means over the Gram matrix
self.mcm_ = None # 1/n*mean of all the elements of the Gram matrix
self.is_additive = False # whether the kernel is additive or not
@classmethod
def is_kernel_for(cls, name):
""" Abstract method to determine the link between name and class
Needed for '__subclasses__()' to work in factory function
"""
raise AbstractClassError(
"Abstract kernel: use SparseKernel or DenseKernel instead")
def v2v(self, vec1, vec2):
""" Return kernel value between two vectors
Parameters
----------
vec1: (d, ) numpy array,
a d-dimensional vector
vec2: (d, ) numpy array,
a d-dimensional vector
Returns
-------
kern_val: float,
the kernel evaluation between vec1 and vec2
Notes
-----
Abstract method that must be implemented by children classes.
"""
raise AbstractClassError(
"Abstract kernel: use SparseKernel or DenseKernel instead")
def v2m(self, vec, mat):
""" Return array of all kernel evaluations between a vector and a matrix
Parameters
----------
vec: (d, ) numpy array,
a d-dimensional vector
mat: (m, d) numpy array,
m d-dimensional vectors stacked row-wise
Returns
-------
kern_row: (m, ) numpy array,
the kernel evaluations between vec and all lines of mat
(contains additional first element if libsvm_fmt attribute
is set, and is centered if center attribute is set)
Notes
-----
Default version: calls v2v repeatedly on the rows of mat
"""
m = len(mat)
offset = (self.libsvm_fmt and 1) or 0
kern_row = np.ones(offset + m, dtype=DTYPE)
kern_row[offset:] = [self.v2v(vec, vecy) for vecy in mat]
# optionally center (in place) the kernel row in the feature space
if self.center:
self._center_rows(kern_row[offset:])
return kern_row
def m2m(self, mat1, mat2):
""" Return the pairwise kernel evaluations between all vectors in mat1
and all vectors in mat2
Parameters
----------
mat1: (m1, d) numpy array,
m1 d-dimensional vectors stacked row-wise
mat2: (m2, d) numpy array,
m2 d-dimensional vectors stacked row-wise
Returns
-------
kern_mat: (m1, m2) numpy array,
the kernel evaluations between all lines of mat1 and mat2
(contains additional first column if libsvm_fmt attribute
is set, and rows are centered if center attribute is set)
Notes
-----
Default version: calls v2m repeatedly on the rows of mat1
"""
# build the kernel matrix
f = lambda i: self.v2m(mat1[i], mat2)
kern_mat = np.array(
mpmap(f, range(safe_len(mat1)), ncpus=self.num_threads),
dtype=DTYPE)
# v2m has already centered and added an extra 1st col
# update 'id' col to contain line number (starts from 1 for libsvm)
if self.libsvm_fmt:
kern_mat[:, 0] = np.arange(1, kern_mat.shape[0] + 1)
# optionally center (in place) the kernel rows in the feature space
if self.center:
offset = (self.libsvm_fmt and 1) or 0
for _row in kern_mat:
self._center_rows(_row[offset:])
return kern_mat
def gram(self, mat):
"""Return the kernel gram matrix between all vectors in mat
Parameters
----------
mat: (m, d) numpy array,
m d-dimensional vectors stacked row-wise
Returns
-------
kern_mat: (m, m) numpy array,
the kernel evaluations between all rows of mat
(contains additional first column if libsvm_fmt attribute
is set, and is centered if center attribute is set)
Notes
-----
Default version: calls v2v repeatedly
"""
n = safe_len(mat)
# compute the kernel values
# unpack the (i, j) pair explicitly so the lambda also works on Python 3
f = lambda ij: (ij[0], ij[1], self.v2v(mat[ij[0]], mat[ij[1]]))
if self.constant_diag:
# don't compute the diag
ijs = [(i, j) for i in range(n) for j in range(i + 1, n)]
else:
ijs = [(i, j) for i in range(n) for j in range(i, n)]
kern_vals = mpmap(f, ijs, ncpus=self.num_threads)
# fill the kernel matrix
kern_mat = np.ones((n, n), dtype=DTYPE)
for i, j, kval in kern_vals:
kern_mat[i, j] = kval
kern_mat[j, i] = kval
# optionally center (in place) the kernel matrix in the feature space
if self.center:
self._center_gram(kern_mat)  # additionally sets centering params
# make 'id' col to contain line number (index starts from 1 for libsvm)
if self.libsvm_fmt:
kern_mat = np.c_[np.arange(1, kern_mat.shape[0] + 1), kern_mat]
return kern_mat
# Internals
def _center_gram(self, kern_mat):
""" Center (in place) the Gram (or kernel) matrix in the feature space
Mathematical operation: K <- PKP where P = eye(n) - 1/n ones((n,n))
Additionally sets the self.cms_ (column means of the original kernel
matrix) and self.mcm_ (mean of the original column means), which are
the parameters needed to center future kernel evaluations in the same way
"""
self.cms_, self.mcm_ = center_gram(kern_mat)
def _center_rows(self, kern_rows):
""" Center (in place) kernel rows in the feature space
Assumes self.cms_ and self.mcm_ are already defined
"""
if self.cms_ is None or self.mcm_ is None:
raise ValueError('Training Gram matrix must be precomputed before '
'rows can be centered')
center_rows(kern_rows, self.cms_, self.mcm_)
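# Illustrative subclass sketch (not part of this module): the only method a
# concrete kernel *must* provide is v2v(); v2m, m2m, gram, centering and the
# libsvm formatting all come from GenericKernel. The kernel name used here is
# made up for the example.
class _LinearKernelExample(GenericKernel):
    @classmethod
    def is_kernel_for(cls, name):
        # needed for the '__subclasses__()'-based factory mentioned above
        return name == 'linear_example'

    def v2v(self, vec1, vec2):
        # plain dot product between two d-dimensional vectors
        return float(np.dot(vec1, vec2))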
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from neutron_lib.api import converters
from neutron_lib.api import extensions as api_extensions
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions as nexception
from neutron_lib.plugins import directory
from oslo_utils import netutils
import six
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.conf import quota
from neutron.quota import resource_registry
# Security group Exceptions
class SecurityGroupInvalidPortRange(nexception.InvalidInput):
message = _("For TCP/UDP protocols, port_range_min must be "
"<= port_range_max")
class SecurityGroupInvalidPortValue(nexception.InvalidInput):
message = _("Invalid value for port %(port)s")
class SecurityGroupInvalidIcmpValue(nexception.InvalidInput):
message = _("Invalid value for ICMP %(field)s (%(attr)s) "
"%(value)s. It must be 0 to 255.")
class SecurityGroupEthertypeConflictWithProtocol(nexception.InvalidInput):
message = _("Invalid ethertype %(ethertype)s for protocol "
"%(protocol)s.")
class SecurityGroupMissingIcmpType(nexception.InvalidInput):
message = _("ICMP code (port-range-max) %(value)s is provided"
" but ICMP type (port-range-min) is missing.")
class SecurityGroupInUse(nexception.InUse):
message = _("Security Group %(id)s %(reason)s.")
def __init__(self, **kwargs):
if 'reason' not in kwargs:
kwargs['reason'] = _("in use")
super(SecurityGroupInUse, self).__init__(**kwargs)
class SecurityGroupCannotRemoveDefault(nexception.InUse):
message = _("Insufficient rights for removing default security group.")
class SecurityGroupCannotUpdateDefault(nexception.InUse):
message = _("Updating default security group not allowed.")
class SecurityGroupDefaultAlreadyExists(nexception.InUse):
message = _("Default security group already exists.")
class SecurityGroupRuleInvalidProtocol(nexception.InvalidInput):
message = _("Security group rule protocol %(protocol)s not supported. "
"Only protocol values %(values)s and integer representations "
"[0 to 255] are supported.")
class SecurityGroupRulesNotSingleTenant(nexception.InvalidInput):
message = _("Multiple tenant_ids in bulk security group rule create"
" not allowed")
class SecurityGroupRemoteGroupAndRemoteIpPrefix(nexception.InvalidInput):
message = _("Only remote_ip_prefix or remote_group_id may "
"be provided.")
class SecurityGroupProtocolRequiredWithPorts(nexception.InvalidInput):
message = _("Must also specify protocol if port range is given.")
class SecurityGroupNotSingleGroupRules(nexception.InvalidInput):
message = _("Only allowed to update rules for "
"one security profile at a time")
class SecurityGroupNotFound(nexception.NotFound):
message = _("Security group %(id)s does not exist")
class SecurityGroupRuleNotFound(nexception.NotFound):
message = _("Security group rule %(id)s does not exist")
class DuplicateSecurityGroupRuleInPost(nexception.InUse):
message = _("Duplicate Security Group Rule in POST.")
class SecurityGroupRuleExists(nexception.InUse):
message = _("Security group rule already exists. Rule id is %(rule_id)s.")
class SecurityGroupRuleInUse(nexception.InUse):
message = _("Security Group Rule %(id)s %(reason)s.")
def __init__(self, **kwargs):
if 'reason' not in kwargs:
kwargs['reason'] = _("in use")
super(SecurityGroupRuleInUse, self).__init__(**kwargs)
class SecurityGroupRuleParameterConflict(nexception.InvalidInput):
message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")
class SecurityGroupConflict(nexception.Conflict):
message = _("Error %(reason)s while attempting the operation.")
class SecurityGroupRuleInvalidEtherType(nexception.InvalidInput):
message = _("Security group rule for ethertype '%(ethertype)s' not "
"supported. Allowed values are %(values)s.")
def convert_protocol(value):
if value is None:
return
try:
val = int(value)
if val >= 0 and val <= 255:
# Store the protocol number as a string due to bug 1381379:
# PostgreSQL fails when it tries to compare an integer with the
# string that exists in the db.
return str(value)
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
except (ValueError, TypeError):
if value.lower() in sg_supported_protocols:
return value.lower()
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
except AttributeError:
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
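# Illustrative behaviour of convert_protocol (derived from the checks above):
#   convert_protocol(None)  -> None     (protocol is optional)
#   convert_protocol(6)     -> '6'      (integers 0..255 are stored as strings)
#   convert_protocol('TCP') -> 'tcp'    (known protocol names are lower-cased)
#   convert_protocol(300)   -> raises SecurityGroupRuleInvalidProtocol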
def convert_ethertype_to_case_insensitive(value):
if isinstance(value, six.string_types):
for ethertype in sg_supported_ethertypes:
if ethertype.lower() == value.lower():
return ethertype
raise SecurityGroupRuleInvalidEtherType(
ethertype=value, values=sg_supported_ethertypes)
def convert_validate_port_value(port):
if port is None:
return port
if netutils.is_valid_port(port):
return int(port)
else:
raise SecurityGroupInvalidPortValue(port=port)
def convert_ip_prefix_to_cidr(ip_prefix):
if not ip_prefix:
return
try:
cidr = netaddr.IPNetwork(ip_prefix)
return str(cidr)
except (ValueError, TypeError, netaddr.AddrFormatError):
raise exceptions.InvalidCIDR(input=ip_prefix)
def _validate_name_not_default(data, max_len=db_const.NAME_FIELD_SIZE):
msg = validators.validate_string(data, max_len)
if msg:
return msg
if data.lower() == "default":
raise SecurityGroupDefaultAlreadyExists()
validators.add_validator('name_not_default', _validate_name_not_default)
sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys()))
sg_supported_ethertypes = ['IPv4', 'IPv6']
SECURITYGROUPS = 'security_groups'
SECURITYGROUPRULES = 'security_group_rules'
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
SECURITYGROUPS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': '',
'validate': {
'type:name_not_default': db_const.NAME_FIELD_SIZE}},
'description': {'allow_post': True, 'allow_put': True,
'validate': {
'type:string': db_const.DESCRIPTION_FIELD_SIZE},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {
'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'is_visible': True},
SECURITYGROUPRULES: {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SECURITYGROUPRULES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'security_group_id': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'required_by_policy': True},
'remote_group_id': {'allow_post': True, 'allow_put': False,
'default': None, 'is_visible': True},
'direction': {'allow_post': True, 'allow_put': False,
'is_visible': True,
'validate': {'type:values': ['ingress', 'egress']}},
'protocol': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_protocol},
'port_range_min': {'allow_post': True, 'allow_put': False,
'convert_to': convert_validate_port_value,
'default': None, 'is_visible': True},
'port_range_max': {'allow_post': True, 'allow_put': False,
'convert_to': convert_validate_port_value,
'default': None, 'is_visible': True},
'ethertype': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': 'IPv4',
'convert_to': convert_ethertype_to_case_insensitive,
'validate': {'type:values': sg_supported_ethertypes}},
'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
'default': None, 'is_visible': True,
'convert_to': convert_ip_prefix_to_cidr},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {
'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'is_visible': True},
}
}
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {SECURITYGROUPS: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'convert_to':
converters.convert_none_to_empty_list,
'validate': {'type:uuid_list': None},
'default': const.ATTR_NOT_SPECIFIED}}}
# Register the configuration options
quota.register_quota_opts(quota.security_group_quota_opts)
class Securitygroup(api_extensions.ExtensionDescriptor):
"""Security group extension."""
@classmethod
def get_name(cls):
return "security-group"
@classmethod
def get_alias(cls):
return "security-group"
@classmethod
def get_description(cls):
return "The security groups extension."
@classmethod
def get_updated(cls):
return "2012-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = directory.get_plugin()
for resource_name in ['security_group', 'security_group_rule']:
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
resource_registry.register_resource_by_name(resource_name)
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
ex = extensions.ResourceExtension(collection_name,
controller,
attr_map=params)
exts.append(ex)
return exts
def update_attributes_map(self, attributes):
super(Securitygroup, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
list(RESOURCE_ATTRIBUTE_MAP.items()))
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class SecurityGroupPluginBase(object):
@abc.abstractmethod
def create_security_group(self, context, security_group):
pass
@abc.abstractmethod
def update_security_group(self, context, id, security_group):
pass
@abc.abstractmethod
def delete_security_group(self, context, id):
pass
@abc.abstractmethod
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
@abc.abstractmethod
def get_security_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_security_group_rule(self, context, security_group_rule):
pass
@abc.abstractmethod
def delete_security_group_rule(self, context, id):
pass
@abc.abstractmethod
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
@abc.abstractmethod
def get_security_group_rule(self, context, id, fields=None):
pass
|
|
#!/usr/bin/python
# Copyright [yyyy] [name of copyright owner]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import getopt
import ConfigParser
import re
import keystoneclient.v2_0.client as ksclient
import swiftclient
import keystoneclient.openstack.common.apiclient.exceptions
from sqlalchemy import create_engine, insert, text
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import Table, Column, Text, Float, MetaData, DateTime
from datetime import datetime, timedelta
from pytz import timezone
import pytz
import numpy
import json
import pprint
import subprocess
class RepCephOSdu:
def __init__(self, config_file="/etc/osdc_cloud_accounting/settings.py", debug=None, storage_type='object'):
"""Polls gluster for quotas and save into dict"""
self.settings = {}
#read in settings
Config = ConfigParser.ConfigParser()
Config.read(config_file)
sections = ['general','repcephosdu']
self.settings = {}
for section in sections:
options = Config.options(section)
self.settings[section]={}
for option in options:
try:
self.settings[section][option] = Config.get(section, option)
except:
sys.stderr.write("exception on [%s] %s!" % section, option)
self.re_novarc=re.compile('OS_(\S+)=(\S+)')
self.start_time = None
self.end_time = None
self.now_time = datetime.now(tz=pytz.timezone(self.settings['general']['timezone']))
if storage_type == "object":
self.table_name = self.settings['repcephosdu']['db_object_table']
if storage_type == "block":
self.table_name = self.settings['repcephosdu']['db_block_table']
self.force_updates_for=self.settings['repcephosdu']['force_update_for']
self.debug=debug
def get_novarc_creds(self, path=None,debug=None):
"""
Parse a user's novarc file, since I can not figure out how to spoof a user using the admin token.
Returns the dictionary of key/values (keys in lowercase)
"""
novarc_creds = {}
f = open(path,'r')
novarc = f.read()
if debug or self.debug:
sys.stderr.write( "DEBUG: Read in %s:\n%s" %(path,novarc) )
f.close()
for key,value in self.re_novarc.findall( novarc ):
novarc_creds[key.lower()] = value
if debug or self.debug:
sys.stderr.write( "DEBUG: novarc_creds = %s" %(novarc_creds) )
return novarc_creds
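# Illustrative input/output (an assumption about the novarc layout, based only
# on the regex above): a file containing lines such as
#     export OS_USERNAME=alice
#     export OS_TENANT_NAME=project1
# yields {'username': 'alice', 'tenant_name': 'project1'}.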
def get_swift_du_for_tenant(self,username=None, password=None, auth_url=None, tenant_name=None, debug=None):
"""
Takes the openstack credentials, gets a list of containers and their sizes, returns sum
"""
os_creds={}
strip='\'"'
os_creds['user'] = username.translate(None,strip)
os_creds['key'] = password.translate(None,strip)
os_creds['tenant_name'] = tenant_name.translate(None,strip)
os_creds['authurl'] = auth_url.translate(None,strip)
try:
keystone = ksclient.Client(username=os_creds['user'], password=password.translate(None,strip), auth_url=os_creds['authurl'], tenant_name=os_creds['tenant_name'])
except keystoneclient.apiclient.exceptions.Unauthorized:
raise Exception("User: %s Tenant: %s disabled in keystone can not load disk usage"%(username, tenant_name))
auth_token=keystone.auth_token
object_store_url=str(keystone.service_catalog.get_urls(service_type='object-store', endpoint_type='internal')[0])
os_creds['preauthtoken'] = keystone.auth_token
os_creds['preauthurl'] = object_store_url
if debug or self.debug:
sys.stderr.write( "DEBUG: os_creds = %s" %(os_creds) )
swift_conn=swiftclient.client.Connection( **os_creds )
account_reply = swift_conn.get_account()
buckets = account_reply[-1]
if debug or self.debug:
sys.stderr.write( "DEBUG: buckets = %s" %(buckets) )
total_bucket_du = 0
for bucket in buckets:
total_bucket_du += bucket['bytes']
if debug or self.debug:
sys.stderr.write( "DEBUG: %s = %s; total = %s" %(bucket['name'], bucket['bytes'], total_bucket_du) )
return total_bucket_du
def get_rados_du_for_tenant(self,username=None, password=None, auth_url=None, tenant_name=None, debug=None):
"""
Takes the openstack credentials, queries radosgw usage for the tenant via get_tenant_cephs3_stats.sh, returns the total bytes
"""
os_creds={}
strip='\'"'
#os_creds['user'] = username.translate(None,strip)
#os_creds['key'] = password.translate(None,strip)
#os_creds['tenant_name'] = tenant_name.translate(None,strip)
#os_creds['authurl'] = auth_url.translate(None,strip)
cmd = [ '/usr/local/sbin/get_tenant_cephs3_stats.sh','%s'%(tenant_name.translate(None,strip)) ]
#Set quota
if debug:
pprint.pprint(cmd)
try:
stats_json=subprocess.check_output(cmd)
if debug:
pprint.pprint( stats_json )
stats=json.loads(stats_json)
if debug:
pprint.pprint( stats )
total_bucket_du = stats['stats']['total_bytes']
except subprocess.CalledProcessError, e:
sys.stderr.write("Error getting tenant $s stats \n" % tenant )
sys.stderr.write("%s\n" % e.output)
total_bucket_du = None
return total_bucket_du
def update_db(self, username, tenant_name, du, debug ):
metadata = MetaData()
table = Table(self.table_name, metadata,
Column('date', DateTime), # Date of check
Column('username', Text), # Name of Tenant/User
Column('tenant_name', Text), # Name of Tenant/User
Column('value', Float), # Value in bytes ?
)
self.write_to_db(table=table, username=username, tenant_name=tenant_name, du=du, debug=debug )
def db_connect(self, db):
try:
dsn = "mysql://%s:%s@%s/%s" % (self.settings['repcephosdu']['db_user'],
self.settings['repcephosdu']['db_passwd'], self.settings['repcephosdu']['db_server'], db)
engine = create_engine(dsn)
return engine.connect()
except SQLAlchemyError, e:
print e
def write_to_db(self,table=None, username=None, tenant_name=None, du=None, debug=None ):
"""Push it out to a file"""
if self.debug:
debug=True
conn = self.db_connect(self.settings['repcephosdu']['db_database'])
insert = []
insert.append({'date': self.now_time.strftime(self.settings['general']['timeformat']),
'username': username,
'tenant_name': tenant_name,
'value': int(du)
})
if debug:
print "DEBUG: insert %s" %(insert)
conn.execute(table.insert(), insert)
def get_percentile_du(self, start_date=None, end_date=None, username=None, tenant_name=None, path=None, debug=None, percentile=95):
"""Get the 95th percentile of the du"""
#For backwards compatibility:
if path and not username:
username = path
if start_date is None or end_date is None:
sys.stderr.write(
"ERROR: start and end dates not specified in get_percentile_du\n")
sys.exit(1)
if username:
query_field = 'username'
query_value = username
elif tenant_name:
query_field = 'tenant_name'
query_value = tenant_name
my_query = "SELECT value FROM %s where ( date >= '%s' and date <= '%s' ) and %s = '%s'" % (
self.table_name,
start_date,
end_date,
query_field,
query_value)
if debug or self.debug:
sys.stderr.write( "my_query: %s\n" %(my_query))
try:
dus=[]
conn = self.db_connect(self.settings['repcephosdu']['db_database'])
s = text(my_query)
results = conn.execute(s).fetchall()
except SQLAlchemyError as e:
sys.stderr.write("Erroring querying the databases in %s: %s\n" %(__name__,str(e)))
sys.exit(1)
try:
if results:
for x in results:
dus.append(float(x[0]))
if debug or self.debug:
sys.stderr.write( "du: %s\n" %(dus))
result = numpy.percentile(a=dus,q=float(percentile))
if debug or self.debug:
sys.stderr.write( "result(%s): %s" %(percentile, result))
return result
else:
return 0
except Exception as e:
sys.stderr.write( "Unknown error in %s: %s" %(__name__,str(e)))
def is_quota_leader(self, tenant_name, username):
""" Only update for people who are makred leaders in SF. Prevents dulpicate entries """
admin_repcephosddu = RepCephOSdu(storage_type='object')
admin_creds = admin_repcephosddu.get_novarc_creds("/etc/osdc_cloud_accounting/admin_auth", debug=debug)
#Find list of the quota leaders, who the quota needs to apply to
kc = ksclient.Client(**admin_creds)
users = {}
tenants = {}
for user in kc.users.list():
users[user.name] = user.id
for tenant in kc.tenants.list():
tenants[tenant.name] = tenant.id
if self.debug:
print users
print tenants
roles = kc.roles.roles_for_user(user=users[username],tenant=tenants[tenant_name])
if self.debug:
print roles
if [t for t in roles if "quota_leader" in t.name]:
return True
return False
def force_update(self, tenant_name, username):
""" Do we force an update even if not a quota leader """
if self.debug:
print "Do we force update %s:%s in %s" %( tenant_name, username, self.force_updates_for.split(','))
if "%s:%s"%(tenant_name,username) in self.force_updates_for.split(','):
#if datamanager users
return True
return False
def get_project_overrides(self, filename=None, debug=None):
project_overrides = {}
re_overrides = re.compile(r'(\S+)=(\S+)')
f = open(filename,'r')
overrides_file = f.read()
if debug or self.debug:
sys.stderr.write( "DEBUG: Read in %s:\n%s" %(filename,overrides_file) )
f.close()
for key,value in re_overrides.findall( overrides_file ):
project_overrides[key] = value
return project_overrides
if __name__ == "__main__":
#getopt args
update = False # Update the database
debug = False # Debug statements
novarc = None # novarc file for the tenant
s3apitype = "rados" #assume swft cmd or rados
project_override_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["debug", "update", "rados", "swift","novarc=","project_override_file=" ])
except getopt.GetoptError:
sys.stderr.write("ERROR: Getopt\n")
sys.exit(2)
for opt, arg in opts:
if opt in ("--debug"):
debug = True
elif opt in ("--update"):
update = True
elif opt in ("--novarc"):
novarc = arg
elif opt in ("--rados"):
s3apitype = "rados"
elif opt in ("--swift"):
s3apitype = "swift"
elif opt in ("--project_override_file"):
project_override_file = arg
project_overrides={}
if len(sys.argv) <= 1:
sys.stderr.write( "Usage: %s --novarc=/PATH/to/.novarc [--project_override_file /path/to/override][--debug] [--update]\n"%(__file__) )
sys.exit(0)
if novarc:
if os.path.isfile(novarc):
user_repcephosddu = RepCephOSdu(storage_type='object')
novarc_creds = user_repcephosddu.get_novarc_creds(novarc, debug=debug)
tenant_name=novarc_creds['tenant_name']
username=novarc_creds['username']
force_updates_for=user_repcephosddu.force_updates_for
try:
if s3apitype == "swift":
swift_du = user_repcephosddu.get_swift_du_for_tenant( debug=debug, **novarc_creds)
elif s3apitype == "rados":
swift_du = user_repcephosddu.get_rados_du_for_tenant( debug=debug, **novarc_creds)
if debug:
print "Swift du stage 1 = %s" % (swift_du)
# If there is an override file, loop through it summing up. Otherwise run once and exit
if project_override_file:
project_overrides=user_repcephosddu.get_project_overrides(filename=project_override_file, debug=debug)
if debug:
print "Project Overrrides"
pprint.pprint( project_overrides )
if tenant_name in project_overrides.keys():
if debug:
print "Additonal project: %s" %(tenant_name)
for project in project_overrides[tenant_name].split(','):
swift_du += user_repcephosddu.get_rados_du_for_tenant( debug=debug, tenant_name=project)
if debug:
print "Swift du stage N = %s" % (swift_du)
except Exception as e:
sys.stderr.write("WARN: %s\n" % e)
sys.exit(1)
if debug:
print "%s = %s" % (str(swift_du), str(novarc_creds))
#If we are updating db then we do
if not update:
print "%s = %s bytes" %(novarc_creds['username'], swift_du)
if update:
if user_repcephosddu.is_quota_leader(tenant_name=tenant_name, username=username):
user_repcephosddu.update_db(username=username, tenant_name=tenant_name, du=swift_du, debug=debug )
if debug:
print "Update Quota Leader: %s:%s=%s" % (username,tenant_name, swift_du)
if user_repcephosddu.force_update(tenant_name=tenant_name, username=username):
#if datamanager users
user_repcephosddu.update_db(username=username, tenant_name=tenant_name, du=swift_du, debug=debug )
if debug:
print "Update Forced: %s:%s=%s" % (username,tenant_name, swift_du)
|
|
#!/usr/bin/python3
"""
This is a blame game simulator (also known as swarte piet or Schwarzer Peter).
To run it just call it with a list of names and watch the game:
blamegame.py John Marry Joe
If you want to speed up or slow down the game, set a delay value (default
is 1 second) with the -d option:
blamegame.py -d 0.5 John Marry Joe Anna
You can also call it with option -i, in which case the game becomes
interactive and the first player can choose which card to draw.
"""
import argparse
import random
import time
class Card(object):
"Represents a single card."
def __init__(self, subject):
"Create Card with subject `subject`."
self.subject = subject
def is_pair_with(self, other_card):
"Return True if self and other_card make a pair."
return self.subject == other_card.subject
def __repr__(self):
"Return the 'offical' string representation of the object."
return '<Card subject {}>'.format(self.subject)
def __str__(self):
"Return the informal string representation of the object."
return self.subject
class Deck:
"A deck of cards."
SUBJECTS = ['Elefant', 'Vogel', 'Hund', 'Katze', 'Affe', 'Frosch',
'Hase', 'Fuchs', 'Wolf', 'Maus', 'Eule', 'Schwein',
'Pinguin', 'Rind', 'Schaf']
def __init__(self):
self._cards = []
# create a pair of cards for each subject
for subject in self.SUBJECTS:
self._cards.append(Card(subject))
self._cards.append(Card(subject))
# add the swarte piet
self._cards.append(Card('Schwarzer Peter'))
self.shuffle()
def show(self):
"Show all cards in actual order."
for card in self._cards:
print(card)
def shuffle(self):
"Shuffle cards."
random.shuffle(self._cards)
def __iter__(self):
"Provide iterator for Deck which returns card after card."
for card in self._cards:
yield card
def __len__(self):
"Return the number of cards."
return len(self._cards)
class Player:
"Represents a player."
def __init__(self, name):
"Create a new player with name name."
self.name = name
self._cards = []
self._pairs = []
def count_cards(self):
"Return number of cards the player has left."
return len(self._cards)
def add_card(self, card):
"""Give the player a card.
If the new card makes a pair complete both cards will be removed.
"""
pairing_card = self._find_pairing(card)
if pairing_card is not None:
print('{} legt ein Paar ab: {}.'.format(self.name, card.subject))
self._pairs.append((card, pairing_card))
self._cards.remove(pairing_card)
if self.count_cards() == 0:
print('{} hat keine Karten mehr und ist aus dem Spiel'.format(
self.name))
elif self.count_cards() == 1:
print("{} hat noch 1 Karte.".format(self.name))
else:
print("{} hat noch {} Karten.".format(self.name,
self.count_cards()))
else:
self._cards.append(card)
def draw_from(self, other_player):
"Draw card from other_player."
new_card = other_player.remove_card()
self.add_card(new_card)
def remove_card(self, index=None):
"""Return a random card.
This card is removed from this player.
"""
if index is None:
index = random.randint(1, len(self._cards))
return self._cards.pop(index-1)
def _find_pairing(self, other_card):
"""Return the card pairing with other_card or None if player has no
pairing card.
"""
pairing_card = None
for card in self._cards:
if card.is_pair_with(other_card):
pairing_card = card
break
return pairing_card
def __repr__(self):
return self.name
class HumanPlayer(Player):
"""Human Player is a Player which does not draw a random card but
asks the user to draw one.
"""
@staticmethod
def _ask_for_card(player_name, remaining_cards):
"Ask for a number to select a card."
while True:
print('Draw a card from {}!'.format(player_name))
selected_card = input('Enter a number between 1 and {}: '.format(
remaining_cards))
try:
card_index = int(selected_card)
if card_index > 0 and card_index <= remaining_cards:
return card_index
else:
print('Input must be between 1 and {}!'.format(
remaining_cards))
except ValueError:
print('Input must be between 1 and {}!'.format(
remaining_cards))
def draw_from(self, other_player):
"Draw card from other_player."
card_index = self._ask_for_card(other_player.name,
other_player.count_cards())
new_card = other_player.remove_card(card_index)
self.add_card(new_card)
class Game:
"A single game of Black Peter."
def __init__(self, players, delay=1):
"""Create a game of Black Peter.
Args:
players: a list of Player objects.
delay: pause n seconds after each draw.
"""
self.players = players
self.delay = delay
self.deck = Deck()
def _get_next_active_player(self, player):
"Return the next player who has cards left."
next_player = None
index = self.players.index(player)
while True:
index += 1
# Continue with the first if we reached the last player
if index == len(self.players):
index = 0
next_player = self.players[index]
# ignore players without cards
if next_player.count_cards() > 0:
break
return next_player
def deal(self):
"Deal out cards one by one to all players."
for i, card in enumerate(self.deck):
# 0 % 4 = 0; 1 % 4 = 1; 2 % 4 = 2; 3 % 4 = 3; 4 % 4 = 0 etc
self.players[i % len(self.players)].add_card(card)
print('Nach dem Geben:')
for player in self.players:
print('\t{} hat {} Karten.'.format(player.name,
player.count_cards()))
def start(self):
"Start the game."
self.deal()
# Choose a random player to start
active_player = random.choice(self.players)
# active player draws from next_player
next_player = self._get_next_active_player(active_player)
# stop if only one active player is left
while active_player is not next_player:
print("{} zieht von {}".format(active_player.name,
next_player.name))
active_player.draw_from(next_player)
# next_player becomes active player; find a new next_player
active_player = self._get_next_active_player(active_player)
next_player = self._get_next_active_player(active_player)
time.sleep(self.delay)
print('\n{} hat den Schwarzen Peter.'.format(active_player.name))
def parse_args():
"Parse command line options."
parser = argparse.ArgumentParser(description='Schwarzer Peter Simulator.')
parser.add_argument('playernames', metavar='NAME', nargs='+',
help='One or more player names')
parser.add_argument('-d', '--delay', type=float, metavar='SECONDS',
default=1,
help='Delay between each draw in seconds.')
parser.add_argument('-i', '--interactive', action='store_true',
help=('If this option is set, the first player will '
'be asked to draw a specific card.'))
return parser.parse_args()
def run(delay, interactive, playernames):
"Run the script using options stored in opts."
players = []
for i, pname in enumerate(playernames):
if interactive and i == 0:
players.append(HumanPlayer(pname))
else:
players.append(Player(pname))
game = Game(players, delay)
game.start()
if __name__ == '__main__':
ARGS = parse_args()
run(ARGS.delay, ARGS.interactive, ARGS.playernames)
|
|
import codecs
import logging
import os
import re
import sys
from xml.dom import minidom
import gflags
from bibtexparser.bparser import BibTexParser
gflags.DEFINE_string('mindmap_file', None, 'the mindmap filename')
gflags.DEFINE_boolean('use_absolute_paths_for_images', False,
'when set, will use absolute paths for images')
gflags.DEFINE_string('html_file', None, 'the html filename')
gflags.DEFINE_string('latex_file', None, 'the latex filename')
gflags.DEFINE_string('beamer_latex_file', None, 'the beamer latex filename')
gflags.DEFINE_string('bib_file', '~/Dropbox/bib.bib',
'bib file location')
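# A typical invocation might look like this (script and file names are hypothetical):
#   python mindmap_to_doc.py --mindmap_file=notes.mm --html_file=notes.html \
#       --beamer_latex_file=slides.tex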
class BibDatabase(object):
def __init__(self, bib_file_location=None):
if bib_file_location is None:
bib_file_location = gflags.FLAGS.bib_file
bib_file_location = re.sub('~', os.environ['HOME'], bib_file_location)
with open(bib_file_location) as bibfile:
content = bibfile.read()
bp = BibTexParser(content)
self.entry_map = {}
for ent in bp.get_entry_list():
self.entry_map[ent['id']] = ent
def _RetrieveEntry(self, name):
return self.entry_map[name]
db = None
@staticmethod
def GetTheDB():
if BibDatabase.db is None:
BibDatabase.db = BibDatabase()
return BibDatabase.db
@staticmethod
def GetFormattedAuthor(bib_authorname):
names = bib_authorname.split(' and ')
first_author_lastname = names[0].split(',')[0]
if len(names) == 1:
return first_author_lastname
if len(names) >= 3:
return "%s et. al. " % first_author_lastname
second_author_lastname = names[1].split(',')[0]
return "%s and %s" % (first_author_lastname,
second_author_lastname)
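# Example (illustrative names): GetFormattedAuthor('Smith, John and Doe, Jane')
# returns 'Smith and Doe'; with three or more authors it returns 'Smith et al. '.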
def GetOneArtCiteHTML(self, name):
try:
ent = self._RetrieveEntry(name)
except KeyError as _:
return "InvalidBibEntry:%s" % name
return "<span class=\"citation\" title=\"%s\">%s, %s</span>" % (
ent["title"],
self.GetFormattedAuthor(ent['author']),
ent['year'])
def GetOneArtNewciteHTML(self, name):
try:
ent = self._RetrieveEntry(name)
except KeyError as _:
return "InvalidBibEntry:%s" % name
return "<span class=\"citation\" title=\"%s\">%s (%s)</span>" % (
ent["title"],
self.GetFormattedAuthor(ent['author']),
ent['year'])
def GetCiteHTML(self, name):
return '(%s)' % (
"; ".join(self.GetOneArtCiteHTML(x) for x in name.split(',')))
def GetNewciteHTML(self, name):
return ", ".join(self.GetOneArtNewciteHTML(x) for x in name.split(','))
class Node(object):
accepted_nodes = ['node', 'richcontent']
def __init__(self, dom_node, level=0):
self.type = dom_node.nodeName
self.level = level
try:
self.nodeid = dom_node.attributes['ID'].value
except KeyError as _:
self.nodeid = "NONE"
try:
self.text = dom_node.attributes['TEXT'].value
except KeyError as _:
self.text = "NONE"
self.printing_func = None
self.children = []
for child in dom_node.childNodes:
self.AddInfoForChild(child)
def AddInfoForChild(self, child):
if child.nodeType == child.TEXT_NODE:
return
if child.nodeName not in Node.accepted_nodes:
return
if child.nodeName == 'richcontent':
self.children.append(ImageNode(child, self.level + 1))
return
if 'TEXT' not in child.attributes.keys():
for g in child.childNodes:
self.AddInfoForChild(g)
return
if child.attributes['TEXT'].value.startswith('#'):
return
self.children.append(Node(child, self.level + 1))
def __str__(self):
return self.text
def GetText(self, print_format='html'): # pylint: disable=too-many-locals
def ReplaceCitations(s):
def get_cite_html(mo):
citation = BibDatabase.GetTheDB().GetCiteHTML(mo.group(1))
return citation
def get_newcite_html(mo):
citation = BibDatabase.GetTheDB().GetNewciteHTML(mo.group(1))
return citation
s = re.sub(
r'\\cite{(.*?)}',
get_cite_html,
s)
s = re.sub(
r'\\newcite{(.*?)}',
get_newcite_html,
s)
return s
def ReplaceEmphMarkups(s):
return re.sub(
r'\\emph{(.*?)}',
lambda x: '<i>%s</i>' % x.group(1),
s)
def ReplaceSubScores(s):
return re.sub(
r'\\_',
'_',
s)
def ReplacePercScores(s):
return re.sub(
r'\\%',
'%',
s)
def ReplaceTextBFMarkups(s):
return re.sub(
r'\\textbf{(.*?)}',
lambda x: '<b>%s</b>' % x.group(1),
s)
def ReplaceFootnoteMarkups(s):
return re.sub(
r'\\footnote{(.*)}',
lambda x: '<span title="%s" class="footnote">FOOTNOTE</span>' % x.group(
1),
s)
def ReplaceUnderlineMarkups(s):
return re.sub(
r'\\underline{(.*?)}',
lambda x: '<u>%s</u>' % x.group(1),
s)
def ReplaceTextSFMarkups(s):
return re.sub(
r'\\textsf{(.*?)}',
lambda x: '<span class="sf">%s</span>' % x.group(1),
s)
def ReplaceSoutMarkups(s):
return re.sub(
r'\\sout{(.*?)}',
lambda x: '<strike>%s</strike>' % x.group(1),
s)
def ReplaceTildas(s):
return s.replace('~', ' ')
def ReplaceLdots(s):
return s.replace('\\ldots', '...')
def ReplaceDollarSigns(s):
s1 = re.sub(r'\$\$(.*?)\$\$', lambda mo: r"\[%s\]" % mo.group(1), s)
s2 = re.sub(r'\$(.*?)\$', lambda mo: r"\(%s\)" % mo.group(1), s1)
return s2
filters = [ReplaceTildas,
ReplacePercScores,
ReplaceTextBFMarkups,
ReplaceEmphMarkups,
ReplaceTextSFMarkups,
ReplaceSoutMarkups,
ReplaceUnderlineMarkups,
ReplaceCitations,
ReplaceLdots,
ReplaceDollarSigns,
ReplaceSubScores,
ReplaceFootnoteMarkups,
]
txt = self.text
if print_format == 'beamer_latex':
print_format = 'latex'
if print_format == 'latex':
if '<TABLE' in txt or '<table' in txt:
return "TABLE"
return txt
for f in reversed(filters):
txt = f(txt)
return txt
def PrintSelfToWriter(self, writer, print_format='html'):
if self.level == 0:
return
writer.write(self.GetText(print_format))
def SetPrintingFunc(self, func):
self.printing_func = func
def GetChildren(self):
return self.children
def GetPrintableChildren(self):
return [child for child in self.GetChildren() if child.IsPrintable()]
def HasPrinter(self):
return self.printing_func is not None
def GetPrinter(self):
assert self.printing_func is not None
return self.printing_func
def GetLevel(self):
return self.level
def IsFormattingNode(self):
return (self.GetText() in ['SECTIONS', 'SUBSECTIONS', 'SUBSUBSECTIONS',
'LIST', 'ULIST', 'HLIST']) or (
self.GetText().startswith('WIDTH='))
def GetTheFormattingChildNode(self):
for child in self.GetChildren():
if child.IsFormattingNode():
return child
return None
def IsImageNode(self): # pylint: disable=no-self-use
return False
def IsCommentNode(self):
return self.GetText().startswith('Comment:')
def IsStoryNode(self):
return self.GetText().startswith('Story:')
def IsPrintable(self):
return not self.IsFormattingNode()
def IsGraphNodeDescription(self):
cld = self.GetPrintableChildren()
return (len(cld) == 1) and (cld[0].IsImageNode())
def IsHelperNode(self):
return self.IsStoryNode() or self.IsCommentNode()
def IsLeafNode(self):
formatting_node = self.GetTheFormattingChildNode()
if formatting_node is not None and (
formatting_node.GetText() == 'LIST'
or formatting_node.GetText() == 'ULIST'
or formatting_node.GetText() == 'HLIST'
):
return True
return len(self.GetPrintableChildren()) == 0 or self.IsGraphNodeDescription(
) or all(child.IsHelperNode() for child in self.GetPrintableChildren())
def QualifyAsParagraph(self):
cld = self.GetPrintableChildren()
return (not self.IsLeafNode()
and all(child.IsLeafNode() for child in cld)
and not self.IsGraphNodeDescription())
class ImageNode(Node):
def __init__(self, dom_node, level):
Node.__init__(self, dom_node, level)
rel_loc = dom_node.getElementsByTagName("img")[0].attributes['src'].value
loc = rel_loc
if gflags.FLAGS.use_absolute_paths_for_images:
loc = os.path.abspath(os.path.join(
os.path.dirname(gflags.FLAGS.mindmap_file), rel_loc))
self.img = loc
def IsImageNode(self):
return True
def GetImageLoc(self):
return self.img
def IsLeafNode(self):
return True
def GetPrinterFromFormattingNode(formatting_node, node):
fn_text = formatting_node.GetText()
mo = re.match(r'WIDTH=(\d+(\.\d+)?)', fn_text)
if mo is not None:
return OutputImage(node, width=float(mo.group(1)))
tags_map = {'SECTIONS': PrintCurrentAsSection(node, 'h2'),
'SUBSECTIONS': PrintCurrentAsSection(node, 'h3'),
'SUBSUBSECTIONS': PrintCurrentAsSection(node, 'h4'),
}
return tags_map[fn_text]
class Organization(object):
def __init__(self, mm_file_content):
dom = minidom.parseString(mm_file_content)
self.doc = self.SaveFromDom(dom)
self.LabelTree(self.doc)
@staticmethod
def SaveFromDom(dom):
return Node(dom.childNodes[0])
def _TraverseAllDescendents(self, node=None):
"""An iterator to yield all the descendents in a DFS manner
Args:
node: the current node. When it is none, will start from the root.
Returns:
An iterator of all descendents, including itself.
"""
if node is None:
node = self.doc
yield node
for child in node.children:
for grand_kid in self._TraverseAllDescendents(child):
yield grand_kid
def LabelAllIntoLayers(self, node):
formatting_node = node.GetTheFormattingChildNode()
if formatting_node is not None:
if formatting_node.GetText() == 'LIST':
node.SetPrintingFunc(OutputOrderedList(node))
for child in node.GetChildren():
self.LabelAllIntoLayers(child)
return
if formatting_node.GetText() == 'ULIST':
node.SetPrintingFunc(OutputUnorderedList(node))
for child in node.GetChildren():
self.LabelAllIntoLayers(child)
return
if formatting_node.GetText() == 'HLIST':
node.SetPrintingFunc(OutputHAlignedList(node))
for child in node.GetChildren():
self.LabelAllIntoLayers(child)
return
if not node.HasPrinter():
node.SetPrintingFunc(DirectlyPrintThisAndSub(node))
for child in node.GetPrintableChildren():
child.SetPrintingFunc(
GetPrinterFromFormattingNode(formatting_node, child))
if node.GetLevel() == 1:
node.SetPrintingFunc(PrintTopLevel(node))
if node.IsCommentNode():
node.SetPrintingFunc(OutputComment(node))
elif node.IsStoryNode():
node.SetPrintingFunc(OutputStory(node))
elif node.IsGraphNodeDescription():
node.SetPrintingFunc(DirectlyPrintThisAndSub(node))
if not node.HasPrinter():
if node.IsImageNode():
node.SetPrintingFunc(OutputImage(node))
elif node.QualifyAsParagraph():
node.SetPrintingFunc(OutputParagraph(node))
else:
node.SetPrintingFunc(OutputOrderedList(node))
for child in node.GetChildren():
self.LabelAllIntoLayers(child)
def LabelTree(self, node):
self.LabelAllIntoLayers(node)
node.SetPrintingFunc(DirectlyPrintSub(node))
def LabelErrorsOnFrames(self, node_error_mapping):
"""Label frames in the graph to output error messages instead.
It will label the frame in a way to print its contents as they are,
with the error message on the title.
Args:
node_error_mapping: mappings between frames' corresponding
node IDs and the error they produce.
"""
for node in self._TraverseAllDescendents():
if node.nodeid in node_error_mapping:
node.SetPrintingFunc(
OutputFrameAndDebugMessage(
node, node_error_mapping[node.nodeid]))
def OutputToHTML(self, filename):
with codecs.open(filename, 'w', 'utf8') as outputfile:
print >> outputfile, """
<meta charset="UTF-8">
<style>
span.citation {
color : blue;
}
span.footnote {
color : green;
font-size: 50%;
vertical-align: top;
}
span.sf {
font-family: "Arial Black", Gadget, sans-serif
}
</style>
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script language="javascript">
var should_hide = false;
window.onload = function() {
should_hide = true;
SetVisability();
}
function SetVisability() {
var cols = document.getElementsByClassName('help');
for(i=0; i<cols.length; i++) {
cols[i].hidden = should_hide;
}
}
function ToggleComments() {
should_hide = !should_hide;
SetVisability();
}
</script>
<button onclick="ToggleComments()">show/hide comments</button>
"""
self.doc.GetPrinter()(outputfile)
def OutputToLatex(self, filename):
with codecs.open(filename, 'w', 'utf8') as outputfile:
self.doc.GetPrinter()(outputfile, 'latex')
def OutputToBeamerLatex(self, filename):
with codecs.open(filename, 'w', 'utf8') as outputfile:
self.doc.GetPrinter()(outputfile, 'beamer_latex')
def OutputOrderedList(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
PrintInLatexFormatWithTag(writer, 'beamer_latex')
def PrintInHTMLFormat(writer):
current_node.PrintSelfToWriter(writer)
if current_node.GetPrintableChildren():
writer.write('<ol>')
for t in current_node.GetPrintableChildren():
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer)
writer.write('<br>')
else:
writer.write('<li>')
t.GetPrinter()(writer)
writer.write('</li>')
writer.write('</ol>')
def PrintInLatexFormat(writer):
PrintInLatexFormatWithTag(writer, 'latex')
def PrintInLatexFormatWithTag(writer, tag='latex'):
current_node.PrintSelfToWriter(writer, tag)
if current_node.GetPrintableChildren():
writer.write(r'\begin{enumerate}')
for t in current_node.GetPrintableChildren():
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer, tag)
writer.write('\n')
else:
writer.write(r'\item ')
t.GetPrinter()(writer, tag)
writer.write('\n')
writer.write(r'\end{enumerate}')
return PrintTo
def OutputUnorderedList(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
PrintInLatexFormatWithFormatTag(writer, 'beamer_latex')
def PrintInHTMLFormat(writer):
current_node.PrintSelfToWriter(writer)
if current_node.GetPrintableChildren():
writer.write('<ul>')
for t in current_node.GetPrintableChildren():
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer)
writer.write('<br>')
else:
writer.write('<li>')
t.GetPrinter()(writer)
writer.write('</li>')
writer.write('</ul>')
def PrintInLatexFormat(writer):
PrintInLatexFormatWithFormatTag(writer, 'latex')
def PrintInLatexFormatWithFormatTag(writer, tag='latex'):
current_node.PrintSelfToWriter(writer, tag)
if current_node.GetPrintableChildren():
writer.write(r'\begin{itemize}')
for t in current_node.GetPrintableChildren():
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer, tag)
writer.write('\n')
else:
writer.write(r'\item ')
t.GetPrinter()(writer, tag)
writer.write('\n')
writer.write(r'\end{itemize}')
return PrintTo
def OutputHAlignedList(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
PrintInLatexFormatWithFormatTag(writer, 'beamer_latex')
def PrintInHTMLFormat(writer):
current_node.PrintSelfToWriter(writer)
if current_node.GetPrintableChildren():
writer.write('<ul>')
for t in current_node.GetPrintableChildren():
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer)
writer.write('<br>')
else:
writer.write('<li>')
t.GetPrinter()(writer)
writer.write('</li>')
writer.write('</ul>')
def PrintInLatexFormat(writer):
PrintInLatexFormatWithFormatTag(writer, 'latex')
def PrintInLatexFormatWithFormatTag(writer, tag='latex'):
current_node.PrintSelfToWriter(writer, tag)
if current_node.GetPrintableChildren():
all_children = current_node.GetPrintableChildren()
algned_children = [t for t in all_children if not (
t.IsStoryNode() or t.IsCommentNode())]
n = len(algned_children)
writer.write(r'\vspace{0.2cm}\begin{columns}[onlytextwidth]')
col_width = 0.9 / n
for t in all_children:
if t.IsStoryNode() or t.IsCommentNode():
t.GetPrinter()(writer, tag)
writer.write('\n')
else:
writer.write(
r'\begin{column}{%.2f\textwidth} \centering ' %
col_width)
t.GetPrinter()(writer, tag)
writer.write(r'\end{column}')
writer.write(r'\end{columns}')
return PrintTo
def OutputStory(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
pass
def PrintInHTMLFormat(writer):
writer.write('<i><span class="help">')
current_node.PrintSelfToWriter(writer)
writer.write('</span></i>')
def PrintInLatexFormat(writer):
writer.write('%%')
current_node.PrintSelfToWriter(writer, 'latex')
return PrintTo
def OutputImage(current_node, width=None):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer, current_node, width)
elif print_format == 'latex':
PrintInLatexFormat(writer, current_node, width)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer, current_node, width)
def PrintInBeamerLatexFormat(writer, current_node, width):
if width is None:
width = r'.7\textwidth'
elif width <= 1:
width = r'%.2f\textwidth' % width
else:
width = r'%.2fpx' % width
writer.write(r'\begin{centering}\includegraphics[width=%s]{%s}' % (
width, current_node.GetImageLoc()))
writer.write(r'\end{centering}')
def PrintInHTMLFormat(writer, current_node, width):
if width is None:
width = 500
writer.write(
'<center><img src="%s" width="%.2fpx">' %
(current_node.GetImageLoc(), width))
writer.write('</img></center>')
def PrintInLatexFormat(writer, current_node, width):
if width is None:
width = r'.7\textwidth'
else:
width = r'%.2f\textwidth' % width
writer.write(r'\begin{figure}\includegraphics[width=%s]{%s}' % (
width, current_node.GetImageLoc()))
writer.write(r'\end{figure}')
return PrintTo
def OutputComment(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
PrintInLatexFormat(writer)
def PrintInHTMLFormat(writer):
writer.write('<span class="help" style="color:red">')
current_node.PrintSelfToWriter(writer)
writer.write('</span>')
def PrintInLatexFormat(writer):
writer.write(r'\todo[size=\tiny]{')
current_node.PrintSelfToWriter(writer, 'latex')
writer.write(r'}')
return PrintTo
def OutputParagraph(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
writer.write("\n%%frame: {}%%\n".format(current_node.nodeid))
writer.write(r'\begin{frame}{')
current_node.PrintSelfToWriter(writer, 'beamer_latex')
writer.write(r'}')
for i in current_node.GetPrintableChildren():
i.GetPrinter()(writer, 'beamer_latex')
writer.write('\n')
writer.write(r'\end{frame}')
def PrintInHTMLFormat(writer):
writer.write(
'<p><span class="help" style="font-size:120%; font-style:italic">')
current_node.PrintSelfToWriter(writer)
writer.write('</span>')
for i in current_node.GetPrintableChildren():
writer.write('<br class="help"> ')
i.GetPrinter()(writer)
writer.write('</span></p>')
def PrintInLatexFormat(writer):
writer.write('\n%%')
current_node.PrintSelfToWriter(writer, 'latex')
writer.write('\n')
for i in current_node.GetPrintableChildren():
i.GetPrinter()(writer, 'latex')
writer.write('\n')
writer.write('\n')
return PrintTo
def DirectlyPrintSub(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer, 'beamer_latex')
writer.write('\n')
def PrintInHTMLFormat(writer):
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer)
writer.write(' <br class="help">')
def PrintInLatexFormat(writer):
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer, 'latex')
writer.write('\n')
return PrintTo
def OutputFrameAndDebugMessage(current_node, error_messages):
"""Output the error message as title, and normal content as content.
This printer is used when there is an error on this page.
Args:
current_node: the current node (a frame).
error_messages: a list of latex compilation messages for errors in this
frame.
Returns:
A printer for printing the latex code into a writer.
"""
def PrintTo(writer, print_format='beamer_latex'):
if print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
else:
logging.fatal("Unsupported format %s", format)
def PrintInBeamerLatexFormat(writer):
writer.write(r'\begin{frame}[fragile]{Error on page\ldots}')
writer.write(r'\begin{verbatim}')
writer.write('\n')
for msg in error_messages:
writer.write(msg)
writer.write("\n")
writer.write(r'\end{verbatim}')
writer.write('\n')
writer.write(r'\end{frame}')
return PrintTo
def DirectlyPrintThisAndSub(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
writer.write(current_node.GetText(print_format='beamer_latex'))
writer.write('\n')
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer, 'beamer_latex')
writer.write('\n')
def PrintInHTMLFormat(writer):
writer.write(current_node.GetText())
writer.write('<br>')
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer)
writer.write('<br>')
def PrintInLatexFormat(writer):
writer.write(current_node.GetText(print_format='latex'))
writer.write('\n')
for t in current_node.GetPrintableChildren():
t.GetPrinter()(writer, 'latex')
writer.write('\n')
return PrintTo
def PrintCurrentAsSection(current_node, tag):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
latex_tag = None
if tag == 'h2':
latex_tag = 'section'
elif tag == 'h3':
latex_tag = 'subsection'
elif tag == 'h4':
latex_tag = 'subsubsection'
assert latex_tag is not None
txt = current_node.GetText('latex')
if txt.strip():
writer.write(r"\%s{" % latex_tag)
writer.write(txt)
writer.write("}\n")
DirectlyPrintSub(current_node)(writer, 'beamer_latex')
def PrintInHTMLFormat(writer):
writer.write("<%s>" % tag)
writer.write(current_node.GetText())
writer.write("</%s>" % tag)
DirectlyPrintSub(current_node)(writer)
def PrintInLatexFormat(writer):
latex_tag = None
if tag == 'h2':
latex_tag = 'section'
elif tag == 'h3':
latex_tag = 'subsection'
elif tag == 'h4':
latex_tag = 'subsubsection'
assert latex_tag is not None
txt = current_node.GetText('latex')
if txt.strip():
writer.write(r"\%s{" % latex_tag)
writer.write(txt)
writer.write("}\n")
DirectlyPrintSub(current_node)(writer, 'latex')
return PrintTo
def PrintTopLevel(current_node):
def PrintTo(writer, print_format='html'):
if print_format == 'html':
PrintInHTMLFormat(writer)
elif print_format == 'latex':
PrintInLatexFormat(writer)
elif print_format == 'beamer_latex':
PrintInBeamerLatexFormat(writer)
def PrintInBeamerLatexFormat(writer):
cur_text_lines = current_node.GetText().split("\n")
title = cur_text_lines[0]
subtitle = ""
author = ""
if len(cur_text_lines) >= 2:
subtitle = cur_text_lines[1]
if len(cur_text_lines) >= 3:
author = cur_text_lines[2]
writer.write(r"""
\title{%s}
\subtitle{%s}
\author[%s]{%s}
\date{}
\begin{frame}
\maketitle
\end{frame}
""" % (title, subtitle, author, author))
DirectlyPrintSub(current_node)(writer, print_format='beamer_latex')
def PrintInLatexFormat(writer):
cur_text_lines = current_node.GetText().split("\n")
title = cur_text_lines[0]
subtitle = ""
author = ""
if len(cur_text_lines) >= 2:
subtitle = cur_text_lines[1]
if len(cur_text_lines) >= 3:
author = cur_text_lines[2]
writer.write(r"""
\title{%s}
\date{%s}
\author{%s}
\maketitle
""" % (title, subtitle, author))
DirectlyPrintSub(current_node)(writer, print_format='latex')
def PrintInHTMLFormat(writer):
writer.write("<center><h1>")
writer.write(current_node.GetText())
writer.write("</h1></center>")
DirectlyPrintSub(current_node)(writer)
return PrintTo
def main():
try:
gflags.FLAGS(sys.argv)
except gflags.FlagsError as e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], gflags.FLAGS)
sys.exit(1)
logging.basicConfig(level=logging.INFO)
if gflags.FLAGS.mindmap_file is None:
print 'Usage: %s ARGS\n%s' % (sys.argv[0], gflags.FLAGS)
sys.exit(1)
org = Organization(
codecs.open(gflags.FLAGS.mindmap_file, 'r', 'utf8').read())
if gflags.FLAGS.html_file is not None:
org.OutputToHTML(gflags.FLAGS.html_file)
if gflags.FLAGS.beamer_latex_file is not None:
org.OutputToBeamerLatex(gflags.FLAGS.beamer_latex_file)
if gflags.FLAGS.latex_file is not None:
org.OutputToLatex(gflags.FLAGS.latex_file)
if __name__ == "__main__":
main()
|
|
# -*- test-case-name: xmantissa.test.test_websession -*-
# Copyright 2005 Divmod, Inc. See LICENSE file for details
"""
Sessions that persist in the database.
Every L{SESSION_CLEAN_FREQUENCY} seconds, a pass is made over all persistent
sessions, and those that are more than L{PERSISTENT_SESSION_LIFETIME} seconds
old are deleted. Transient sessions die after L{TRANSIENT_SESSION_LIFETIME}
seconds.
These three globals can be overridden by passing appropriate values to the
L{PersistentSessionWrapper} constructor: C{sessionCleanFrequency},
C{persistentSessionLifetime}, and C{transientSessionLifetime}.
"""
from datetime import timedelta
from twisted.cred import credentials
from twisted.internet import reactor
from epsilon import extime
from axiom import attributes, item, userbase
from nevow import guard
SESSION_CLEAN_FREQUENCY = 60 * 60 * 25 # 25 hours, just over a day
PERSISTENT_SESSION_LIFETIME = 60 * 60 * 24 * 7 * 2 # 2 weeks
TRANSIENT_SESSION_LIFETIME = 60 * 12 + 32 # 12 minutes, 32 seconds.
def usernameFromRequest(request):
"""
Take an HTTP request and return a username of the form <user>@<domain>.
@type request: L{inevow.IRequest}
@param request: An HTTP request
@return: A C{str}
"""
username = request.args.get('username', [''])[0]
if '@' not in username:
username = '%s@%s' % (
username, request.getHeader('host').split(':')[0])
return username
class PersistentSession(item.Item):
"""
A session that persists on the database.
These sessions should not store any state, but are used only to determine
that the user has previously authenticated and should be given a transient
session (a regular guard session, not database persistent) without
providing credentials again.
"""
typeName = 'persistent_session'
schemaVersion = 1
sessionKey = attributes.bytes(allowNone=False, indexed=True)
lastUsed = attributes.timestamp(defaultFactory=extime.Time, indexed=True)
authenticatedAs = attributes.bytes(allowNone=False, doc="""
The username and domain that this session was authenticated as.
""")
def renew(self):
"""
Renew the lifetime of this object.
Call this when the user logs in so this session does not expire.
"""
self.lastUsed = extime.Time()
class DBPassthrough(object):
"""
A dictionaryish thing that manages sessions and interfaces with guard.
This is set as the C{sessions} attribute on a L{nevow.guard.SessionWrapper}
instance, or in this case, a subclass. Guard uses a vanilla dict by
default; here we pretend to be a dict and introduce persistent-session
behaviour.
"""
def __init__(self, wrapper):
self.wrapper = wrapper
self._transientSessions = {}
def __contains__(self, key):
# We use __getitem__ here so that transient sessions are always
# created. Otherwise, sometimes guard will call __contains__ and assume
# the transient session is there, without creating it.
try:
self[key]
except KeyError:
return False
return True
has_key = __contains__
def __getitem__(self, key):
if key is None:
raise KeyError("None is not a valid session key")
try:
return self._transientSessions[key]
except KeyError:
if self.wrapper.authenticatedUserForKey(key):
session = self.wrapper.sessionFactory(self.wrapper, key)
self._transientSessions[key] = session
session.setLifetime(self.wrapper.sessionLifetime) # screw you guard!
session.checkExpired()
return session
raise
def __setitem__(self, key, value):
self._transientSessions[key] = value
def __delitem__(self, key):
del self._transientSessions[key]
def __repr__(self):
return 'DBPassthrough at %i; %r' % (id(self), self._transientSessions)
class PersistentSessionWrapper(guard.SessionWrapper):
"""
Extends L{nevow.guard.SessionWrapper} to reauthenticate previously
authenticated users.
There are 4 possible states:
1. new user, no persistent session, no transient session
2. anonymous user, no persistent session, transient session
3. returning user, persistent session, no transient session
4. active user, persistent session, transient session
Guard will look in the sessions dict, and if it finds a key matching a
cookie sent by the client, will return the value as the session. However,
if a user has a persistent session cookie, but no transient session, one is
created here.
"""
def __init__(
self,
store,
portal,
transientSessionLifetime=TRANSIENT_SESSION_LIFETIME,
persistentSessionLifetime=PERSISTENT_SESSION_LIFETIME,
sessionCleanFrequency=SESSION_CLEAN_FREQUENCY,
enableSubdomains=False,
domains=(),
clock=None,
**kw):
guard.SessionWrapper.__init__(self, portal, **kw)
self.store = store
self.sessions = DBPassthrough(self)
self.cookieKey = 'divmod-user-cookie'
self.sessionLifetime = transientSessionLifetime
self.persistentSessionLifetime = persistentSessionLifetime
self.sessionCleanFrequency = sessionCleanFrequency
self._enableSubdomains = enableSubdomains
self._domains = domains
self._clock = reactor if clock is None else clock
if self.store is not None:
self._cleanSessions()
def createSessionForKey(self, key, user):
"""
Create a persistent session in the database.
@type key: L{bytes}
@param key: The persistent session identifier.
@type user: L{bytes}
@param user: The username the session will belong to.
"""
PersistentSession(
store=self.store,
sessionKey=key,
authenticatedAs=user)
def authenticatedUserForKey(self, key):
"""
Find a persistent session for a user.
@type key: L{bytes}
@param key: The persistent session identifier.
@rtype: L{bytes} or C{None}
@return: The avatar ID the session belongs to, or C{None} if no such
session exists.
"""
session = self.store.findFirst(
PersistentSession, PersistentSession.sessionKey == key)
if session is None:
return None
else:
session.renew()
return session.authenticatedAs
def removeSessionWithKey(self, key):
"""
Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier.
"""
self.store.query(
PersistentSession,
PersistentSession.sessionKey == key).deleteFromStore()
def _cleanSessions(self):
"""
Clean expired sessions.
"""
tooOld = extime.Time() - timedelta(seconds=self.persistentSessionLifetime)
self.store.query(
PersistentSession,
PersistentSession.lastUsed < tooOld).deleteFromStore()
self._lastClean = self._clock.seconds()
def _maybeCleanSessions(self):
"""
Clean expired sessions if it's been long enough since the last clean.
"""
sinceLast = self._clock.seconds() - self._lastClean
if sinceLast > self.sessionCleanFrequency:
self._cleanSessions()
def cookieDomainForRequest(self, request):
"""
Pick a domain to use when setting cookies.
@type request: L{nevow.inevow.IRequest}
@param request: Request to determine cookie domain for
@rtype: C{str} or C{None}
@return: Domain name to use when setting cookies, or C{None} to
indicate that only the domain in the request should be used
"""
host = request.getHeader('host')
if host is None:
# This is a malformed request that we cannot possibly handle
# safely, fall back to the default behaviour.
return None
host = host.split(':')[0]
for domain in self._domains:
suffix = "." + domain
if host == domain:
# The request is for a domain which is directly recognized.
if self._enableSubdomains:
# Subdomains are enabled, so the suffix is returned to
# enable the cookie for this domain and all its subdomains.
return suffix
# Subdomains are not enabled, so None is returned to allow the
# default restriction, which will enable this cookie only for
# the domain in the request, to apply.
return None
if self._enableSubdomains and host.endswith(suffix):
# The request is for a subdomain of a directly recognized
# domain and subdomains are enabled. Drop the unrecognized
# subdomain portion and return the suffix to enable the cookie
# for this domain and all its subdomains.
return suffix
if self._enableSubdomains:
# No directly recognized domain matched the request. If subdomains
# are enabled, prefix the request domain with "." to make the
# cookie valid for that domain and all its subdomains. This
# probably isn't extremely useful. Perhaps it shouldn't work this
# way.
return "." + host
# Subdomains are disabled and the domain from the request was not
# recognized. Return None to get the default behavior.
return None
def savorSessionCookie(self, request):
"""
Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL.
"""
cookieValue = request.getSession().uid
request.addCookie(
self.cookieKey, cookieValue, path='/',
max_age=self.persistentSessionLifetime,
domain=self.cookieDomainForRequest(request))
def login(self, request, session, creds, segments):
"""
Called to check the credentials of a user.
Here we extend guard's implementation to preauthenticate users if they
have a valid persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request being handled.
@type session: L{nevow.guard.GuardSession}
@param session: The user's current session.
@type creds: L{twisted.cred.credentials.ICredentials}
@param creds: The credentials the user presented.
@type segments: L{tuple}
@param segments: The remaining segments of the URL.
@return: A deferred firing with the user's avatar.
"""
self._maybeCleanSessions()
if isinstance(creds, credentials.Anonymous):
preauth = self.authenticatedUserForKey(session.uid)
if preauth is not None:
self.savorSessionCookie(request)
creds = userbase.Preauthenticated(preauth)
def cbLoginSuccess(input):
"""
User authenticated successfully.
Create the persistent session, and associate it with the
username. (XXX it doesn't work like this now)
"""
user = request.args.get('username')
if user is not None:
# create a database session and associate it with this user
cookieValue = session.uid
if request.args.get('rememberMe'):
self.createSessionForKey(cookieValue, creds.username)
self.savorSessionCookie(request)
return input
return (
guard.SessionWrapper.login(
self, request, session, creds, segments)
.addCallback(cbLoginSuccess))
def explicitLogout(self, session):
"""
Handle a user-requested logout.
Here we override guard's behaviour for the logout action to delete the
persistent session. In this case the user has explicitly requested a
logout, so the persistent session must be deleted to require the user
to log in on the next request.
@type session: L{nevow.guard.GuardSession}
@param session: The session of the user logging out.
"""
guard.SessionWrapper.explicitLogout(self, session)
self.removeSessionWithKey(session.uid)
def getCredentials(self, request):
"""
Derive credentials from an HTTP request.
Override SessionWrapper.getCredentials to add the Host: header to the
credentials. This will make web-based virtual hosting work.
@type request: L{nevow.inevow.IRequest}
@param request: The request being handled.
@rtype: L{twisted.cred.credentials.ICredentials}
@return: Credentials derived from the HTTP request.
"""
username = usernameFromRequest(request)
password = request.args.get('password', [''])[0]
return credentials.UsernamePassword(username, password)
|
|
"""
Copyright 2017 Ismail Deganii
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.core.camera import CameraBase
import select
import v4l2capture
from PIL import Image
from threading import Thread
from kivy.logger import Logger
import sys
from time import sleep
import time
import datetime
import numpy as np
import cv2
import cv2.cv as cv
import os
class Camera_Object(CameraBase):
def __init__(self, **kwargs):
kwargs.setdefault('fourcc', 'GRAY')
self._user_buffer = None
self._format = 'rgb'
self._video_src = 'v4l'
self._device = None
self._texture_size = None
self._fourcc = kwargs.get('fourcc')
self._mode = kwargs.get('mode')
self._capture_resolution = kwargs.get('capture_resolution')
self._capture_fourcc = kwargs.get('capture_fourcc')
self.capture_requested = False
self.ref_requested = False
self._exposure_requested = False
self._requested_exposure = 0
self._exposure = 0
self._object_detection = False
self._fps = 0
if self._mode is None:
self._mode = self._get_mode_from_fourcc(self._fourcc)
super(Camera_Object, self).__init__(**kwargs)
def _get_mode_from_fourcc(self, fourcc):
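# PIL mode "I;16" is 16-bit grayscale (used for the Y16 fourcc); "L" is 8-bit grayscale.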
return "I;16" if fourcc == "Y16 " else "L"
def init_camera(self):
self._device = '/dev/video%d' % self._index
if not self.stopped:
self.start()
def _do_capture(self, is_ref):
try:
device = self._device
video = v4l2capture.Video_device(device)
(res_x, res_y) = self._capture_resolution
fourcc = self._capture_fourcc
(size_x, size_y) = video.set_format(res_x, res_y, fourcc=fourcc)
capture_texture_size = (size_x, size_y)
video.create_buffers(1)
video.queue_all_buffers()
video.start()
select.select((video,), (), ())
image_data = video.read_and_queue()
Logger.debug("Obtained a frame of size %d", len(image_data))
image = Image.frombytes(self._get_mode_from_fourcc(fourcc),
capture_texture_size, image_data)
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%Hh-%Mm-%Ss')
if is_ref:
file = '/home/pi/d3-captures/reference-%s.tiff' % st
else:
file = '/home/pi/d3-captures/capture-%s.tiff' % st
image.save(file, format='TIFF')
video.close()
except:
e = sys.exc_info()[0]
Logger.exception('Exception! %s', e)
Clock.schedule_once(self.stop)
def _v4l_init_video(self):
device = self._device
(res_x, res_y) = self.resolution
fourcc = self._fourcc
Logger.info("video_thread started")
video = v4l2capture.Video_device(device)
(size_x, size_y) = video.set_format(res_x, res_y, fourcc=fourcc)
self._texture_size = (size_x, size_y)
Logger.info("Received resolution: %d,%d", size_x, size_y)
video.create_buffers(1)
video.queue_all_buffers()
video.start()
self._reset_fps()
return video
def _v4l_loop(self):
video = None
while True:
try:
video = self._v4l_init_video()
# set to the auto on startup
# video.set_exposure_absolute(400)
except:
e = sys.exc_info()[0]
Logger.exception('Exception on video thread startup! %s', e)
try:
if video is not None:
video.close()
except:
e2 = sys.exc_info()[0]
Logger.info("Exception while trying to close video stream for retry... %s", e2)
Logger.info("Trying to restart video stream")
# Try again in a second...
sleep(2.0)
os.system("sudo ./usbreset /dev/bus/usb/001/007")
sleep(5.0)
break # get out of the loop once this works...
while not self.stopped:
try:
# Logger.debug("Obtaining a frame...")
select.select((video,), (), ())
image_data = video.read_and_queue()
# Logger.debug("Obtained a frame of size %d", len(image_data))
image = Image.frombytes(self._mode, self._texture_size, image_data)
self._user_buffer = image
# convert to rgb for display on-screen
while (self._buffer is not None):
# make this an event object?
sleep(0.02)
#self._buffer = image.convert('RGB').tobytes("raw", "RGB")
image = image.convert('RGB')
# draw some hough circles on the RGB buffer as an overlay
if self._object_detection:
# convert from PIL RGB colorspace to opencv's BGR
color_imcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
gray_imcv = np.asarray(self._user_buffer)
circles = cv2.HoughCircles(gray_imcv, cv.CV_HOUGH_GRADIENT, 1, 2, np.array([]), 100, 10,0,10)
if circles is not None:
a, b, c = circles.shape
for i in range(b):
cv2.circle(color_imcv, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.CV_AA)
cv2.circle(color_imcv, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.CV_AA) # draw center of circle
# convert back from opencv's BGR colorspace to PIL RGB
image = Image.fromarray(cv2.cvtColor(color_imcv,cv2.COLOR_BGR2RGB))
# convert to RGB in order to display on-screen
self._buffer = image.tobytes("raw", "RGB")
self._fps_tick()
Clock.schedule_once(self._update)
self._exposure = video.get_exposure_absolute()
if(self._exposure_requested):
video.set_exposure_absolute(self._requested_exposure)
self._exposure_requested = False
if(self.capture_requested or self.ref_requested):
# need to switch to high res mode
video.close()
self._do_capture(self.ref_requested)
self.capture_requested = False
self.ref_requested = False
# reinitialize
video = self._v4l_init_video()
except:
e = sys.exc_info()[0]
Logger.exception('Exception! %s', e)
video.close()
Logger.info("Trying to restart video stream")
# Try again...
sleep(1.0)
video = self._v4l_init_video()
#Clock.schedule_once(self.stop)
Logger.info("closing video object")
video.close()
Logger.info("video_thread exiting")
def _reset_fps(self):
self.TICK_SAMPLES = 25
self._ticksum = 0
self._tickindex = 0
self._tick_samples = np.zeros(self.TICK_SAMPLES)
self._lasttime = time.time()
self._fps = 0
def _fps_tick(self):
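# Rolling-average FPS: keep the last TICK_SAMPLES frame intervals in a circular
# buffer together with their running sum, so fps == TICK_SAMPLES / sum(intervals).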
newtime = time.time()
newtick = newtime - self._lasttime
self._ticksum -= self._tick_samples[self._tickindex]
self._ticksum += newtick
self._tick_samples[self._tickindex] = newtick
self._tickindex = (self._tickindex + 1) % self.TICK_SAMPLES
self._fps = self.TICK_SAMPLES / self._ticksum
self._lasttime = newtime
def start(self):
print("Starting camera")
Logger.info("d3 camera start() called")
super(Camera_Object, self).start()
t = Thread(name='video_thread',
target=self._v4l_loop)
t.start()
def stop(self, dt=None):
super(Camera_Object, self).stop()
def get_current_frame(self):
return self._user_buffer
def capture__full_res_frame(self):
self.capture_requested = True
def capture__full_res_ref(self):
self.ref_requested = True
def get_fps(self):
return self._fps
def set_exposure(self, val):
self._requested_exposure = val
self._exposure_requested = True
def get_exposure(self):
return self._exposure
def set_object_detection(self, val):
self._object_detection = val
def get_object_detection(self):
return self._object_detection
def _update(self, dt):
if self._buffer is None:
return
Logger.debug("Rendering a frame...")
if self._texture is None and self._texture_size is not None:
Logger.debug("Creating a new texture...")
self._texture = Texture.create(
size=self._texture_size, colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
self._copy_to_gpu()
#def _capture_complete(self):
# self.dispatch('on_capture_complete')
def on_texture(self):
pass
def on_load(self):
pass
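# Illustrative sketch (not used by the class above): the camera code estimates
# its frame rate as a rolling average over the last TICK_SAMPLES frame
# intervals.  This restates the _reset_fps()/_fps_tick() bookkeeping on its own
# so the ring-buffer arithmetic is easier to follow; the class name is
# hypothetical and it reuses the module's existing `time` and `numpy as np`
# imports.
class _RollingFpsSketch(object):
    def __init__(self, samples=25):
        self.samples = samples
        self._intervals = np.zeros(samples)  # ring buffer of recent frame intervals
        self._ticksum = 0.0                  # running sum of the ring buffer
        self._index = 0
        self._last = time.time()
    def tick(self):
        """Record one frame; return frames per second over the sample window."""
        now = time.time()
        interval = now - self._last
        self._ticksum -= self._intervals[self._index]  # drop the oldest interval
        self._ticksum += interval                      # add the newest interval
        self._intervals[self._index] = interval
        self._index = (self._index + 1) % self.samples
        self._last = now
        # samples / (total time spanned by the samples) == average FPS
        return self.samples / self._ticksum if self._ticksum else 0.0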
|
|
import unittest
import time
import json
import redis
from redset import SortedSet, TimeSortedSet, ScheduledSet
from redset.interfaces import Serializer
class SortedSetTest(unittest.TestCase):
def setUp(self):
self.key = 'ss_test'
self.ss = SortedSet(redis.Redis(), self.key)
def tearDown(self):
self.ss.clear()
def test_repr(self):
"""Just make sure it doesn't blow up."""
str(self.ss)
def test_length(self):
for i in range(5):
self.ss.add(i)
self.assertEquals(
len(self.ss),
5,
)
def test_add_with_score(self):
item = 'samere'
score = 123
self.ss.add(item, score)
assert self.ss.score(item) == score
    def test_add_and_update_score(self):
item = 'samere'
score = 123
self.ss.add(item, score)
new_score = 456
self.ss.add(item, new_score)
assert self.ss.score(item) == new_score
def test_contains(self):
for i in range(5):
self.ss.add(i)
self.assertTrue(
0 in self.ss
)
self.assertFalse(
-1 in self.ss
)
def test_ordering(self):
for i in range(5):
self.ss.add(i, score=i)
self.assertEquals(
[str(i) for i in range(5)],
[self.ss.pop() for __ in range(5)],
)
def test_empty_pop(self):
with self.assertRaises(KeyError):
self.ss.pop()
def test_empty_peek(self):
with self.assertRaises(KeyError):
self.ss.peek()
def test_add_dup(self):
for i in range(5):
self.ss.add(i)
dup_added_at = 10
self.ss.add(0, score=dup_added_at)
self.assertEquals(
len(self.ss),
5,
)
self.assertEquals(
int(self.ss.score(0)),
int(dup_added_at),
)
def test_clear(self):
self.assertFalse(self.ss.clear())
for i in range(5):
self.ss.add(i)
self.assertTrue(self.ss.clear())
self.assertEquals(
len(self.ss),
0,
)
def test_discard(self):
self.ss.add(0)
self.assertTrue(self.ss.discard(0))
self.assertFalse(self.ss.discard(0))
def test_peek(self):
with self.assertRaises(KeyError):
self.ss.peek()
self.ss.add(0)
for __ in range(2):
self.assertEquals(
self.ss.peek(),
'0',
)
with self.assertRaises(KeyError):
self.ss.peek(position=1)
self.ss.add(1)
for __ in range(2):
self.assertEquals(
self.ss.peek(position=1),
'1',
)
def test_take(self):
for i in range(5):
self.ss.add(i)
self.assertEquals(
set([str(i) for i in range(2)]),
set(self.ss.take(2)),
)
self.assertEquals(
set([str(i + 2) for i in range(3)]),
set(self.ss.take(100)),
)
self.assertEquals(
len(self.ss),
0,
)
self.assertEquals(
self.ss.take(0),
[],
)
self.assertEquals(
self.ss.take(-1),
[],
)
class SerializerTest(unittest.TestCase):
class FakeJsonSerializer(Serializer):
"""
Handles JSON serialization.
"""
def dumps(self, item):
return json.dumps(item)
def loads(self, item):
if 'uhoh' in item:
raise Exception("omg unserializing failed!")
return json.loads(item)
def setUp(self):
self.key = 'json_ss_test'
self.ss = SortedSet(
redis.Redis(),
self.key,
serializer=self.FakeJsonSerializer(),
)
# has a bad serializer
self.ss2 = SortedSet(
redis.Redis(),
self.key + '2',
serializer=object(),
)
def tearDown(self):
self.ss.clear()
self.ss2.clear()
def test_add_and_pop(self):
self.ss.add({'yo': 'json'}, score=1)
self.ss.add({'yo': 'yaml'}, score=0)
self.assertTrue(
{'yo': 'json'} in self.ss
)
self.assertEqual(
self.ss.pop(),
{'yo': 'yaml'},
)
self.assertEqual(
self.ss.pop(),
{'yo': 'json'},
)
self.assertEqual(
0,
len(self.ss),
)
def test_cant_deserialize(self):
self.ss.add({'yo': 'foo'}, score=0)
self.ss.add({'yo': 'uhoh!'}, score=1)
self.ss.add({'yo': 'hey'}, score=2)
self.assertEquals(
self.ss.take(3),
[{'yo': 'foo'},
{'yo': 'hey'}],
)
def test_bad_serializer(self):
self.ss2.add(1, score=0)
self.ss2.add(2, score=1)
assert '2' in self.ss2
        # gets deserialized as a str, not an int
self.assertEquals(
'1',
self.ss2.pop(),
)
class ScorerTest(unittest.TestCase):
def setUp(self):
self.key = 'scorer_ss_test'
class Ser(Serializer):
loads = int
dumps = str
self.ss = SortedSet(
redis.Redis(),
self.key,
scorer=lambda i: i * -1,
serializer=Ser(),
)
def tearDown(self):
self.ss.clear()
def test_scorer(self):
for i in range(5):
self.ss.add(i)
self.assertEqual(
[4, 3, 2, 1, 0],
self.ss.take(5),
)
class TimeSortedSetTest(unittest.TestCase):
def setUp(self):
self.key = 'tss_test'
self.now = time.time()
self.tss = TimeSortedSet(redis.Redis(), self.key)
def tearDown(self):
self.tss.clear()
def test_length(self):
for i in range(5):
self.tss.add(i)
self.assertEquals(
len(self.tss),
5,
)
def test_contains(self):
for i in range(5):
self.tss.add(i)
self.assertTrue(
0 in self.tss
)
self.assertFalse(
-1 in self.tss
)
def test_add_at(self):
for i in range(5):
self.tss.add(i, score=(self.now - i))
self.assertEquals(
[str(i) for i in reversed(range(5))],
[self.tss.pop() for __ in range(5)],
)
def test_add_dup(self):
for i in range(5):
self.tss.add(i)
dup_added_at = self.now + 10
self.tss.add(0, score=dup_added_at)
self.assertEquals(
len(self.tss),
5,
)
self.assertEquals(
int(self.tss.score(0)),
int(dup_added_at),
)
def test_clear(self):
self.assertFalse(self.tss.clear())
for i in range(5):
self.tss.add(i)
self.assertTrue(self.tss.clear())
self.assertEquals(
len(self.tss),
0,
)
def test_discard(self):
self.tss.add(0)
self.assertTrue(self.tss.discard(0))
self.assertFalse(self.tss.discard(0))
def test_peek(self):
with self.assertRaises(KeyError):
self.tss.peek()
self.tss.add(0)
for __ in range(2):
self.assertEquals(
self.tss.peek(),
'0',
)
with self.assertRaises(KeyError):
self.tss.peek(position=1)
self.tss.add(1)
for __ in range(2):
self.assertEquals(
self.tss.peek(position=1),
'1',
)
def test_score(self):
self.assertEquals(
None,
self.tss.score(0),
)
self.tss.add(0, self.now)
self.assertEquals(
int(self.now),
int(self.tss.score(0)),
)
def test_oldest_time(self):
self.assertEquals(
None,
self.tss.peek_score(),
)
for i in range(3):
self.tss.add(i, self.now - i)
self.assertEquals(
int(self.now - 2),
int(self.tss.peek_score()),
)
self.tss.pop()
self.assertEquals(
int(self.now - 1),
int(self.tss.peek_score()),
)
class ScheduledSetTest(unittest.TestCase):
def setUp(self):
self.key = 'scheduled_set_test'
self.now = time.time() - 1 # offset to avoid having to sleep
self.ss = ScheduledSet(redis.Redis(), self.key)
def tearDown(self):
self.ss.clear()
def test_schedule(self):
self.ss.add(1, self.now)
self.ss.add(2, self.now + 1000)
next_item = self.ss.pop()
self.assertEquals(next_item, '1')
with self.assertRaises(KeyError):
self.ss.pop()
self.assertEquals(len(self.ss), 1)
def test_peek(self):
with self.assertRaises(KeyError):
self.ss.peek()
self.ss.add(1, self.now - 1000)
self.ss.add(2, self.now)
self.ss.add(3, self.now + 1000)
self.assertEquals(
self.ss.peek(),
'1',
)
self.assertEquals(
self.ss.peek(position=1),
'2',
)
with self.assertRaises(KeyError):
self.ss.peek(position=2)
self.ss.pop()
self.ss.pop()
with self.assertRaises(KeyError):
self.ss.peek()
self.assertEquals(len(self.ss), 1)
def test_take(self):
self.ss.add('1', self.now - 3)
self.ss.add('2', self.now - 2)
self.ss.add('3', self.now - 1)
items = self.ss.take(2)
self.assertEquals(len(items), 2)
self.assertEquals(['1', '2'], items)
self.assertEquals(self.ss.pop(), '3')
self.assertEquals(
len(self.ss),
0,
)
self.assertEquals(
self.ss.take(0),
[],
)
self.assertEquals(
self.ss.take(-1),
[],
)
def test_length(self):
for i in range(2):
self.ss.add(i, self.now + 50)
for i in range(3):
self.ss.add(i + 2, self.now - 50)
self.assertEquals(
len(self.ss),
5,
)
def test_length_available(self):
for i in range(2):
self.ss.add(i, self.now + 50)
for i in range(3):
self.ss.add(i + 2, self.now - 50)
self.assertEquals(
self.ss.available(),
3,
)
def test_contains(self):
for i in range(5):
self.ss.add(i)
self.assertTrue(
0 in self.ss
)
self.assertFalse(
-1 in self.ss
)
def test_add_dup(self):
for i in range(5):
self.ss.add(i)
dup_added_at = 10
self.ss.add(0, score=dup_added_at)
self.assertEquals(
len(self.ss),
5,
)
self.assertEquals(
int(self.ss.score(0)),
int(dup_added_at),
)
def test_clear(self):
self.assertFalse(self.ss.clear())
for i in range(5):
self.ss.add(i)
self.assertTrue(self.ss.clear())
self.assertEquals(
len(self.ss),
0,
)
def test_discard(self):
self.ss.add(0)
self.ss.add(1, self.now + 50)
self.assertTrue(self.ss.discard(0))
self.assertFalse(self.ss.discard(0))
self.assertTrue(self.ss.discard(1))
self.assertFalse(self.ss.discard(1))
def test_peek_score(self):
self.assertEquals(
None,
self.ss.peek_score(),
)
for i in range(3):
self.ss.add(i, self.now - i)
self.assertEquals(
int(self.now - 2),
int(self.ss.peek_score()),
)
self.ss.pop()
self.assertEquals(
int(self.now - 1),
int(self.ss.peek_score()),
)
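# Illustrative sketch (not part of the test suite): a ScheduledSet acts like a
# delayed queue -- items whose score (a timestamp) lies in the future still
# count toward len(), but they are not returned by pop()/take(), and available()
# ignores them, until they come due.  That is what test_schedule and
# test_length_available above exercise.  The key name below is arbitrary and,
# like the tests, this assumes a redis server running locally.
def _scheduled_set_usage_sketch():
    queue = ScheduledSet(redis.Redis(), 'example_delayed_queue')
    queue.add('write-log', time.time() - 1)    # already due
    queue.add('send-email', time.time() + 60)  # due in a minute
    return queue.take(10)                      # -> ['write-log']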
|
|
#!/usr/bin/env python
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
# Prerequisites:
# - python is version 2.4 or later
import glob
import os
import shutil
import sys
# See if we should be verbose and print everything!
if "TDD_DEBUG_VERBOSE" in os.environ:
verboseEnvValue = os.environ["TDD_DEBUG_VERBOSE"]
if verboseEnvValue == "1":
verbose = True
else:
verbose = False
else:
verbose = False
# tribitsDDDir is the directory where *this* script is:
#
this_path = os.path.abspath(os.path.realpath(__file__))
tribitsDDDir = os.path.dirname(this_path)
print "tribitsDDDir = '"+tribitsDDDir+"'"
# Load the general script support python code
pythonUtilsDir = os.path.join(tribitsDDDir, "../python_utils")
sys.path = [pythonUtilsDir] + sys.path
from GeneralScriptSupport import *
def install_ctest(tddDashboardRootDir, tribitsDir):
# dashboardToolsDir is the directory to which any needed tools will be downloaded.
#
dashboardToolsDir = tddDashboardRootDir + "/tools"
print "dashboardToolsDir = '"+dashboardToolsDir+"'"
# Make sure tools directory exists:
#
if not os.path.exists(dashboardToolsDir):
os.makedirs(dashboardToolsDir)
if not os.path.exists(dashboardToolsDir):
print "error: could not create directory \"" + dashboardToolsDir + "\""
sys.exit(1)
# Download and install CMake/CTest to use for the outer driver
#
cmakeTddDownloadBaseDir = dashboardToolsDir + "/cmake-TDD"
TDD_CMAKE_INSTALLER_TYPE = "release"
if "TDD_CMAKE_INSTALLER_TYPE" in os.environ:
TDD_CMAKE_INSTALLER_TYPE = os.environ["TDD_CMAKE_INSTALLER_TYPE"]
TDD_FORCE_CMAKE_INSTALL = "1"
if "TDD_FORCE_CMAKE_INSTALL" in os.environ:
TDD_FORCE_CMAKE_INSTALL = os.environ["TDD_FORCE_CMAKE_INSTALL"]
TDD_HTTP_PROXY = ""
if "TDD_HTTP_PROXY" in os.environ:
TDD_HTTP_PROXY = os.environ["TDD_HTTP_PROXY"]
# Only install cmake-TDD if necessary or if requested.
# (Requires network connectivity; avoid when possible.)
#
print "\n***"
print "*** Downloading and installing CMake to \"" + cmakeTddDownloadBaseDir + "\"..."
print "***\n"
installMasterCMake = False
if not os.path.exists(cmakeTddDownloadBaseDir):
print "Forcing install of master CMake because '"+cmakeTddDownloadBaseDir+"' does not exist!"
installMasterCMake = True
elif TDD_FORCE_CMAKE_INSTALL == "1":
print "Forcing install of master CMake because" \
+ " TDD_FORCE_CMAKE_INSTALL == 1!"
installMasterCMake = True
else:
print "Leaving current CMake in place ..." \
if installMasterCMake:
cmnd = sys.executable + " " \
+ tribitsDir + "/python_utils/download-cmake.py" \
+ " --skip-detect" \
+ " --install-dir="+cmakeTddDownloadBaseDir \
+ " --installer-type="+TDD_CMAKE_INSTALLER_TYPE
if TDD_HTTP_PROXY:
cmnd += " --http-proxy="+TDD_HTTP_PROXY
try:
echoRunSysCmnd( cmnd,
timeCmnd = True,
workingDir = dashboardToolsDir \
)
except Exception, e:
print "WARNING! The following command failed!\n"+cmnd
print "However, not updating CMake is not the end of the world!"
# Find ctest under cmakeTddDownloadBaseDir:
#
ctestGlobStr = glob.glob(cmakeTddDownloadBaseDir + "/bin/ctest*")
if 0 == len(ctestGlobStr):
ctestGlobStr = glob.glob(cmakeTddDownloadBaseDir + "/*/bin/ctest*")
if 0 == len(ctestGlobStr):
ctestGlobStr = glob.glob(cmakeTddDownloadBaseDir + "/*/*/bin/ctest*")
if 1 != len(ctestGlobStr):
print "error: could not find ctest executable after download..."
sys.exit(2)
ctestExe = ctestGlobStr[0]
return ctestExe
def invoke_ctest(ctestExe, script, tddDashboardRootDir, environment = {}):
"""
  Invoke CTest using the executable given by the ctestExe argument,
  running the script file given by the script argument, in the working
  directory given by the tddDashboardRootDir argument, and with the
  environment variables given in the environment map.
"""
# We have to pass parameters to CTest through environment
# variables. It would be nice to have another way to do this, but
# until ctest supports something like CMake's -D argument, this is
# how it has to be done.
if environment:
print "environment =", environment
cmd = ctestExe
if verbose:
cmd = ctestExe + " -VV"
ctestRtn = echoRunSysCmnd(
cmd + " -S" + " " + script,
throwExcept=False,
timeCmnd=True,
workingDir=tddDashboardRootDir,
extraEnv = environment
)
print "ctestRtn: '" + str(ctestRtn) + "'"
if ctestRtn != 0:
print "error: ctest returned non-zero error value, script will exit with " + str(ctestRtn)
# Propagate ctest return value
#
return ctestRtn
def run_driver(ctestSourceDirectory, projectRepoBaseDir):
"""
  Run the dashboard driver. The ctestSourceDirectory argument specifies
  the directory that CTest will run over. There should be a
  CMakeLists.txt file in this location. The projectRepoBaseDir argument
  specifies the root of the source code repository for the project.
"""
origDir = os.getcwd()
try:
print "\n******************************************************************"
print "*** Tribits Driver Dashboard tdd_driver.py ***"
print "******************************************************************\n"
print "\nPWD=\""+os.getcwd()+"\"...\n"
print "projectRepoBaseDir = '" + projectRepoBaseDir + "'"
print "tribitsDDDir = '" + tribitsDDDir + "'"
# tribitsDir is the root directory of the TriBITS system:
#
tribitsDir = os.path.abspath(os.path.join(tribitsDDDir, ".."))
print "tribitsDir = '"+tribitsDir+"'"
    # tddDashboardRootDir is the parent directory of the project source tree,
    # computed from projectRepoBaseDir (unless overridden by TDD_DASHBOARD_ROOT):
#
tddDashboardRootDir = os.path.dirname(projectRepoBaseDir)
if "TDD_DASHBOARD_ROOT" in os.environ:
tddDashboardRootDir = os.environ["TDD_DASHBOARD_ROOT"]
print "tddDashboardRootDir = '"+tddDashboardRootDir+"'"
os.chdir(tddDashboardRootDir)
    if verbose: print "\nNew PWD = '"+os.getcwd()+"'"
tddUseSystemCTest = False
if "TRIBITS_TDD_USE_SYSTEM_CTEST" in os.environ \
and os.environ["TRIBITS_TDD_USE_SYSTEM_CTEST"] == "1" \
:
tddUseSystemCTest = True
print "tddUseSystemCTest =", tddUseSystemCTest
if tddUseSystemCTest:
ctestExe = getCmndOutput("which ctest", True, False)
else:
ctestExe = install_ctest(tddDashboardRootDir, tribitsDir)
print "\nctestExe = '" + ctestExe + "'"
if not os.path.exists(ctestExe):
print "error: ctest does not exist after installation..."
sys.exit(3)
# Escape any spaces in the path of the ctest exe. This has to be done
# here instead of where we set the ctestExe the first time because
# the check for existence cannot handle the "\"
#
ctestExe = ctestExe.replace(" ", "\ ")
# Verify ctest works with a simple --version call first:
#
ctestVersion = getCmndOutput(ctestExe+" --version", True, False)
print "ctestVersion = '"+ctestVersion+"'"
# Run one driver dashboard for this source tree:
#
print "\n***"
print "*** Running the main dashboards as CTest tests .."
print "***\n"
sys.exit(
invoke_ctest(ctestExe,
os.path.join(tribitsDDDir, "TribitsDriverDashboard.cmake"),
tddDashboardRootDir,
{
"TDD_DASHBOARD_ROOT" : tddDashboardRootDir,
"CTEST_SOURCE_DIRECTORY" : ctestSourceDirectory,
"CTEST_UPDATE_DIRECTORY" : projectRepoBaseDir,
"CTEST_BINARY_DIRECTORY" : tddDashboardRootDir+"/TDD_BUILD",
}
)
)
finally:
os.chdir(origDir)
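# Typical usage from a project-specific driver script (illustrative only; the
# paths below are hypothetical and the real layout is project-dependent):
#
#   from tdd_driver import run_driver
#   projectRepoBaseDir = "/path/to/ProjectRepo"
#   ctestSourceDirectory = projectRepoBaseDir+"/cmake/ctest/drivers/my_machine"
#   run_driver(ctestSourceDirectory, projectRepoBaseDir)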
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from itertools import izip
from .basecase import (BaseTestCase, cqlshlog, dedent, at_a_time, cqlsh,
TEST_HOST, TEST_PORT)
from .cassconnect import (get_test_keyspace, testrun_cqlsh, testcall_cqlsh,
cassandra_cursor, split_cql_commands, quote_name)
from .ansi_colors import (ColoredText, lookup_colorcode, lookup_colorname,
lookup_colorletter, ansi_seq)
CONTROL_C = '\x03'
CONTROL_D = '\x04'
class TestCqlshOutput(BaseTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertNoHasColors(self, text, msg=None):
self.assertNotRegexpMatches(text, ansi_seq, msg='ANSI CSI sequence found in %r' % text)
def assertHasColors(self, text, msg=None):
self.assertRegexpMatches(text, ansi_seq, msg=msg)
def assertColored(self, coloredtext, colorname):
wanted_colorcode = lookup_colorcode(colorname)
for num, c in enumerate(coloredtext):
if not c.isspace():
ccolor = c.colorcode()
self.assertEqual(ccolor, wanted_colorcode,
msg='Output text %r (char #%d) is colored %s, not %s'
% (coloredtext, num, lookup_colorname(ccolor), colorname))
def assertColorFromTags(self, coloredtext, tags):
for (char, tag) in izip(coloredtext, tags):
if char.isspace():
continue
if tag.isspace():
tag = 'n' # neutral
self.assertEqual(char.colorcode(), lookup_colorletter(tag),
msg='Coloring mismatch.\nExpected coloring: %s\n'
'Actually got: %s\ncolor code: %s'
% (tags, coloredtext.colored_version(), coloredtext.colortags()))
def assertCqlverQueriesGiveColoredOutput(self, queries_and_expected_outputs,
cqlver=(cqlsh.DEFAULT_CQLVER,), **kwargs):
if not isinstance(cqlver, (tuple, list)):
cqlver = (cqlver,)
for ver in cqlver:
self.assertQueriesGiveColoredOutput(queries_and_expected_outputs, cqlver=ver, **kwargs)
def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs, **kwargs):
"""
        Allow queries and expected output to be specified as (query, expected)
        tuples.  The expected string interleaves plain-text output lines with
        lines of single-character color tags (see ansi_colors) marking the
        expected color of each character position in the preceding line.
"""
with testrun_cqlsh(tty=True, **kwargs) as c:
for query, expected in queries_and_expected_outputs:
cqlshlog.debug('Testing %r' % (query,))
output = c.cmd_and_response(query).lstrip("\r\n")
c_output = ColoredText(output)
pairs = at_a_time(dedent(expected).split('\n'), 2)
outlines = c_output.splitlines()
for (plain, colorcodes), outputline in zip(pairs, outlines):
self.assertEqual(outputline.plain().rstrip(), plain)
self.assertColorFromTags(outputline, colorcodes)
def test_no_color_output(self):
for termname in ('', 'dumb', 'vt100'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertNoHasColors(c.read_to_next_prompt())
def test_no_prompt_or_colors_output(self):
for termname in ('', 'dumb', 'vt100', 'xterm'):
cqlshlog.debug('TERM=%r' % termname)
query = 'select * from has_all_types limit 1;'
output, result = testcall_cqlsh(prompt=None, env={'TERM': termname},
tty=False, input=query + '\n')
output = output.splitlines()
for line in output:
self.assertNoHasColors(line)
self.assertNotRegexpMatches(line, r'^cqlsh\S*>')
self.assertEqual(len(output), 6,
msg='output: %r' % '\n'.join(output))
self.assertEqual(output[0], '')
self.assertNicelyFormattedTableHeader(output[1])
self.assertNicelyFormattedTableRule(output[2])
self.assertNicelyFormattedTableData(output[3])
self.assertEqual(output[4].strip(), '')
self.assertEqual(output[5].strip(), '(1 rows)')
def test_color_output(self):
for termname in ('xterm', 'unknown-garbage'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertHasColors(c.read_to_next_prompt())
def test_count_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('select count(*) from has_all_types;', """
count
MMMMM
-------
5
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_composite_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 10;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
self.assertQueriesGiveColoredOutput((
(q, """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_static_cf_output(self):
self.assertCqlverQueriesGiveColoredOutput((
("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
a | b
RR MM
----+----
1 | 1
YY YY
13 | 13
YY YY
2 | 2
YY YY
(3 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
self.assertQueriesGiveColoredOutput((
('select * from dynamic_columns;', """
somekey | column1 | value
RRRRRRR CCCCCCC MMMMM
---------+---------+-------------------------
1 | 1.2 | one point two
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
2 | 2.3 | two point three
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | -0.0001 | negative ten thousandth
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 3.46 | three point four six
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 99 | ninety-nine point oh
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
(5 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_empty_cf_output(self):
# we print the header after CASSANDRA-6910
self.assertCqlverQueriesGiveColoredOutput((
('select * from empty_table;', """
lonelykey | lonelycol
RRRRRRRRR MMMMMMMMM
-----------+-----------
(0 rows)
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
q = 'select * from has_all_types where num = 999;'
# same query should show up as empty in cql 3
self.assertQueriesGiveColoredOutput((
(q, """
num | asciicol | bigintcol | blobcol | booleancol | decimalcol | doublecol | floatcol | intcol | smallintcol | textcol | timestampcol | tinyintcol | uuidcol | varcharcol | varintcol
RRR MMMMMMMM MMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMMM MMMMMMMMM MMMMMMMM MMMMMM MMMMMMMMMMM MMMMMMM MMMMMMMMMMMM MMMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMM
-----+----------+-----------+---------+------------+------------+-----------+----------+--------+-------------+---------+--------------+------------+---------+------------+-----------
(0 rows)
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_columnless_key_output(self):
q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
self.assertQueriesGiveColoredOutput((
(q, """
a
R
---
1
Y
2
Y
(2 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_numeric_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('''select intcol, bigintcol, varintcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
intcol | bigintcol | varintcol
MMMMMM MMMMMMMMM MMMMMMMMM
-------------+----------------------+-----------------------------
-12 | 1234567890123456789 | 10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
2147483647 | 9223372036854775807 | 9
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
0 | 0 | 0
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
-2147483648 | -9223372036854775808 | -10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
| |
nnnnnnnnnnn nnnnnnnnnnnnnnnnnnnn nnnnnnnnnnnnnnnnnnnnnnnnnnn
(5 rows)
nnnnnnnn
"""),
('''select decimalcol, doublecol, floatcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
decimalcol | doublecol | floatcol
MMMMMMMMMM MMMMMMMMM MMMMMMMM
------------------+-----------+----------
19952.11882 | 1 | -2.1
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
1E-14 | 1e+07 | 1e+05
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
0.0 | 0 | 0
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
10.0000000000000 | -1004.1 | 1e+08
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
| |
nnnnnnnnnnnnnnnn nnnnnnn nnnnn
(5 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_timestamp_output(self):
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
--------------------------
2012-05-14 12:53:20+0000
GGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env={'TZ': 'Etc/UTC'})
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
--------------------------
2012-05-14 07:53:20-0500
GGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env={'TZ': 'EST'})
def test_boolean_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
num | booleancol
RRR MMMMMMMMMM
-----+------------
0 | True
G GGGGG
1 | True
G GGGGG
2 | False
G GGGGG
3 | False
G GGGGG
(4 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_null_output(self):
# column with metainfo but no values
self.assertCqlverQueriesGiveColoredOutput((
("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
# all-columns, including a metainfo column has no values (cql3)
self.assertQueriesGiveColoredOutput((
("select * from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_string_output_ascii(self):
self.assertCqlverQueriesGiveColoredOutput((
("select * from ascii_with_special_chars where k in (0, 1, 2, 3);", r"""
k | val
R MMM
---+-----------------------------------------------
0 | newline:\n
G YYYYYYYYmm
1 | return\rand null\x00!
G YYYYYYmmYYYYYYYYmmmmY
2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
G mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
3 | fake special chars\x00\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(4 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_string_output_utf8(self):
# many of these won't line up visually here, to keep the source code
# here ascii-only. note that some of the special Unicode characters
# here will render as double-width or zero-width in unicode-aware
# terminals, but the color-checking machinery here will still treat
# it as one character, so those won't seem to line up visually either.
self.assertCqlverQueriesGiveColoredOutput((
("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", u"""
k | val
R MMM
---+-------------------------------
0 | Normal string
G YYYYYYYYYYYYY
1 | Text with\\nnewlines\\n
G YYYYYYYYYmmYYYYYYYYmm
2 | Text with embedded \\x01 char
G YYYYYYYYYYYYYYYYYYYmmmmYYYYY
3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
G YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
4 | double wides: \u2f91\u2fa4\u2f9a
G YYYYYYYYYYYYYYYYY
5 | zero width\u200bspace
G YYYYYYYYYYYYYYYY
6 | fake special chars\\x00\\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(7 rows)
nnnnnnnn
""".encode('utf-8')),
), cqlver=cqlsh.DEFAULT_CQLVER, env={'LANG': 'en_US.UTF-8'})
def test_blob_output(self):
self.assertCqlverQueriesGiveColoredOutput((
("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
num | blobcol
RRR MMMMMMM
-----+----------------------
0 | 0x000102030405fffefd
G mmmmmmmmmmmmmmmmmmmm
1 | 0xffffffffffffffffff
G mmmmmmmmmmmmmmmmmmmm
2 | 0x
G mmmmmmmmmmmmmmmmmmmm
3 | 0x80
G mmmmmmmmmmmmmmmmmmmm
(4 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_prompt(self):
with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
self.assertTrue(c.output_header.splitlines()[-1].endswith('cqlsh> '))
c.send('\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh> '))
cmd = "USE \"%s\";\n" % get_test_keyspace().replace('"', '""')
c.send(cmd)
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:%s> ' % (get_test_keyspace())))
c.send('use system;\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:system> '))
c.send('use NONEXISTENTKEYSPACE;\n')
outputlines = c.read_to_next_prompt().splitlines()
self.assertEqual(outputlines[0], 'use NONEXISTENTKEYSPACE;')
self.assertTrue(outputlines[2].endswith('cqlsh:system> '))
midline = ColoredText(outputlines[1])
self.assertEqual(midline.plain(),
'InvalidRequest: code=2200 [Invalid query] message="Keyspace \'nonexistentkeyspace\' does not exist"')
self.assertColorFromTags(midline,
"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
def test_describe_keyspace_output(self):
fullcqlver = cqlsh.DEFAULT_CQLVER
with testrun_cqlsh(tty=True, cqlver=fullcqlver) as c:
ks = get_test_keyspace()
qks = quote_name(ks)
for cmd in ('describe keyspace', 'desc keyspace'):
for givename in ('system', '', qks):
for semicolon in ('', ';'):
fullcmd = cmd + (' ' if givename else '') + givename + semicolon
desc = c.cmd_and_response(fullcmd)
self.check_describe_keyspace_output(desc, givename or qks, fullcqlver)
# try to actually execute that last keyspace description, with a
# new keyspace name
new_ks_name = 'COPY_OF_' + ks
copy_desc = desc.replace(ks, new_ks_name)
statements = split_cql_commands(copy_desc)
do_drop = True
with cassandra_cursor(cql_version=fullcqlver) as curs:
try:
for stmt in statements:
cqlshlog.debug('TEST EXEC: %s' % stmt)
curs.execute(stmt)
finally:
curs.execute('use system')
if do_drop:
curs.execute('drop keyspace %s' % quote_name(new_ks_name))
def check_describe_keyspace_output(self, output, qksname, fullcqlver):
expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
r';\s*$',
r'\breplication = {\'class\':']
for expr in expected_bits:
self.assertRegexpMatches(output, expr)
def test_describe_columnfamily_output(self):
# we can change these to regular expressions if/when it makes sense
# to do so; these will likely be subject to lots of adjustments.
# note columns are now comparator-ordered instead of original-order.
table_desc3 = dedent("""
CREATE TABLE %s.has_all_types (
num int PRIMARY KEY,
asciicol ascii,
bigintcol bigint,
blobcol blob,
booleancol boolean,
decimalcol decimal,
doublecol double,
floatcol float,
intcol int,
smallintcol smallint,
textcol text,
timestampcol timestamp,
tinyintcol tinyint,
uuidcol uuid,
varcharcol text,
varintcol varint
) WITH bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND crc_check_chance = 1.0
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = '99PERCENTILE';
""" % quote_name(get_test_keyspace()))
with testrun_cqlsh(tty=True, cqlver=cqlsh.DEFAULT_CQLVER) as c:
for cmdword in ('describe table', 'desc columnfamily'):
for semicolon in (';', ''):
output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
self.assertNoHasColors(output)
self.assertSequenceEqual(output.split('\n'), table_desc3.split('\n'))
def test_describe_columnfamilies_output(self):
output_re = r'''
\n
Keyspace [ ] (?P<ksname> \S+ ) \n
-----------* \n
(?P<cfnames> .*? )
\n
'''
ks = get_test_keyspace()
with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
# when not in a keyspace
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
ksnames = []
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '(?xs) ^ ( %s )+ $' % output_re)
for section in re.finditer('(?xs)' + output_re, output):
ksname = section.group('ksname')
ksnames.append(ksname)
cfnames = section.group('cfnames')
self.assertNotIn('\n\n', cfnames)
if ksname == ks:
self.assertIn('ascii_with_special_chars', cfnames)
self.assertIn('system', ksnames)
self.assertIn(quote_name(ks), ksnames)
# when in a keyspace
c.send('USE %s;\n' % quote_name(ks))
c.read_to_next_prompt()
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertEqual(output[0], '\n')
self.assertEqual(output[-1], '\n')
self.assertNotIn('Keyspace %s' % quote_name(ks), output)
self.assertIn('undefined_values_table', output)
def test_describe_cluster_output(self):
output_re = r'''(?x)
^
\n
Cluster: [ ] (?P<clustername> .* ) \n
Partitioner: [ ] (?P<partitionername> .* ) \n
\n
'''
ringinfo_re = r'''
Range[ ]ownership: \n
(
[ ] .*? [ ][ ] \[ ( \d+ \. ){3} \d+ \] \n
)+
\n
'''
with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
# not in a keyspace
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + '$')
c.send('USE %s;\n' % quote_name(get_test_keyspace()))
c.read_to_next_prompt()
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + ringinfo_re + '$')
def test_describe_schema_output(self):
with testrun_cqlsh(tty=True) as c:
for semicolon in ('', ';'):
output = c.cmd_and_response('desc full schema' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '^\nCREATE KEYSPACE')
self.assertIn("\nCREATE KEYSPACE system WITH replication = {'class': 'LocalStrategy'} AND durable_writes = true;\n",
output)
self.assertRegexpMatches(output, ';\s*$')
def test_show_output(self):
with testrun_cqlsh(tty=True) as c:
output = c.cmd_and_response('show version;')
self.assertRegexpMatches(output,
'^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
output = c.cmd_and_response('show host;')
self.assertHasColors(output)
self.assertRegexpMatches(output, '^Connected to .* at %s:%d\.$'
% (re.escape(TEST_HOST), TEST_PORT))
def test_eof_prints_newline(self):
with testrun_cqlsh(tty=True) as c:
c.send(CONTROL_D)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, '\n')
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_exit_prints_no_newline(self):
for semicolon in ('', ';'):
with testrun_cqlsh(tty=True) as c:
cmd = 'exit%s\n' % semicolon
c.send(cmd)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, cmd)
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_help_types(self):
with testrun_cqlsh(tty=True) as c:
c.cmd_and_response('help types')
def test_help(self):
pass
def test_printing_parse_error(self):
pass
def test_printing_lex_error(self):
pass
def test_multiline_statements(self):
pass
def test_cancel_statement(self):
pass
def test_printing_integrity_error(self):
pass
def test_printing_cql_error(self):
pass
def test_empty_line(self):
pass
def test_user_types_output(self):
self.assertCqlverQueriesGiveColoredOutput((
("select addresses from users;", r"""
addresses
MMMMMMMMM
--------------------------------------------------------------------------------------------------------------------------------------------
{{city: 'Chelyabinsk', address: '3rd street', zip: null}, {city: 'Chigirinsk', address: null, zip: '676722'}}
BBYYYYBBYYYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYBBYYYBBRRRRBBBBYYYYBBYYYYYYYYYYYYBBYYYYYYYBBRRRRBBYYYBBYYYYYYYYBB
{{city: 'Austin', address: '902 East 5th St. #202', zip: '78702'}, {city: 'Sunnyvale', address: '292 Gibraltar Drive #107', zip: '94089'}}
BBYYYYBBYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBBBBYYYYBBYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
self.assertCqlverQueriesGiveColoredOutput((
("select phone_numbers from users;", r"""
phone_numbers
MMMMMMMMMMMMM
-------------------------------------------------------------------------------------
{{country: null, number: '03'}, {country: '+7', number: null}}
BBYYYYYYYBBRRRRBBYYYYYYBBYYYYBBBBYYYYYYYBBYYYYBBYYYYYYBBRRRRBB
{{country: '+1', number: '512-537-7809'}, {country: '+44', number: '208 622 3021'}}
BBYYYYYYYBBYYYYBBYYYYYYBBYYYYYYYYYYYYYYBBBBYYYYYYYBBYYYYYBBYYYYYYBBYYYYYYYYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
def test_user_types_with_collections(self):
self.assertCqlverQueriesGiveColoredOutput((
("select info from songs;", r"""
info
MMMM
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
{founded: 188694000, members: {'Adrian Smith', 'Bruce Dickinson', 'Dave Murray', 'Janick Gers', 'Nicko McBrain', 'Steve Harris'}, description: 'Pure evil metal'}
BYYYYYYYBBGGGGGGGGGBBYYYYYYYBBBYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYBBBYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYB
(1 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
self.assertCqlverQueriesGiveColoredOutput((
("select tags from songs;", r"""
tags
MMMM
-------------------------------------------------
{tags: {'genre': 'metal', 'origin': 'england'}}
BYYYYBBBYYYYYYYBBYYYYYYYBBYYYYYYYYBBYYYYYYYYYBB
(1 rows)
nnnnnnnn
"""),
), cqlver=cqlsh.DEFAULT_CQLVER)
|
|
# First the monkeypatch stuff
import sys
import os
import pkg_resources
from pip.exceptions import InstallationError, BestVersionAlreadyInstalled
from pip.log import logger
from pip.backwardcompat import HTTPError
from pip.index import Link
from pip.req import InstallRequirement, _make_build_dir
from pip.util import display_path
from pip.download import url_to_path
# vcs, BestVersionAlreadyInstalled and _make_build_dir are referenced by the
# copied prepare_files() below
from pip.vcs import vcs
def prettify(req):
req = '\033[31m%s\033[0m' % req
req = req.replace(' (from', ' \033[33m(from')
return req
investigate = []
# This is a copy of pip.req's prepare_files, but with the line "FIXME: check
# for conflict" replaced with some code that - checks for conflicts.
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
"""Prepare process. Create temp directories, download and/or unpack files."""
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.notify('Requirement already up-to-date: %s'
% req_to_install)
else:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
logger.notify('Obtaining %s' % req_to_install)
elif install:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
try:
is_bundle = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
##@@ if filesystem packages are not marked
##editable in a req, a non deterministic error
##occurs when the script attempts to unpack the
##build directory
location = req_to_install.build_location(self.build_dir, not self.is_download)
## FIXME: is the existance of the checkout good enough to use it? I don't think so.
unpack = True
url = None
if not os.path.exists(os.path.join(location, 'setup.py')):
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
self.unpack_url(url, location, self.is_download)
except HTTPError:
e = sys.exc_info()[1]
logger.fatal('Could not install requirement %s because of error %s'
% (req_to_install, e))
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
if is_bundle:
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
elif self.is_download:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if force_root_egg_info:
# We need to run this to make sure that the .egg-info/
# directory is created for packing in the bundle
req_to_install.run_egg_info(force_root_egg_info=True)
req_to_install.assert_source_matches_version()
#@@ sketchy way of identifying packages not grabbed from an index
if bundle and req_to_install.url:
self.copy_to_build_dir(req_to_install)
install = False
# req_to_install.req is only avail after unpack for URL pkgs
# repeat check_if_exists to uninstall-on-upgrade (#14)
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if not is_bundle:
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
if (req_to_install.extras):
logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras))
if not self.ignore_dependencies:
for req in req_to_install.requirements(req_to_install.extras):
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError:
e = sys.exc_info()[1]
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
subreq = InstallRequirement(req, req_to_install)
if self.has_requirement(name):
investigate.append([ self.get_requirement(name), subreq ])
continue
reqs.append(subreq)
self.add_requirement(subreq)
if req_to_install.name not in self.requirements:
self.requirements[req_to_install.name] = req_to_install
if self.is_download:
self.reqs_to_cleanup.append(req_to_install)
else:
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
self.copy_to_build_dir(req_to_install)
finally:
logger.indent -= 2
# ---
import optparse
from pip.index import PackageFinder
from pip.req import RequirementSet, parse_requirements
from pip.locations import build_prefix, src_prefix
try:
# Set up the version control backends
from pip import version_control
version_control()
except:
# Recent versions of pip don't need this
pass
# Logging
parser = optparse.OptionParser(usage='%prog [--verbose] <requirements file>')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose')
options, args = parser.parse_args()
level = 1 if options.verbose else 0
level = logger.level_for_integer(4-level)
logger.consumers.extend([(level, sys.stdout)])
if not len(args):
parser.print_help()
sys.exit()
# Monkey patch our above redefined function
RequirementSet.prepare_files = prepare_files
# Bits of what pip install --no-install does, as minimal as we can
requirement_set = RequirementSet(build_dir=build_prefix, src_dir=src_prefix, download_dir=None, download_cache=None, upgrade=None, ignore_installed=None, ignore_dependencies=False, force_reinstall=None)
class Opt:
skip_requirements_regex = None
for req in parse_requirements(args[0], options=Opt):
requirement_set.add_requirement(req)
finder = PackageFinder(find_links=[], index_urls=['http://pypi.python.org/simple/'], use_mirrors=True, mirrors=[])
requirement_set.prepare_files(finder)
for first, later in investigate:
later.check_if_exists()
if later.satisfied_by:
pass # print 'already satisfied by %s' % (later.satisfied_by)
elif later.conflicts_with:
print '%s conflicts with installed \033[31m%s\033[0m' % (prettify(later), later.conflicts_with)
else:
if first.installed_version not in later.req:
print '%s, but pip will install version \033[31m%s\033[0m from \033[33m%s\033[0m' % (prettify(later), first.installed_version, first)
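# Illustrative invocation (run against a pip requirements file; the script name
# is simply whatever this file was saved as):
#
#   python check_conflicts.py [--verbose] requirements.txt
#
# Each conflict is reported in one of the two forms printed above: a requirement
# that conflicts with an already-installed distribution, or one whose pinned
# range excludes the version pip would actually install.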
|
|
import pygame
import time
from pygame.locals import *
background_color = (248, 248, 248)
speednumber_position = (170, 120)
def position_in_rect(position, rectangle):
return rectangle.collidepoint(position)
def speed_position(text, font, right_upper_coordinates):
"""" Gives the position for screen.blib() starting from the coordiates
from the right upper corner (as a tupple) """
right_coordinate = right_upper_coordinates[0] - font.size(text)[0]
return (right_coordinate, right_upper_coordinates[1])
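# Worked example (hypothetical numbers): if font.size("42") returns (60, 40) and
# the right upper corner is (170, 120), the text must start 60 px to the left of
# x=170, so speed_position("42", font, (170, 120)) returns (110, 120), ready to
# be passed to screen.blit().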
class top_ribbon:
def __init__(self, width=320, height=20, color=(100, 100, 100)):
self.signal = 0
self.width = width
self.height = height
self.color = color
def draw(self, screen, font):
""" Draws the ribbon with the time in the top of the screen """
background = pygame.Surface((self.width,self.height)).convert()
background.fill(self.color)
# gmtime() isn't the right function yet (time should come from raspberry pi)
clock = time.strftime("%H:%M",time.gmtime())
clock_size = font.size(clock)
clock = font.render(clock, True, (200,200,200))
clock_position = ((self.width-clock_size[0])/2, (self.height-clock_size[1])/2)
screen.blit(background, (0,0))
screen.blit(clock, clock_position)
pygame.display.update([0, 0, self.width, self.height])
class graphical_speedometer:
def __init__(self, blockcolor, width, height):
self.width = width - width%10
self.blockcolor = blockcolor
self.height = height
# from http://pygame.org/project-AAfilledRoundedRect-2349-.html
def AAfilledRoundedRect(self, surface, rect, color, radius=0.4):
"""
AAfilledRoundedRect(surface,rect,color,radius=0.4)
surface : destination
rect : rectangle
color : rgb or rgba
radius : 0 <= radius <= 1
"""
rect = pygame.Rect(rect)
color = pygame.Color(*color)
alpha = color.a
color.a = 0
pos = rect.topleft
rect.topleft = 0,0
rectangle = pygame.Surface(rect.size,SRCALPHA)
circle = pygame.Surface([min(rect.size)*3]*2,SRCALPHA)
pygame.draw.ellipse(circle,(0,0,0),circle.get_rect(),0)
circle = pygame.transform.scale(circle,[int(min(rect.size)*radius)]*2)
radius = rectangle.blit(circle,(0,0))
radius.bottomright = rect.bottomright
rectangle.blit(circle,radius)
radius.topright = rect.topright
rectangle.blit(circle,radius)
radius.bottomleft = rect.bottomleft
rectangle.blit(circle,radius)
rectangle.fill((0,0,0),rect.inflate(-radius.w,0))
rectangle.fill((0,0,0),rect.inflate(0,-radius.h))
rectangle.fill(color,special_flags=BLEND_RGBA_MAX)
rectangle.fill((255,255,255,alpha),special_flags=BLEND_RGBA_MIN)
return surface.blit(rectangle,pos)
def draw_speedometer(self, screen, position):
# Save variables for use in update_speed()
self.screen = screen
self.position = position
self.speedometer_rect = [position[0], position[1], self.width, self.height]
self.background = self.AAfilledRoundedRect(screen, self.speedometer_rect, (150, 150, 150), radius=0.2)
pygame.display.update(self.speedometer_rect)
def update_speed(self, speed):
        # Determine number of blocks: map speed 0..30 onto 0..8 filled blocks
        number_of_blocks = int(speed/30. * 8)
# Remove old blocks
# self.screen.blit(self.background, self.position) # not working yet
self.AAfilledRoundedRect(self.screen, self.speedometer_rect, (150, 150, 150), radius=0.2)
for i in range(number_of_blocks):
# numbers in rect have to be adjusted for variable sizes
rect = [self.position[0] + 19 * i + 4, self.position[1] + 4, 17, 32]
self.AAfilledRoundedRect(self.screen, rect, self.blockcolor, radius=0.2)
pygame.display.update(self.speedometer_rect)
class speed_text:
def __init__(self, font, position, color):
self.font = font
self.position = position
self.color = color
def update_speed_text(self, screen, text):
pass
def start():
global startbutton_rect
global screen
global background
global ribbon
global startbutton
global speedometer
global background_color
global start_font
global speednumber_position
pygame.init()
# uncomment when using Raspberry Pi:
screen = pygame.display.set_mode((320, 240), pygame.FULLSCREEN)
pygame.mouse.set_visible(False)
# comment when using Raspberry Pi:
# screen = pygame.display.set_mode((320, 240))
# pygame.mouse.set_visible(True)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill(background_color)
screen.blit(background, (0,0))
ribbon = top_ribbon()
# Not sure yet what font to use on raspberry pi
ribbon_font = pygame.font.Font(None, 20)
ribbon.draw(screen=screen, font=ribbon_font)
speedometer = graphical_speedometer((20, 250, 20), 160, 40)
speedometer.draw_speedometer(screen, (10, 60))
speedometer.update_speed(0.)
start_font = pygame.font.Font(None, 40)
startbutton = start_font.render("Start", True, (10, 10, 10))
startbutton_rect = pygame.Rect(200, 180, 80, 40)
graphical_speedometer(0, 0, 0).AAfilledRoundedRect(screen, startbutton_rect, (20, 250, 20))
screen.blit(startbutton, (207, 185))
speed_font = pygame.font.Font(None, 100)
speedtext = speed_font.render("0.0", True, (10, 10, 10))
screen.blit(speedtext, speed_position("0.0", speed_font, speednumber_position))
pygame.display.flip()
def get_started():
global started
return started
def main():
global started
started = 'Begin'
start()
# #Main Loop
while 1:
# #Handle Input Events
for event in pygame.event.get():
if event.type == QUIT:
return
elif event.type == KEYDOWN and event.key == K_ESCAPE:
return
elif event.type == MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if started == 'Begin' and position_in_rect(pos, startbutton_rect):
started = 'Started'
pygame.draw.rect(screen, background_color, startbutton_rect)
endbutton = start_font.render("Stop", True, (10, 10, 10))
graphical_speedometer(0, 0, 0).AAfilledRoundedRect(screen, startbutton_rect, (250, 20, 20))
screen.blit(endbutton, (210, 185))
pygame.display.update(startbutton_rect)
elif started == 'Started':
if pos[1] < 120:
speedometer.update_speed(15.)
elif position_in_rect(pos, startbutton_rect):
started = 'Stop'
endbutton = start_font.render("Send", True, (10, 10, 10))
graphical_speedometer(0, 0, 0).AAfilledRoundedRect(screen, startbutton_rect, (20, 20, 250))
screen.blit(endbutton, (210, 185))
pygame.display.update(startbutton_rect)
elif started == 'Stop':
if position_in_rect(pos, startbutton_rect):
started = 'Send'
time.sleep(0.2)
if __name__ == '__main__': main()
|
|
import logging
import os
import warnings
import six
from .. import auth, errors, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource
def get_image(self, image):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
Returns:
(urllib3.response.HTTPResponse object): The response from the
daemon.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.get_image("fedora:latest")
>>> f = open('/tmp/fedora-latest.tar', 'w')
>>> f.write(image.data)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@utils.check_resource
def history(self, image):
"""
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
            (list): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
"""
List images. Similar to the ``docker images`` command.
Args:
name (str): Only show images belonging to the repository ``name``
quiet (bool): Only return numeric IDs as a list.
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- ``label`` (str): format either ``key`` or ``key=value``
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
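    # Illustrative usage (``cli`` is assumed to be a client instance that
    # includes this mixin):
    #
    #   cli.images(name='ubuntu')               # image dicts for 'ubuntu'
    #   cli.images(quiet=True)                  # just a list of image IDs
    #   cli.images(filters={'dangling': True})  # untagged layers only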
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
"""
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
"""
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
)
u = self._url('/images/create')
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, six.string_types) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
if image or params.get('fromSrc') != '-': # from image or URL
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, six.string_types): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
u, data=f, params=params, headers=headers, timeout=None
)
)
else: # from raw data
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(
self._post(u, data=src, params=params, headers=headers)
)
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply
"""
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
self._post(
u, data=data, params=params, headers=headers, timeout=None
)
)
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist.
"""
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
def import_image_from_stream(self, stream, repository=None, tag=None,
changes=None):
        """
        Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
        supports importing from a readable stream of raw tar data; the data is
        sent to the daemon using chunked transfer encoding.
        Args:
            stream (file-like object): Stream to read the tar data from.
            repository (str): The repository to create
            tag (str): The tag to apply
        """
        return self.import_image(
src=stream, stream_src=True, repository=repository, tag=tag,
changes=changes
)
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@utils.check_resource
    def insert(self, image, url, path):
        """
        Insert a file from ``url`` into an image at ``path``. Deprecated and
        only available for API versions < 1.12.
        """
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
@utils.check_resource
def inspect_image(self, image):
"""
        Get detailed information about an image. Similar to the ``docker
        inspect`` command, but only for images.
        Args:
            image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
"""
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
stream (bool): Stream the output as a generator
insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
                and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``.
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.pull('busybox', stream=True):
... print(json.dumps(json.loads(line), indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
def push(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
insecure_registry (bool): Use ``http://`` to connect to the
registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
                and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``.
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True):
... print line
{"status":"Pushing repository yourname/app (1 tags)"}
{"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
{"status":"Image already pushed, skipping","progressDetail":{},
"id":"511136ea3c5a"}
...
"""
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
@utils.check_resource
def remove_image(self, image, force=False, noprune=False):
"""
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
"""
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
def search(self, term):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
@utils.check_resource
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
def is_file(src):
try:
return (
isinstance(src, six.string_types) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
return False
def _import_image_params(repo, tag, image=None, src=None,
changes=None):
params = {
'repo': repo,
'tag': tag,
}
if image:
params['fromImage'] = image
elif src and not is_file(src):
params['fromSrc'] = src
else:
params['fromSrc'] = '-'
if changes:
params['changes'] = changes
return params
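# Illustrative behaviour of ``_import_image_params`` above (a hedged sketch; no
# daemon interaction is involved, and the example paths/URLs are placeholders):
#
#   _import_image_params('repo', 'tag', image='busybox:latest')
#       -> {'repo': 'repo', 'tag': 'tag', 'fromImage': 'busybox:latest'}
#   _import_image_params('repo', 'tag', src='http://example.com/image.tar')
#       -> {'repo': 'repo', 'tag': 'tag', 'fromSrc': 'http://example.com/image.tar'}
#   _import_image_params('repo', 'tag', src='/tmp/image.tar')  # an existing local file
#       -> {'repo': 'repo', 'tag': 'tag', 'fromSrc': '-'}       # the tar data is POSTed instead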
|
|
#!/usr/bin/env python
import math
import sys
import roslib; roslib.load_manifest('bugs')
import rospy
import tf.transformations as transform
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from location import Location, necessary_heading
from dist import Dist
current_location = Location()
current_dists = Dist()
delta = .1
WALL_PADDING = .5
STRAIGHT = 0
LEFT = 1
RIGHT = 2
MSG_STOP = 3
def init_listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber('base_pose_ground_truth', Odometry, location_callback)
rospy.Subscriber('base_scan', LaserScan, sensor_callback)
def location_callback(data):
p = data.pose.pose.position
q = (
data.pose.pose.orientation.x,
data.pose.pose.orientation.y,
data.pose.pose.orientation.z,
data.pose.pose.orientation.w)
t = transform.euler_from_quaternion(q)[2] # in [-pi, pi]
current_location.update_location(p.x, p.y, t)
def sensor_callback(data):
current_dists.update(data)
class Bug:
def __init__(self, tx, ty):
self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.tx = tx
self.ty = ty
def go(self, direction):
cmd = Twist()
if direction == STRAIGHT:
cmd.linear.x = 1
elif direction == LEFT:
cmd.angular.z = 0.25
elif direction == RIGHT:
cmd.angular.z = -0.25
elif direction == MSG_STOP:
pass
self.pub.publish(cmd)
def go_until_obstacle(self):
print "Going until destination or obstacle"
        while current_location.distance(self.tx, self.ty) > delta:
(frontdist, _) = current_dists.get()
if frontdist <= WALL_PADDING:
return True
            if current_location.facing_point(self.tx, self.ty):
                self.go(STRAIGHT)
            elif current_location.faster_left(self.tx, self.ty):
self.go(LEFT)
else:
self.go(RIGHT)
rospy.sleep(.01)
return False
def follow_wall(self):
print "Following wall"
while current_dists.get()[0] <= WALL_PADDING:
self.go(RIGHT)
rospy.sleep(.01)
while not self.should_leave_wall():
(front, left) = current_dists.get()
if front <= WALL_PADDING:
self.go(RIGHT)
elif WALL_PADDING - .1 <= left <= WALL_PADDING + .1:
self.go(STRAIGHT)
elif left > WALL_PADDING + .1:
self.go(LEFT)
else:
self.go(RIGHT)
rospy.sleep(.01)
def should_leave_wall(self):
print "You dolt! You need to subclass bug to know how to leave the wall"
sys.exit(1)
class Bug0(Bug):
def should_leave_wall(self):
(x, y, t) = current_location.current_location()
        dir_to_go = current_location.global_to_local(necessary_heading(x, y, self.tx, self.ty))
at = current_dists.at(dir_to_go)
if at > 10:
print "Leaving wall"
return True
return False
class Bug1(Bug):
def __init__(self, tx, ty):
Bug.__init__(self, tx, ty)
self.closest_point = (None, None)
self.origin = (None, None)
self.circumnavigated = False
def should_leave_wall(self):
(x, y, t) = current_location.current_location()
if None in self.closest_point:
self.origin = (x, y)
self.closest_point = (x, y)
self.closest_distance = current_location.distance(self.tx, self.ty)
self.left_origin_point = False
return False
d = current_location.distance(self.tx, self.ty)
if d < self.closest_distance:
print "New closest point at", (x, y)
self.closest_distance = d
self.closest_point = (x, y)
(ox, oy) = self.origin
if not self.left_origin_point and not near(x, y, ox, oy):
# we have now left the point where we hit the wall
print "Left original touch point"
self.left_origin_point = True
elif near(x, y, ox, oy) and self.left_origin_point:
# circumnavigation achieved!
print "Circumnavigated obstacle"
self.circumnavigated = True
        (cx, cy) = self.closest_point
        if self.circumnavigated and near(x, y, cx, cy):
self.closest_point = (None, None)
self.origin = (None, None)
self.circumnavigated = False
self.left_origin_point = False
print "Leaving wall"
return True
else:
return False
class Bug2(Bug):
def __init__(self, tx, ty):
Bug.__init__(self, tx, ty)
self.lh = None
self.encountered_wall_at = (None, None)
def face_goal(self):
while not current_location.facing_point(self.tx, self.ty):
self.go(RIGHT)
rospy.sleep(.01)
def follow_wall(self):
Bug.follow_wall(self)
self.face_goal()
def should_leave_wall(self):
(x, y, _) = current_location.current_location()
if None in self.encountered_wall_at:
self.encountered_wall_at = (x, y)
self.lh = necessary_heading(x, y, self.tx, self.ty)
return False
t_angle = necessary_heading(x, y, self.tx, self.ty)
(ox, oy) = self.encountered_wall_at
od = math.sqrt((ox-self.tx)**2 + (oy-self.ty)**2)
cd = math.sqrt( (x-self.tx)**2 + (y-self.ty)**2)
dt = 0.01
if self.lh - dt <= t_angle <= self.lh + dt and not near(x, y, ox, oy):
if cd < od:
print "Leaving wall"
return True
return False
def near(cx, cy, x, y):
nearx = x - .3 <= cx <= x + .3
neary = y - .3 <= cy <= y + .3
return nearx and neary
def bug_algorithm(bug):
init_listener()
print "Calibrating sensors..."
# This actually just lets the sensor readings propagate into the system
rospy.sleep(1)
print "Calibrated"
    while current_location.distance(bug.tx, bug.ty) > delta:
        hit_wall = bug.go_until_obstacle()
        if hit_wall:
            bug.follow_wall()
    print "Arrived at", (bug.tx, bug.ty)
# Parse arguments
if len(sys.argv) < 4:
    print "Usage: rosrun bugs bug.py ALGORITHM X Y"
    sys.exit(1)
algorithm = sys.argv[1]
algorithms = ["bug0", "bug1", "bug2"]
if algorithm not in algorithms:
    print "First argument should be one of ", algorithms, ". Was ", algorithm
    sys.exit(1)
(tx, ty) = map(float, sys.argv[2:4])
print "Setting target:", (tx, ty)
bug = None
if algorithm == "bug0":
bug = Bug0(tx, ty)
elif algorithm == "bug1":
bug = Bug1(tx, ty)
elif algorithm == "bug2":
bug = Bug2(tx, ty)
bug_algorithm(bug)
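# A minimal, ROS-free sketch of the Bug2 leave-wall test implemented above. It
# assumes necessary_heading(x, y, tx, ty) reduces to atan2(ty - y, tx - x); the
# real helper lives in the `location` module, which is not shown here. `math`
# and `near` are already defined in this file.
def bug2_should_leave_sketch(x, y, tx, ty, hit_x, hit_y, line_heading, dt=0.01):
    # Back on the start-goal line: the current heading to the goal matches the
    # heading recorded when the wall was first hit (within dt radians).
    heading = math.atan2(ty - y, tx - x)
    on_m_line = line_heading - dt <= heading <= line_heading + dt
    # Not still sitting at the original hit point (same 0.3 tolerance as near()).
    away_from_hit = not near(x, y, hit_x, hit_y)
    # Strictly closer to the goal than the hit point was.
    closer = math.sqrt((x - tx) ** 2 + (y - ty) ** 2) < math.sqrt((hit_x - tx) ** 2 + (hit_y - ty) ** 2)
    return on_m_line and away_from_hit and closer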
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.data_fusion_v1.types import datafusion
from google.longrunning import operations_pb2 # type: ignore
from .base import DataFusionTransport, DEFAULT_CLIENT_INFO
from .grpc import DataFusionGrpcTransport
class DataFusionGrpcAsyncIOTransport(DataFusionTransport):
"""gRPC AsyncIO backend transport for DataFusion.
Service for creating and managing Data Fusion instances.
Data Fusion enables ETL developers to build code-free, data
integration pipelines via a point-and-click UI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datafusion.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "datafusion.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_available_versions(
self,
) -> Callable[
[datafusion.ListAvailableVersionsRequest],
Awaitable[datafusion.ListAvailableVersionsResponse],
]:
r"""Return a callable for the list available versions method over gRPC.
Lists possible versions for Data Fusion instances in
the specified project and location.
Returns:
Callable[[~.ListAvailableVersionsRequest],
Awaitable[~.ListAvailableVersionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_available_versions" not in self._stubs:
self._stubs["list_available_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/ListAvailableVersions",
request_serializer=datafusion.ListAvailableVersionsRequest.serialize,
response_deserializer=datafusion.ListAvailableVersionsResponse.deserialize,
)
return self._stubs["list_available_versions"]
@property
def list_instances(
self,
) -> Callable[
[datafusion.ListInstancesRequest], Awaitable[datafusion.ListInstancesResponse]
]:
r"""Return a callable for the list instances method over gRPC.
Lists Data Fusion instances in the specified project
and location.
Returns:
Callable[[~.ListInstancesRequest],
Awaitable[~.ListInstancesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_instances" not in self._stubs:
self._stubs["list_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/ListInstances",
request_serializer=datafusion.ListInstancesRequest.serialize,
response_deserializer=datafusion.ListInstancesResponse.deserialize,
)
return self._stubs["list_instances"]
@property
def get_instance(
self,
) -> Callable[[datafusion.GetInstanceRequest], Awaitable[datafusion.Instance]]:
r"""Return a callable for the get instance method over gRPC.
Gets details of a single Data Fusion instance.
Returns:
Callable[[~.GetInstanceRequest],
Awaitable[~.Instance]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_instance" not in self._stubs:
self._stubs["get_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/GetInstance",
request_serializer=datafusion.GetInstanceRequest.serialize,
response_deserializer=datafusion.Instance.deserialize,
)
return self._stubs["get_instance"]
@property
def create_instance(
self,
) -> Callable[
[datafusion.CreateInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create instance method over gRPC.
Creates a new Data Fusion instance in the specified
project and location.
Returns:
Callable[[~.CreateInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_instance" not in self._stubs:
self._stubs["create_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/CreateInstance",
request_serializer=datafusion.CreateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_instance"]
@property
def delete_instance(
self,
) -> Callable[
[datafusion.DeleteInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete instance method over gRPC.
        Deletes a single Data Fusion instance.
Returns:
Callable[[~.DeleteInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_instance" not in self._stubs:
self._stubs["delete_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/DeleteInstance",
request_serializer=datafusion.DeleteInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_instance"]
@property
def update_instance(
self,
) -> Callable[
[datafusion.UpdateInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update instance method over gRPC.
Updates a single Data Fusion instance.
Returns:
Callable[[~.UpdateInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_instance" not in self._stubs:
self._stubs["update_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/UpdateInstance",
request_serializer=datafusion.UpdateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_instance"]
@property
def restart_instance(
self,
) -> Callable[
[datafusion.RestartInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the restart instance method over gRPC.
Restart a single Data Fusion instance.
        At the end of the operation the instance is fully restarted.
Returns:
Callable[[~.RestartInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restart_instance" not in self._stubs:
self._stubs["restart_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.datafusion.v1.DataFusion/RestartInstance",
request_serializer=datafusion.RestartInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restart_instance"]
def close(self):
return self.grpc_channel.close()
__all__ = ("DataFusionGrpcAsyncIOTransport",)
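# Illustrative usage sketch (not part of the generated library): constructing
# this transport directly and awaiting one of its cached stubs. In normal use
# the Data Fusion async client builds the transport for you; the ``parent``
# field on ListInstancesRequest follows the standard List AIP shape and is an
# assumption here, as is the use of application default credentials.
async def _example_list_instances(parent):
    import google.auth  # resolves application default credentials

    credentials, _ = google.auth.default()
    transport = DataFusionGrpcAsyncIOTransport(credentials=credentials)
    # ``transport.list_instances`` creates (and caches) the unary-unary stub.
    response = await transport.list_instances(
        datafusion.ListInstancesRequest(parent=parent)
    )
    return response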
|
|
"""Helpers for tests."""
import json
from unittest.mock import patch
import pytest
from homeassistant.config_entries import ENTRY_STATE_LOADED
from .common import MQTTMessage
from tests.common import MockConfigEntry, load_fixture
from tests.components.light.conftest import mock_light_profiles # noqa: F401
@pytest.fixture(name="generic_data", scope="session")
def generic_data_fixture():
"""Load generic MQTT data and return it."""
return load_fixture("ozw/generic_network_dump.csv")
@pytest.fixture(name="migration_data", scope="session")
def migration_data_fixture():
"""Load migration MQTT data and return it."""
return load_fixture("ozw/migration_fixture.csv")
@pytest.fixture(name="fan_data", scope="session")
def fan_data_fixture():
"""Load fan MQTT data and return it."""
return load_fixture("ozw/fan_network_dump.csv")
@pytest.fixture(name="light_data", scope="session")
def light_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_network_dump.csv")
@pytest.fixture(name="light_new_ozw_data", scope="session")
def light_new_ozw_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_new_ozw_network_dump.csv")
@pytest.fixture(name="light_no_ww_data", scope="session")
def light_no_ww_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_no_ww_network_dump.csv")
@pytest.fixture(name="light_no_cw_data", scope="session")
def light_no_cw_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_no_cw_network_dump.csv")
@pytest.fixture(name="light_wc_data", scope="session")
def light_wc_only_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_wc_network_dump.csv")
@pytest.fixture(name="cover_data", scope="session")
def cover_data_fixture():
"""Load cover MQTT data and return it."""
return load_fixture("ozw/cover_network_dump.csv")
@pytest.fixture(name="cover_gdo_data", scope="session")
def cover_gdo_data_fixture():
"""Load cover_gdo MQTT data and return it."""
return load_fixture("ozw/cover_gdo_network_dump.csv")
@pytest.fixture(name="climate_data", scope="session")
def climate_data_fixture():
"""Load climate MQTT data and return it."""
return load_fixture("ozw/climate_network_dump.csv")
@pytest.fixture(name="lock_data", scope="session")
def lock_data_fixture():
"""Load lock MQTT data and return it."""
return load_fixture("ozw/lock_network_dump.csv")
@pytest.fixture(name="string_sensor_data", scope="session")
def string_sensor_fixture():
"""Load string sensor MQTT data and return it."""
return load_fixture("ozw/sensor_string_value_network_dump.csv")
@pytest.fixture(name="sent_messages")
def sent_messages_fixture():
"""Fixture to capture sent messages."""
sent_messages = []
with patch(
"homeassistant.components.mqtt.async_publish",
side_effect=lambda hass, topic, payload: sent_messages.append(
{"topic": topic, "payload": json.loads(payload)}
),
):
yield sent_messages
@pytest.fixture(name="fan_msg")
async def fan_msg_fixture(hass):
"""Return a mock MQTT msg with a fan actuator message."""
fan_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/fan.json")
)
message = MQTTMessage(topic=fan_json["topic"], payload=fan_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_msg")
async def light_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_no_rgb_msg")
async def light_no_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_no_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_rgb_msg")
async def light_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_pure_rgb_msg")
async def light_pure_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a pure rgb light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="switch_msg")
async def switch_msg_fixture(hass):
"""Return a mock MQTT msg with a switch actuator message."""
switch_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/switch.json")
)
message = MQTTMessage(topic=switch_json["topic"], payload=switch_json["payload"])
message.encode()
return message
@pytest.fixture(name="sensor_msg")
async def sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_msg")
async def binary_sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_alt_msg")
async def binary_sensor_alt_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor_alt.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="cover_msg")
async def cover_msg_fixture(hass):
"""Return a mock MQTT msg with a cover level change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/cover.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="cover_gdo_msg")
async def cover_gdo_msg_fixture(hass):
"""Return a mock MQTT msg with a cover barrier state change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/cover_gdo.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="climate_msg")
async def climate_msg_fixture(hass):
"""Return a mock MQTT msg with a climate mode change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/climate.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="lock_msg")
async def lock_msg_fixture(hass):
"""Return a mock MQTT msg with a lock actuator message."""
lock_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/lock.json")
)
message = MQTTMessage(topic=lock_json["topic"], payload=lock_json["payload"])
message.encode()
return message
@pytest.fixture(name="stop_addon")
def mock_stop_addon():
"""Mock stop add-on."""
with patch("homeassistant.components.hassio.async_stop_addon") as stop_addon:
yield stop_addon
@pytest.fixture(name="uninstall_addon")
def mock_uninstall_addon():
"""Mock uninstall add-on."""
with patch(
"homeassistant.components.hassio.async_uninstall_addon"
) as uninstall_addon:
yield uninstall_addon
@pytest.fixture(name="get_addon_discovery_info")
def mock_get_addon_discovery_info():
"""Mock get add-on discovery info."""
with patch(
"homeassistant.components.hassio.async_get_addon_discovery_info"
) as get_addon_discovery_info:
yield get_addon_discovery_info
@pytest.fixture(name="mqtt")
async def mock_mqtt_fixture(hass):
"""Mock the MQTT integration."""
mqtt_entry = MockConfigEntry(domain="mqtt", state=ENTRY_STATE_LOADED)
mqtt_entry.add_to_hass(hass)
return mqtt_entry
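# Illustrative sketch of a test consuming the fixtures above. The ``setup_ozw``
# helper is assumed to live in ``.common`` next to MQTTMessage and to return a
# callable that feeds MQTT messages into the integration; it is not defined in
# this file, so the example is left as a comment.
#
# async def test_light_state(hass, light_data, light_msg, sent_messages, mqtt):
#     """Feed a recorded network dump plus one light message into the integration."""
#     receive_message = await setup_ozw(hass, fixture=light_data)
#     receive_message(light_msg)
#     await hass.async_block_till_done()
#     assert sent_messages == []  # no MQTT commands until a service call is made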
|
|
##########################################################
## QueryParser.py
## Purpose: script parses and converts SPARQL query text
## version 1.2.0
## date: 07.09.2017
##########################################################
## import module/script dependencies
import re
import os
def TripleType(triple_list):
'''
    Function takes a list of lists, where the inner lists represent a triple. Each potential triple is checked to ensure
    consistent formatting so that each triple contains three variables. The function returns a
    list of lists, where the inner lists represent triples with correct formatting.
    :param triple_list: list of lists, where the inner lists represent a triple
    :return: list of lists, where the inner lists represent triples with correct formatting
'''
triples = []
for triple in triple_list:
# singleton - indicates it is the object for the first triple's subject and predicate
if len(triple) == 1:
triples.append(triple_list[0][:-1] + triple)
# pair - indicates that these are the predicate and object of the first triple
if len(triple) == 2:
triples.append(triple_list[0][:-2] + triple)
# complete triple - one containing exactly 3 variables
if len(triple) == 3:
if len([x for x in triple if ':' in x or '?' in x]) == 3:
triples.append(triple)
# gets at string objects not split (ex. '2')
elif (':' in triple[0] or '?' in triple[0]) and (':' in triple[1] or '?' in triple[1]):
triples.append(triple)
# split string objects (ex. "'breast', 'cancer'")
else:
triples.append(triple_list[0][:-2] + [triple[0]] + [' '.join(triple[1:]).replace('"', "")])
return triples
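# Illustrative example of TripleType (hedged; the variable names are placeholders):
# given a first full triple followed by a singleton object produced by the
# ','-shortcut syntax,
#   TripleType([['?gda', 'sio:SIO_000628', '?gene'], ['?disease']])
# returns both reconstructed triples:
#   [['?gda', 'sio:SIO_000628', '?gene'], ['?gda', 'sio:SIO_000628', '?disease']]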
def QueryTriples(query_body):
'''
Function takes a string representing the body of a SPARQL query and depending on the style that the triples are
    written (1 triple per line or shortcuts with ';'), the function parses the triples and returns a list of lists
where each inner list represents a triple.
:param query_body: string representing the body of a SPARQL query
    :return: a list of lists where each inner list represents a triple
'''
# identify query triples - accounts for two different types of triple formatting
# format 1 - 1 triple per line
if ';' not in query_body:
query_triple = []
body = filter(None, query_body[re.compile("{").search(query_body).start() + 1:re.compile("}").search(
query_body).end() - 1]).split('\n')
triple_list = filter(None, [x.lstrip(' ').split('.')[0] for x in body if not x.lstrip(' ').startswith('#')])
for triple in triple_list:
if '(' not in triple:
query_triple.append(filter(None, triple.rstrip(' ').split(' ')))
return TripleType(query_triple)
# format 2 - alternative triple format:
## ?gda sio:SIO_000628 ?gene,?disease;
## sio:SIO_000216 ?scoreIRI.
else:
body = [x for x in filter(None,
query_body[re.compile("{").search(query_body).start() + 1:re.compile("}").search(
query_body).end() - 1].split('.\n'))]
query_triple = []
for triples in body:
items = []
for triple in filter(None, triples.split('\n')):
for var in triple.strip(';').split(','):
items.append(filter(None, var.split(' ')))
query_triple += [list(item) for item in set(tuple(row) for row in TripleType(items))]
return query_triple
def QueryParser(input_file):
'''
Function reads a string containing a file path/name, reads contents, and splits the string into different parts
of a SPARQL query. The query is parsed with each triple appended to a list. The function returns a list of lists
where the first list is a list of triples and the second list is a list of lists representing the query text.
:param input_file: string containing the file path/name of SPARQL query
:return: a list of lists where list[0] is a list of triples, and list[1] is a list of query components
(i.e., prefixes and query body)
'''
# CHECK - file has data
if os.stat(input_file).st_size == 0:
return 'ERROR: input file: {} is empty'.format(input_file)
else:
# first remove all commented lines (lines starting with '#')
data = ''.join(filter(None, [row for row in open(input_file).readlines() if not row.lstrip(' ').startswith('#')]))
query = data.split(re.search(r'\bselect\b', data, re.IGNORECASE).group(0))
# identify query triples - accounts for two different types of triple formatting
query_triples = QueryTriples(query[1])
# construct pieces of query
query_triples = [' '.join([item for item in row]) for row in query_triples]
return [query_triples, query]
def QueryFeature(query_body):
'''
Function takes the body of a SPARQL query and searches for rows of the query that contain SPARQL features like
FILTER or OPTION, and returns a string of features, where each feature is separated by a newline.
    :param query_body: string representing the body of a SPARQL query
:return: a string of features, where each feature is separated by a newline
'''
features = []
for row in query_body.split('\n'):
row = row.lstrip(' ')
        if '(' in row and 'ORDER' not in row and 'GROUP' not in row and not row.startswith('?'):
# replaces newlines and empty white space with an empty string
line = [re.sub('["\n"]', '', row)]
for item in line:
if '#' in item:
features.append(item.split('#')[0])
else:
features.append(item)
return '\n'.join(features)
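# Illustrative example of QueryFeature (hedged; the rows are placeholders): for
# a query body containing the rows
#     ?gda sio:SIO_000216 ?scoreIRI .
#     FILTER (?score > 0.5)
#     ORDER BY DESC(?score)
# the function returns 'FILTER (?score > 0.5)': the triple row is skipped
# because it starts with '?', and ORDER BY / GROUP BY rows are excluded.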
def QuerySelect(triples):
'''
Function takes a list of lists representing triples and formats each item in each triple according the type of
variable including: resources ("rdf: type"); literals (2, "name"); and variables ('?subject', '?object'). The
function returns a list of formatted triples.
:param triples: a list of lists representing triples
:return: a list of formatted triples
'''
select_list = set()
# create a list of subjects and objects from query
triples = [x for y in triples for x in set([y.split(' ')[0], y.split(' ')[2]])]
for item in triples:
# if a resource (ex - rdf:type)
if ':' in item:
select_list.add('(' + str(item) + ' as ?' + str(item.split(':')[1]) + ')')
# if a literal (ex - 2; "name")
        elif ':' not in item and '?' not in item:
select_list.add('(' + str(item) + ' as ?' + str(item) + ')')
# if a variable (ex - ?subject)
else:
select_list.add(item)
return list(select_list)
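# Illustrative example of QuerySelect (hedged; the order of the returned list
# may vary because a set is used internally):
#   QuerySelect(['?gene rdfs:subClassOf obo:SO_0000704'])
# keeps the variable subject unchanged and aliases the resource object, e.g.
#   ['?gene', '(obo:SO_0000704 as ?SO_0000704)']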
def OWLGraph(query_body):
'''
Function takes a string containing a SPARQL query and reformats the string, removing NETS-specific triples into a
list of triples needed to create the OWL representation.
:param query_body: a string containing a SPARQL query
:return: a list of triples needed to create the OWL representation
'''
triples = []
body = query_body[query_body.index('{'):].split('\n')
# extract query text from triples
for x in body:
if not x.lstrip().startswith('FILTER'):
if x.lstrip().startswith('OPTIONAL'):
triples.append(x.lstrip().split('OPTIONAL {')[-1])
else:
triples.append(x.lstrip())
return [x.lstrip().strip('?').split(' ') for x in triples if ('?' in x or ':' in x) and '(' not in x]
def NETSQueryParser(query_text, NETS_nodes, NETS_edge_metadata):
'''
Function takes the original SPARQL query text, the list of NETS nodes and edge metadata and updates the original
SPARQL query text. The function returns a list where list[0] contains a string representing the updated query,
list[1] contains the NETS node label variables, list[2] contains the NETS node identifier variables, list[3]
contains the OWL graph select statement information, and list[4] contains the OWL query triples.
    :param query_text: a list of lists where list[0] is a list of query triples and list[1] is a list of query
    components (i.e., prefixes and query body)
:param NETS_nodes: a list of lists, where each list contains the triple for labeling a single NETS node
:param NETS_edge_metadata: dictionary keyed by NETS edges, values are triples to label NETS edges
:return: a list where the first item is a string representing the updated query, the
second list contains the NETS node label variables, the NETS node identifier variables, OWL graph select statement,
and the final item is the OWL graph triples.
'''
## PREFIX
# identify query prefixes
prefix = [str(x) + '\n' for x in query_text[1][0].split('\n') if re.search(r'\bprefix\b', x, re.IGNORECASE)]
# features - filter, optional, bind
features = [str(x) + '\n' for x in QueryFeature(query_text[1][1]).split('\n') if 'bind' not in x.lower()]
bind = [str(x) + '\n' for x in QueryFeature(query_text[1][1]).split('\n') if 'bind' in x.lower()]
# identify query limits
try:
query_text[1][1].index('LIMIT')
limit = [query_text[1][1][query_text[1][1].index('LIMIT'):len(query_text[1][1])]]
except ValueError:
limit = ['']
## QUERY BODY
# query triples
triples = [str(x) + ' .\n' for x in query_text[0] if 'rdfs:label' not in x]
# ids
id_triple = [x.split('.')[0].rstrip() for x in triples if
x.split(' ')[2] in NETS_nodes and 'IAO_0000219' in x.split(' ')[1]]
# edge label triples - with optional clause
edge_labels = list(set(['OPTIONAL {' + ' '.join(val) + '} \n' for val in NETS_edge_metadata[0].values()]))
# get node label triples - with optional clause
node_labels = list(set([' '.join([str(x), 'rdfs:label', str(x) + '_name ', '.\n']) for
y in set(NETS_edge_metadata[0].keys()) for x in y]))
## SELECT - start ('SELECT'); end ('WHERE {'); and text (query variables)
try:
re.search(r'\bdistinct\b', query_text[1][1], re.IGNORECASE).group(0)
select_start = ['SELECT DISTINCT']
except AttributeError:
select_start = ['SELECT']
# select text
# OWL
select_text_OWL = set(['(' + str(x) + ' as ?'+ str(x.split(':')[1]) + ')' if
':' in x else '(' + str(x) + ' as ?' + str(x) + ')' if
':' not in x and '?' not in x else x
for y in [x[::2] for x in [x.split(' ')[0:3] for x in triples]] for x in y])
# NETS
select_text = set(['(' + str(x) + ' as ?'+ str(x.split(':')[1]) + ')' if
':' in x else '(' + str(x) + ' as ?' + str(x) + ')' if
':' not in x and '?' not in x else x
for y in [x[::2] for x in [x.split(' ')[0:3] for x in triples]] for x in y] +
[x[-1] for x in NETS_edge_metadata[0].values()] +
[str(x[0]) + '_name' for x in NETS_edge_metadata[0].keys()] +
[str(x[1]) + '_name' for x in NETS_edge_metadata[0].keys()])
select_end = ['WHERE { \n']
## COMPLETE QUERY
# NETS
full_query = prefix + \
select_start + \
list(select_text) + \
select_end +\
bind +\
triples + \
edge_labels +\
node_labels +\
features +\
[' }\n'] +\
limit
# OWL - this code can be improved in future iterations
OWL_query = prefix + \
select_start + \
list(select_text) + \
select_end + \
bind + \
triples + \
features + \
[' }\n'] + \
limit
return [' '.join(full_query), node_labels, id_triple, select_text_OWL, OWLGraph(' '.join(OWL_query))]
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.video.transcoder_v1.services.transcoder_service import pagers
from google.cloud.video.transcoder_v1.types import resources
from google.cloud.video.transcoder_v1.types import services
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import TranscoderServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TranscoderServiceGrpcTransport
from .transports.grpc_asyncio import TranscoderServiceGrpcAsyncIOTransport
class TranscoderServiceClientMeta(type):
"""Metaclass for the TranscoderService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TranscoderServiceTransport]]
_transport_registry["grpc"] = TranscoderServiceGrpcTransport
_transport_registry["grpc_asyncio"] = TranscoderServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[TranscoderServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TranscoderServiceClient(metaclass=TranscoderServiceClientMeta):
"""Using the Transcoder API, you can queue asynchronous jobs for
transcoding media into various output formats. Output formats
may include different streaming standards such as HTTP Live
Streaming (HLS) and Dynamic Adaptive Streaming over HTTP (DASH).
You can also customize jobs using advanced features such as
Digital Rights Management (DRM), audio equalization, content
concatenation, and digital ad-stitch ready content generation.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
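    # Illustrative behaviour of the helper above:
    #   _get_default_mtls_endpoint("transcoder.googleapis.com")
    #       -> "transcoder.mtls.googleapis.com"
    #   _get_default_mtls_endpoint("transcoder.sandbox.googleapis.com")
    #       -> "transcoder.mtls.sandbox.googleapis.com"
    #   _get_default_mtls_endpoint("localhost:8080")  # not a googleapis.com host
    #       -> "localhost:8080"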
DEFAULT_ENDPOINT = "transcoder.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranscoderServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranscoderServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TranscoderServiceTransport:
"""Returns the transport used by the client instance.
Returns:
TranscoderServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def job_path(project: str, location: str, job: str,) -> str:
"""Returns a fully-qualified job string."""
return "projects/{project}/locations/{location}/jobs/{job}".format(
project=project, location=location, job=job,
)
@staticmethod
def parse_job_path(path: str) -> Dict[str, str]:
"""Parses a job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/jobs/(?P<job>.+?)$",
path,
)
return m.groupdict() if m else {}
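    # Illustrative round trip for the two path helpers above:
    #   TranscoderServiceClient.job_path("my-project", "us-central1", "my-job")
    #       -> "projects/my-project/locations/us-central1/jobs/my-job"
    #   TranscoderServiceClient.parse_job_path(
    #       "projects/my-project/locations/us-central1/jobs/my-job")
    #       -> {"project": "my-project", "location": "us-central1", "job": "my-job"}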
@staticmethod
def job_template_path(project: str, location: str, job_template: str,) -> str:
"""Returns a fully-qualified job_template string."""
return "projects/{project}/locations/{location}/jobTemplates/{job_template}".format(
project=project, location=location, job_template=job_template,
)
@staticmethod
def parse_job_template_path(path: str) -> Dict[str, str]:
"""Parses a job_template path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/jobTemplates/(?P<job_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
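# Illustrative outcomes (assumed, with no api_endpoint override): if
# GOOGLE_API_USE_CLIENT_CERTIFICATE is unset or "false" and
# GOOGLE_API_USE_MTLS_ENDPOINT is "auto", this returns (DEFAULT_ENDPOINT, None);
# if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true", a client certificate is
# available, and the endpoint setting is still "auto", it returns
# (DEFAULT_MTLS_ENDPOINT, <client cert source>).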
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TranscoderServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the transcoder service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TranscoderServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TranscoderServiceTransport):
# transport is a TranscoderServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_job(
self,
request: Union[services.CreateJobRequest, dict] = None,
*,
parent: str = None,
job: resources.Job = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Creates a job in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_create_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
job = transcoder_v1.Job()
job.template_id = "template_id_value"
request = transcoder_v1.CreateJobRequest(
parent="parent_value",
job=job,
)
# Make the request
response = client.create_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.CreateJobRequest, dict]):
The request object. Request message for
`TranscoderService.CreateJob`.
parent (str):
Required. The parent location to create and process this
job. Format: ``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (google.cloud.video.transcoder_v1.types.Job):
Required. Parameters for creating
transcoding job.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.CreateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.CreateJobRequest):
request = services.CreateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_jobs(
self,
request: Union[services.ListJobsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
r"""Lists jobs in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_list_jobs():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.ListJobsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.ListJobsRequest, dict]):
The request object. Request message for
`TranscoderService.ListJobs`. The parent location from
which to retrieve the collection of jobs.
parent (str):
Required. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.services.transcoder_service.pagers.ListJobsPager:
Response message for TranscoderService.ListJobs.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.ListJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.ListJobsRequest):
request = services.ListJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_job(
self,
request: Union[services.GetJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Returns the job data.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_get_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.GetJobRequest(
name="name_value",
)
# Make the request
response = client.get_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.GetJobRequest, dict]):
The request object. Request message for
`TranscoderService.GetJob`.
name (str):
Required. The name of the job to retrieve. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.GetJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.GetJobRequest):
request = services.GetJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_job(
self,
request: Union[services.DeleteJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_delete_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.DeleteJobRequest(
name="name_value",
)
# Make the request
client.delete_job(request=request)
Args:
request (Union[google.cloud.video.transcoder_v1.types.DeleteJobRequest, dict]):
The request object. Request message for
`TranscoderService.DeleteJob`.
name (str):
Required. The name of the job to delete. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.DeleteJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.DeleteJobRequest):
request = services.DeleteJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_job_template(
self,
request: Union[services.CreateJobTemplateRequest, dict] = None,
*,
parent: str = None,
job_template: resources.JobTemplate = None,
job_template_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Creates a job template in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_create_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.CreateJobTemplateRequest(
parent="parent_value",
job_template_id="job_template_id_value",
)
# Make the request
response = client.create_job_template(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.CreateJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.CreateJobTemplate`.
parent (str):
Required. The parent location to create this job
template. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template (google.cloud.video.transcoder_v1.types.JobTemplate):
Required. Parameters for creating job
template.
This corresponds to the ``job_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template_id (str):
Required. The ID to use for the job template, which will
become the final component of the job template's
resource name.
This value should be 4-63 characters, and valid
characters must match the regular expression
``[a-zA-Z][a-zA-Z0-9_-]*``.
This corresponds to the ``job_template_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job_template, job_template_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.CreateJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.CreateJobTemplateRequest):
request = services.CreateJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job_template is not None:
request.job_template = job_template
if job_template_id is not None:
request.job_template_id = job_template_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_job_templates(
self,
request: Union[services.ListJobTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobTemplatesPager:
r"""Lists job templates in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_list_job_templates():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.ListJobTemplatesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_job_templates(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.ListJobTemplatesRequest, dict]):
The request object. Request message for
`TranscoderService.ListJobTemplates`.
parent (str):
Required. The parent location from which to retrieve the
collection of job templates. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.services.transcoder_service.pagers.ListJobTemplatesPager:
Response message for TranscoderService.ListJobTemplates.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.ListJobTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.ListJobTemplatesRequest):
request = services.ListJobTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_job_templates]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_job_template(
self,
request: Union[services.GetJobTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Returns the job template data.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_get_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.GetJobTemplateRequest(
name="name_value",
)
# Make the request
response = client.get_job_template(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.GetJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.GetJobTemplate`.
name (str):
Required. The name of the job template to retrieve.
Format:
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.GetJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.GetJobTemplateRequest):
request = services.GetJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_job_template(
self,
request: Union[services.DeleteJobTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job template.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_delete_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.DeleteJobTemplateRequest(
name="name_value",
)
# Make the request
client.delete_job_template(request=request)
Args:
request (Union[google.cloud.video.transcoder_v1.types.DeleteJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.DeleteJobTemplate`.
name (str):
Required. The name of the job template to delete.
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a services.DeleteJobTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, services.DeleteJobTemplateRequest):
request = services.DeleteJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-video-transcoder",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TranscoderServiceClient",)
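# A minimal usage sketch (illustrative only, not part of the generated client).
# It assumes Application Default Credentials are configured and that the
# project and location below are placeholders supplied by the caller.
if __name__ == "__main__":  # pragma: no cover - example only
    example_client = TranscoderServiceClient()
    example_parent = example_client.common_location_path("my-project", "us-central1")
    # List the jobs in the placeholder location and print their resource names.
    for example_job in example_client.list_jobs(parent=example_parent):
        print(example_job.name)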
|
|
# -*- coding: utf-8 -*-
"""
This code originally comes from parser.py. This file is not meant to be used by itself.
This is for JunOS like ACLs
#Constants
junos_match_types
rules - these actually come from grammar.py
# Classes
Policer
PolicerGroup
QuotedString
# Functions
braced_list
keyword_match
range_match
handle_junos_acl
handle_junos_family_acl
handle_junos_policers
handle_junos_term
juniper_multiline_comments
"""
#Copied metadata from parser.py
__author__ = 'Jathan McCollum, Mike Biancaniello, Michael Harding, Michael Shields'
__editor__ = 'Joseph Malone'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathanism@aol.com'
__copyright__ = 'Copyright 2006-2013, AOL Inc.; 2013 Salesforce.com'
from grammar import *
# Temporary resting place for comments, so the rest of the parser can
# ignore them. Yes, this makes the library not thread-safe.
Comments = []
class Policer(object):
"""
Container class for policer policy definitions. This is a dummy class for
now, that just passes it through as a string.
"""
def __init__(self, name, data):
if not name:
raise exceptions.ActionError("Policer requres name")
self.name = name
self.exceedings = []
self.actions = []
for elt in data:
for k,v in elt.iteritems():
if k == 'if-exceeding':
for entry in v:
type, value = entry
if type == 'bandwidth-limit':
limit = self.str2bits(value)
if limit > 32000000000 or limit < 32000:
raise "bandwidth-limit must be between 32000bps and 32000000000bps"
self.exceedings.append((type, limit))
elif type == 'burst-size-limit':
limit = self.str2bits(value)
if limit > 100000000 or limit < 1500:
raise "burst-size-limit must be between 1500B and 100,000,000B"
self.exceedings.append((type, limit))
elif type == 'bandwidth-percent':
limit = int(value)
if limit < 1 or limit > 100:
raise "bandwidth-percent must be between 1 and 100"
else:
raise "Unknown policer if-exceeding tag: %s" % type
elif k == 'action':
for i in v:
self.actions.append(i)
def str2bits(self, str):
try:
val = int(str)
except (TypeError, ValueError):
if str[-1] == 'k':
return int(str[0:-1]) * 1024
if str[-1] == 'm':
return int(str[0:-1]) * 1048576
else:
raise "invalid bit definition %s" % str
return val
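# Illustrative values (assumed): str2bits("32000") -> 32000,
# str2bits("32k") -> 32768, str2bits("1m") -> 1048576.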
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, repr(self.name))
def __str__(self):
return '\n'.join(self.output())
def output(self):
output = ['policer %s {' % self.name]
if self.exceedings:
output.append(' if-exceeding {')
for x in self.exceedings:
output.append(' %s %s;' % (x[0],x[1]))
if self.exceedings:
output.append(' }')
if self.actions:
output.append(' then {')
for x in self.actions:
output.append(' %s;' % x)
if self.actions:
output.append(' }')
output.append('}')
return output
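# Illustrative rendering (assumed): a Policer named "example" with a
# bandwidth-limit of 32000 and a "discard" action produces output() roughly of
# the form:
#   policer example {
#       if-exceeding {
#           bandwidth-limit 32000;
#       }
#       then {
#           discard;
#       }
#   }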
class PolicerGroup(object):
"""Container for Policer objects. Juniper only."""
def __init__(self, format=None):
self.policers = []
self.format = format
global Comments
self.comments = Comments
Comments = []
def output(self, format=None, *largs, **kwargs):
if format is None:
format = self.format
return getattr(self,'output_' + format)(*largs, **kwargs)
def output_junos(self, replace=False):
output = []
for ent in self.policers:
for x in ent.output():
output.append(x)
if replace:
return ['firewall {', 'replace:'] + [' '+x for x in output] + ['}']
else:
return output
class QuotedString(str):
def __str__(self):
return '"' + self + '"'
junos_match_types = []
def braced_list(arg):
'''Return braced output. Will alert if the comment is malformed.'''
#return '("{", jws?, (%s, jws?)*, "}")' % arg
return '("{", jws?, (%s, jws?)*, "}"!%s)' % (arg, errs['comm_start'])
def keyword_match(keyword, arg=None):
for k in keyword, keyword+'-except':
prod = 'junos_' + k.replace('-', '_')
junos_match_types.append(prod)
if arg is None:
rules[prod] = ('"%s", jsemi' % k, {k: True})
else:
tokens = '"%s", jws, ' % k
if k in address_matches:
tokens += braced_list(arg + ', jsemi')
else:
tokens += arg + ', jsemi'
rules[S(prod)] = (tokens, lambda x, k=k: {k: x})
keyword_match('address', 'cidr / ipaddr')
keyword_match('destination-address', 'cidr / ipaddr')
keyword_match('destination-prefix-list', 'jword')
keyword_match('first-fragment')
keyword_match('fragment-flags', 'fragment_flag')
keyword_match('ip-options', 'ip_option')
keyword_match('is-fragment')
keyword_match('prefix-list', 'jword')
keyword_match('source-address', 'cidr / ipaddr')
keyword_match('source-prefix-list', 'jword')
keyword_match('tcp-established')
keyword_match('tcp-flags', 'tcp_flag')
keyword_match('tcp-initial')
def range_match(key, arg):
rules[S(arg+'_range')] = ('%s, "-", %s' % (arg, arg), tuple)
match = '%s_range / %s' % (arg, arg)
keyword_match(key, '%s / ("[", jws?, (%s, jws?)*, "]")' % (match, match))
range_match('ah-spi', 'alphanums')
range_match('destination-mac-address', 'macaddr')
range_match('destination-port', 'port')
range_match('dscp', 'dscp')
range_match('ether-type', 'alphanums')
range_match('esp-spi', 'alphanums')
range_match('forwarding-class', 'jword')
range_match('fragment-offset', 'port')
range_match('icmp-code', 'icmp_code')
range_match('icmp-type', 'icmp_type')
range_match('interface-group', 'digits')
range_match('packet-length', 'digits')
range_match('port', 'port')
range_match('precedence', 'jword')
range_match('protocol', 'protocol')
range_match('source-mac-address', 'macaddr')
range_match('source-port', 'port')
range_match('vlan-ether-type', 'alphanums')
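# Illustrative note (assumed): keyword_match('tcp-established') registers the
# productions 'junos_tcp_established' and 'junos_tcp_established_except' in
# ``rules``, while range_match('destination-port', 'port') additionally accepts
# single ports, "1024-65535" style ranges, and bracketed lists of either.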
def handle_junos_acl(x):
"""
Parse JUNOS ACL and return an ACL object populated with Term and Policer
objects.
It's expected that x is a 2-tuple of (name, terms) returned from the
parser.
Don't forget to wrap your token in S()!
"""
a = ACL(name=x[0], format='junos')
for elt in x[1:]:
# Handle dictionary args we throw at the constructor
if isinstance(elt, dict):
a.__dict__.update(elt)
elif isinstance(elt, Term):
a.terms.append(elt)
elif isinstance(elt, Policer):
a.policers.append(elt)
else:
raise RuntimeError('Bad Object: %s' % repr(elt))
return a
def handle_junos_family_acl(x):
"""
Parses a JUNOS acl that contains family information and sets the family
attribute for the ACL object.
It's expected that x is a 2-tuple of (family, aclobj) returned from the
parser.
Don't forget to wrap your token in S()!
"""
family, aclobj = x
setattr(aclobj, 'family', family)
return aclobj
def handle_junos_policers(x):
"""Parse JUNOS policers and return a PolicerGroup object"""
p = PolicerGroup(format='junos')
for elt in x:
if isinstance(elt, Policer):
p.policers.append(elt)
else:
raise RuntimeError('bad object: %s in policer' % repr(elt))
return p
def handle_junos_term(d):
"""Parse a JUNOS term and return a Term object"""
if 'modifiers' in d:
d['modifiers'] = Modifiers(d['modifiers'])
return Term(**d)
#For multiline comments
def juniper_multiline_comments():
"""
Return appropriate multi-line comment grammar for Juniper ACLs.
This depends on ``settings.ALLOW_JUNIPER_MULTILINE_COMMENTS``.
"""
single = '-("*/" / "\n")*' # single-line comments only
multi = '-"*/"*' # syntactically correct multi-line support
if settings.ALLOW_JUNIPER_MULTILINE_COMMENTS:
return multi
return single
rules.update({
'jword': 'double_quoted / word',
'double_quoted': ('"\\"", -[\\"]+, "\\""',
lambda x: QuotedString(x[1:-1])),
#'>jws<': '(ws / jcomment)+',
#S('jcomment'): ('"/*", ws?, jcomment_body, ws?, "*/"',
# lambda x: Comment(x[0])),
#'jcomment_body': '-(ws?, "*/")*',
'>jws<': '(ws / jcomment)+',
S('jcomment'): ('jslashbang_comment',
lambda x: Comment(x[0])),
'<comment_start>': '"/*"',
'<comment_stop>': '"*/"',
'>jslashbang_comment<': 'comment_start, jcomment_body, !%s, comment_stop' % errs['comm_stop'],
'jcomment_body': juniper_multiline_comments(),
# Errors on missing ';', ignores multiple ;; and normalizes to one.
'<jsemi>': 'jws?, [;]+!%s' % errs['semicolon'],
'fragment_flag': literals(fragment_flag_names),
'ip_option': "digits / " + literals(ip_option_names),
'tcp_flag': literals(tcp_flag_names),
})
# Note there cannot be jws (including comments) before or after the "filter"
# section of the config. It's wrong to do this anyway, since if you load
# that config onto the router, the comments will not remain in place on
# the next load of a similar config (e.g., another ACL). I had a workaround
# for this but it made the parser substantially slower.
rules.update({
S('junos_raw_acl'): ('jws?, "filter", jws, jword, jws?, ' + \
braced_list('junos_iface_specific / junos_term / junos_policer'),
handle_junos_acl),
'junos_iface_specific': ('("interface-specific", jsemi)',
lambda x: {'interface_specific': len(x) > 0}),
'junos_replace_acl': ('jws?, "firewall", jws?, "{", jws?, "replace:", jws?, (junos_raw_acl, jws?)*, "}"'),
S('junos_replace_family_acl'): ('jws?, "firewall", jws?, "{", jws?, junos_filter_family, jws?, "{", jws?, "replace:", jws?, (junos_raw_acl, jws?)*, "}", jws?, "}"',
handle_junos_family_acl),
S('junos_replace_policers'):('"firewall", jws?, "{", jws?, "replace:", jws?, (junos_policer, jws?)*, "}"',
handle_junos_policers),
'junos_filter_family': ('"family", ws, junos_family_type'),
'junos_family_type': ('"inet" / "inet6" / "ethernet-switching"'),
'opaque_braced_group': ('"{", jws?, (jword / "[" / "]" / ";" / '
'opaque_braced_group / jws)*, "}"',
lambda x: x),
S('junos_term'): ('maybe_inactive, "term", jws, junos_term_name, '
'jws?, ' + braced_list('junos_from / junos_then'),
lambda x: handle_junos_term(dict_sum(x))),
S('junos_term_name'): ('jword', lambda x: {'name': x[0]}),
'maybe_inactive': ('("inactive:", jws)?',
lambda x: {'inactive': len(x) > 0}),
S('junos_from'): ('"from", jws?, ' + braced_list('junos_match'),
lambda x: {'match': Matches(dict_sum(x))}),
S('junos_then'): ('junos_basic_then / junos_braced_then', dict_sum),
S('junos_braced_then'): ('"then", jws?, ' +
braced_list('junos_action/junos_modifier, jsemi'),
dict_sum),
S('junos_basic_then'): ('"then", jws?, junos_action, jsemi', dict_sum),
S('junos_policer'): ('"policer", jws, junos_term_name, jws?, ' +
braced_list('junos_exceeding / junos_policer_then'),
lambda x: Policer(x[0]['name'], x[1:])),
S('junos_policer_then'): ('"then", jws?, ' +
braced_list('junos_policer_action, jsemi')),
S('junos_policer_action'): ('junos_discard / junos_fwd_class / '\
'("loss-priority", jws, jword)',
lambda x: {'action':x}),
'junos_discard': ('"discard"'),
'junos_loss_pri': ('"loss-priority", jws, jword',
lambda x: {'loss-priority':x[0]}),
'junos_fwd_class': ('"forwarding-class", jws, jword',
lambda x: {'forwarding-class':x[0]}),
'junos_filter_specific': ('"filter-specific"'),
S('junos_exceeding'): ('"if-exceeding", jws?, ' +
braced_list('junos_bw_limit/junos_bw_perc/junos_burst_limit'),
lambda x: {'if-exceeding':x}),
S('junos_bw_limit'): ('"bandwidth-limit", jws, word, jsemi',
lambda x: ('bandwidth-limit',x[0])),
S('junos_bw_perc'): ('"bandwidth-percent", jws, alphanums, jsemi',
lambda x: ('bandwidth-percent',x[0])),
S('junos_burst_limit'): ('"burst-size-limit", jws, alphanums, jsemi',
lambda x: ('burst-size-limit',x[0])),
S('junos_match'): (' / '.join(junos_match_types), dict_sum),
S('junos_action'): ('junos_one_action / junos_reject_action / '
'junos_ri_action',
lambda x: {'action': x[0]}),
'junos_one_action': ('"accept" / "discard" / "reject" / '
'("next", jws, "term")'),
'junos_reject_action': ('"reject", jws, ' + literals(icmp_reject_codes),
lambda x: ('reject', x)),
S('junos_ri_action'): ('"routing-instance", jws, jword',
lambda x: ('routing-instance', x[0])),
S('junos_modifier'): ('junos_one_modifier / junos_arg_modifier',
lambda x: {'modifiers': x}),
'junos_one_modifier': ('"log" / "sample" / "syslog" / "port-mirror"',
lambda x: (x, True)),
S('junos_arg_modifier'): 'junos_arg_modifier_kw, jws, jword',
'junos_arg_modifier_kw': ('"count" / "forwarding-class" / "ipsec-sa" /'
'"loss-priority" / "policer"'),
})
|
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The main data collector module.
The data collector is deployed on every compute host and is executed
periodically to collect the CPU utilization data for each VM running
on the host and stores the data in the local file-based data store.
The data is stored as the average number of MHz consumed by a VM
during the last measurement interval. The CPU usage data are stored as
integers. This data format is portable: the stored values can be
converted to the CPU utilization for any host or VM type, supporting
heterogeneous hosts and VMs.
The actual data is obtained from Libvirt in the form of the CPU time
consumed by a VM to date. Using the CPU time collected at the previous
time frame, the CPU time for the past time interval is calculated.
According to the CPU frequency of the host and the length of the time
interval, the CPU time is converted into the required average MHz
consumed by the VM over the last time interval. The collected data are
stored both locally and submitted to the central database. The number
of the latest data values stored locally and passed to the underload /
overload detection and VM selection algorithms is defined using the
`data_collector_data_length` option in the configuration file.
At the beginning of every execution, the data collector obtains the
set of VMs currently running on the host using the Nova API and
compares them to the VMs running on the host at the previous time
step. If new VMs have been found, the data collector fetches the
historical data about them from the central database and stores the
data in the local file-based data store. If some VMs have been
removed, the data collector removes the data about these VMs from the
local data store.
The data collector stores the resource usage information locally in
files in the <local_data_directory>/vm directory, where
<local_data_directory> is defined in the configuration file using
the local_data_directory option. The data for each VM are stored in
a separate file named according to the UUID of the corresponding VM.
The format of the files is a new line separated list of integers
representing the average CPU consumption by the VMs in MHz during the
last measurement interval.
The data collector will be implemented as a Linux daemon running in
the background and collecting data on the resource usage by VMs every
data_collector_interval seconds. When the data collection phase is
invoked, the component performs the following steps:
1. Read the names of the files from the <local_data_directory>/vm
directory to determine the list of VMs running on the host at the
last data collection.
2. Call the Nova API to obtain the list of VMs that are currently
active on the host.
3. Compare the old and new lists of VMs and determine the newly added
or removed VMs.
4. Delete the files from the <local_data_directory>/vm directory
corresponding to the VMs that have been removed from the host.
5. Fetch the latest data_collector_data_length data values from the
central database for each newly added VM using the database
connection information specified in the sql_connection option and
save the data in the <local_data_directory>/vm directory.
6. Call the Libvirt API to obtain the CPU time for each VM active on
the host.
7. Transform the data obtained from the Libvirt API into the average
MHz according to the frequency of the host's CPU and time interval
from the previous data collection.
8. Store the converted data in the <local_data_directory>/vm
directory in separate files for each VM, and submit the data to the
central database.
9. Schedule the next execution after data_collector_interval
seconds.
"""
from contracts import contract
from neat.contracts_primitive import *
from neat.contracts_extra import *
import os
import time
from collections import deque
import libvirt
import neat.common as common
from neat.config import *
from neat.db_utils import *
import logging
log = logging.getLogger(__name__)
@contract
def start():
""" Start the data collector loop.
:return: The final state.
:rtype: dict(str: *)
"""
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH],
REQUIRED_FIELDS)
common.init_logging(
config['log_directory'],
'data-collector.log',
int(config['log_level']))
vm_path = common.build_local_vm_path(config['local_data_directory'])
if not os.access(vm_path, os.F_OK):
os.makedirs(vm_path)
log.info('Created a local VM data directory: %s', vm_path)
else:
cleanup_all_local_data(config['local_data_directory'])
log.info('Cleaned up the local data directory: %s',
config['local_data_directory'])
interval = config['data_collector_interval']
log.info('Starting the data collector, ' +
'iterations every %s seconds', interval)
return common.start(
init_state,
execute,
config,
int(interval))
@contract
def init_state(config):
""" Initialize a dict for storing the state of the data collector.
:param config: A config dictionary.
:type config: dict(str: *)
:return: A dict containing the initial state of the data collector.
:rtype: dict
"""
vir_connection = libvirt.openReadOnly(None)
if vir_connection is None:
message = 'Failed to open a connection to the hypervisor'
log.critical(message)
raise OSError(message)
hostname = vir_connection.getHostname()
host_cpu_mhz, host_ram = get_host_characteristics(vir_connection)
physical_cpus = common.physical_cpu_count(vir_connection)
host_cpu_usable_by_vms = float(config['host_cpu_usable_by_vms'])
db = init_db(config['sql_connection'])
db.update_host(hostname,
int(host_cpu_mhz * host_cpu_usable_by_vms),
physical_cpus,
host_ram)
return {'previous_time': 0.,
'previous_cpu_time': dict(),
'previous_cpu_mhz': dict(),
'previous_host_cpu_time_total': 0.,
'previous_host_cpu_time_busy': 0.,
'previous_overload': -1,
'vir_connection': vir_connection,
'hostname': hostname,
'host_cpu_overload_threshold':
float(config['host_cpu_overload_threshold']) * \
host_cpu_usable_by_vms,
'physical_cpus': physical_cpus,
'physical_cpu_mhz': host_cpu_mhz,
'physical_core_mhz': host_cpu_mhz / physical_cpus,
'db': db}
def execute(config, state):
""" Execute a data collection iteration.
1. Read the names of the files from the <local_data_directory>/vm
directory to determine the list of VMs running on the host at the
last data collection.
2. Call the Nova API to obtain the list of VMs that are currently
active on the host.
3. Compare the old and new lists of VMs and determine the newly added
or removed VMs.
4. Delete the files from the <local_data_directory>/vm directory
corresponding to the VMs that have been removed from the host.
5. Fetch the latest data_collector_data_length data values from the
central database for each newly added VM using the database
connection information specified in the sql_connection option and
save the data in the <local_data_directory>/vm directory.
6. Call the Libvirt API to obtain the CPU time for each VM active on
the host.
7. Transform the data obtained from the Libvirt API into the average
MHz according to the frequency of the host's CPU and time interval
from the previous data collection.
8. Store the converted data in the <local_data_directory>/vm
directory in separate files for each VM, and submit the data to the
central database.
:param config: A config dictionary.
:type config: dict(str: *)
:param state: A state dictionary.
:type state: dict(str: *)
:return: The updated state dictionary.
:rtype: dict(str: *)
"""
log.info('Started an iteration')
vm_path = common.build_local_vm_path(config['local_data_directory'])
host_path = common.build_local_host_path(config['local_data_directory'])
data_length = int(config['data_collector_data_length'])
vms_previous = get_previous_vms(vm_path)
vms_current = get_current_vms(state['vir_connection'])
vms_added = get_added_vms(vms_previous, vms_current.keys())
added_vm_data = dict()
if vms_added:
if log.isEnabledFor(logging.DEBUG):
log.debug('Added VMs: %s', str(vms_added))
# Iterate over a copy: removing items from the list while enumerating it
# would skip entries and misalign the indices.
for vm in list(vms_added):
if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
vms_added.remove(vm)
del vms_current[vm]
if log.isEnabledFor(logging.DEBUG):
log.debug('Added VM %s skipped as migrating in', vm)
added_vm_data = fetch_remote_data(state['db'],
data_length,
vms_added)
if log.isEnabledFor(logging.DEBUG):
log.debug('Fetched remote data: %s', str(added_vm_data))
write_vm_data_locally(vm_path, added_vm_data, data_length)
vms_removed = get_removed_vms(vms_previous, vms_current.keys())
if vms_removed:
if log.isEnabledFor(logging.DEBUG):
log.debug('Removed VMs: %s', str(vms_removed))
cleanup_local_vm_data(vm_path, vms_removed)
for vm in vms_removed:
del state['previous_cpu_time'][vm]
del state['previous_cpu_mhz'][vm]
log.info('Started VM data collection')
current_time = time.time()
(cpu_time, cpu_mhz) = get_cpu_mhz(state['vir_connection'],
state['physical_core_mhz'],
state['previous_cpu_time'],
state['previous_time'],
current_time,
vms_current.keys(),
state['previous_cpu_mhz'],
added_vm_data)
log.info('Completed VM data collection')
log.info('Started host data collection')
(host_cpu_time_total,
host_cpu_time_busy,
host_cpu_mhz) = get_host_cpu_mhz(state['physical_cpu_mhz'],
state['previous_host_cpu_time_total'],
state['previous_host_cpu_time_busy'])
log.info('Completed host data collection')
if state['previous_time'] > 0:
append_vm_data_locally(vm_path, cpu_mhz, data_length)
append_vm_data_remotely(state['db'], cpu_mhz)
total_vms_cpu_mhz = sum(cpu_mhz.values())
host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
if host_cpu_mhz_hypervisor < 0:
host_cpu_mhz_hypervisor = 0
total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
append_host_data_locally(host_path, host_cpu_mhz_hypervisor, data_length)
append_host_data_remotely(state['db'],
state['hostname'],
host_cpu_mhz_hypervisor)
if log.isEnabledFor(logging.DEBUG):
log.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
log.debug('Collected total VMs CPU MHz: %s', str(total_vms_cpu_mhz))
log.debug('Collected hypervisor CPU MHz: %s', str(host_cpu_mhz_hypervisor))
log.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
log.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))
state['previous_overload'] = log_host_overload(
state['db'],
state['host_cpu_overload_threshold'],
state['hostname'],
state['previous_overload'],
state['physical_cpu_mhz'],
total_cpu_mhz)
state['previous_time'] = current_time
state['previous_cpu_time'] = cpu_time
state['previous_cpu_mhz'] = cpu_mhz
state['previous_host_cpu_time_total'] = host_cpu_time_total
state['previous_host_cpu_time_busy'] = host_cpu_time_busy
log.info('Completed an iteration')
return state
@contract
def get_previous_vms(path):
""" Get a list of VM UUIDs from the path.
:param path: A path to read VM UUIDs from.
:type path: str
:return: The list of VM UUIDs from the path.
:rtype: list(str)
"""
return os.listdir(path)
@contract()
def get_current_vms(vir_connection):
""" Get a dict of VM UUIDs to states from libvirt.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect
:return: The dict of VM UUIDs to states from libvirt.
:rtype: dict(str: int)
"""
vm_uuids = {}
for vm_id in vir_connection.listDomainsID():
try:
vm = vir_connection.lookupByID(vm_id)
vm_uuids[vm.UUIDString()] = vm.state(0)[0]
except libvirt.libvirtError:
pass
return vm_uuids
@contract
def get_added_vms(previous_vms, current_vms):
""" Get a list of newly added VM UUIDs.
:param previous_vms: A list of VMs at the previous time frame.
:type previous_vms: list(str)
:param current_vms: A list of VMs at the current time frame.
:type current_vms: list(str)
:return: A list of VM UUIDs added since the last time frame.
:rtype: list(str)
"""
return substract_lists(current_vms, previous_vms)
@contract
def get_removed_vms(previous_vms, current_vms):
""" Get a list of VM UUIDs removed since the last time frame.
:param previous_vms: A list of VMs at the previous time frame.
:type previous_vms: list(str)
:param current_vms: A list of VMs at the current time frame.
:type current_vms: list(str)
:return: A list of VM UUIDs removed since the last time frame.
:rtype: list(str)
"""
return substract_lists(previous_vms, current_vms)
@contract
def substract_lists(list1, list2):
""" Return the elements of list1 that are not in list2.
:param list1: The first list.
:type list1: list
:param list2: The second list.
:type list2: list
:return: The list of elements of list1 that are not in list2.
:rtype: list
"""
return list(set(list1).difference(list2))
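# Example (illustrative): substract_lists(['a', 'b', 'c'], ['b']) returns
# ['a', 'c'] (element order is not guaranteed, since sets are used internally).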
@contract
def cleanup_local_vm_data(path, vms):
""" Delete the local data related to the removed VMs.
:param path: A path to remove VM data from.
:type path: str
:param vms: A list of removed VM UUIDs.
:type vms: list(str)
"""
for vm in vms:
os.remove(os.path.join(path, vm))
@contract
def cleanup_all_local_data(path):
""" Delete all the local data about VMs.
:param path: A path to the local data directory.
:type path: str
"""
vm_path = common.build_local_vm_path(path)
cleanup_local_vm_data(vm_path, os.listdir(vm_path))
host_path = common.build_local_host_path(path)
if os.access(host_path, os.F_OK):
os.remove(host_path)
@contract
def fetch_remote_data(db, data_length, uuids):
""" Fetch VM data from the central DB.
:param db: The database object.
:type db: Database
:param data_length: The length of data to fetch.
:type data_length: int
:param uuids: A list of VM UUIDs to fetch data for.
:type uuids: list(str)
:return: A dictionary of VM UUIDs and the corresponding data.
:rtype: dict(str : list(int))
"""
result = dict()
for uuid in uuids:
result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length)
return result
@contract
def write_vm_data_locally(path, data, data_length):
""" Write a set of CPU MHz values for a set of VMs.
:param path: A path to write the data to.
:type path: str
:param data: A map of VM UUIDs onto the corresponding CPU MHz history.
:type data: dict(str : list(int))
:param data_length: The maximum allowed length of the data.
:type data_length: int
"""
for uuid, values in data.items():
with open(os.path.join(path, uuid), 'w') as f:
if data_length > 0:
f.write('\n'.join([str(x)
for x in values[-data_length:]]) + '\n')
@contract
def append_vm_data_locally(path, data, data_length):
""" Write a CPU MHz value for each out of a set of VMs.
:param path: A path to write the data to.
:type path: str
:param data: A map of VM UUIDs onto the corresponding CPU MHz values.
:type data: dict(str : int)
:param data_length: The maximum allowed length of the data.
:type data_length: int
"""
for uuid, value in data.items():
vm_path = os.path.join(path, uuid)
if not os.access(vm_path, os.F_OK):
with open(vm_path, 'w') as f:
f.write(str(value) + '\n')
else:
with open(vm_path, 'r+') as f:
values = deque(f.read().strip().splitlines(), data_length)
values.append(value)
f.truncate(0)
f.seek(0)
f.write('\n'.join([str(x) for x in values]) + '\n')
@contract
def append_vm_data_remotely(db, data):
""" Submit CPU MHz values to the central database.
:param db: The database object.
:type db: Database
:param data: A map of VM UUIDs onto the corresponding CPU MHz values.
:type data: dict(str : int)
"""
db.insert_vm_cpu_mhz(data)
@contract
def append_host_data_locally(path, cpu_mhz, data_length):
""" Write a CPU MHz value for the host.
:param path: A path to write the data to.
:type path: str
:param cpu_mhz: A CPU MHz value.
:type cpu_mhz: int,>=0
:param data_length: The maximum allowed length of the data.
:type data_length: int
"""
if not os.access(path, os.F_OK):
with open(path, 'w') as f:
f.write(str(cpu_mhz) + '\n')
else:
with open(path, 'r+') as f:
values = deque(f.read().strip().splitlines(), data_length)
values.append(cpu_mhz)
f.truncate(0)
f.seek(0)
f.write('\n'.join([str(x) for x in values]) + '\n')
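# Sketch of the resulting file layout (illustrative values, an assumption for
# this comment): with data_length=3, successive calls with 500, 700, 900 and
# 1100 MHz leave the local host file holding one value per line, trimmed to
# the newest three:
#
#   700
#   900
#   1100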
@contract
def append_host_data_remotely(db, hostname, host_cpu_mhz):
""" Submit a host CPU MHz value to the central database.
:param db: The database object.
:type db: Database
:param hostname: The host name.
:type hostname: str
:param host_cpu_mhz: An average host CPU utilization in MHz.
:type host_cpu_mhz: int,>=0
"""
db.insert_host_cpu_mhz(hostname, host_cpu_mhz)
@contract
def get_cpu_mhz(vir_connection, physical_core_mhz, previous_cpu_time,
previous_time, current_time, current_vms,
previous_cpu_mhz, added_vm_data):
""" Get the average CPU utilization in MHz for a set of VMs.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect
:param physical_core_mhz: The core frequency of the physical CPU in MHz.
:type physical_core_mhz: int
:param previous_cpu_time: A dict of previous CPU times for the VMs.
:type previous_cpu_time: dict(str : int)
:param previous_time: The previous timestamp.
:type previous_time: float
:param current_time: The current timestamp.
:type current_time: float
:param current_vms: A list of VM UUIDs.
:type current_vms: list(str)
:param previous_cpu_mhz: A dict of VM UUIDs and previous CPU MHz.
:type previous_cpu_mhz: dict(str : int)
:param added_vm_data: A dict of VM UUIDs and the corresponding data.
:type added_vm_data: dict(str : list(int))
:return: The updated CPU times and average CPU utilization in MHz.
:rtype: tuple(dict(str : int), dict(str : int))
"""
previous_vms = previous_cpu_time.keys()
added_vms = get_added_vms(previous_vms, current_vms)
removed_vms = get_removed_vms(previous_vms, current_vms)
cpu_mhz = {}
for uuid in removed_vms:
del previous_cpu_time[uuid]
for uuid, cpu_time in previous_cpu_time.items():
current_cpu_time = get_cpu_time(vir_connection, uuid)
if current_cpu_time < cpu_time:
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: current_cpu_time < cpu_time: ' +
'previous CPU time %d, ' +
'current CPU time %d',
uuid, cpu_time, current_cpu_time)
log.debug('VM %s: using previous CPU MHz %d',
uuid, previous_cpu_mhz[uuid])
cpu_mhz[uuid] = previous_cpu_mhz[uuid]
else:
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: previous CPU time %d, ' +
'current CPU time %d, ' +
'previous time %.10f, ' +
'current time %.10f',
uuid, cpu_time, current_cpu_time,
previous_time, current_time)
cpu_mhz[uuid] = calculate_cpu_mhz(physical_core_mhz,
previous_time,
current_time,
cpu_time,
current_cpu_time)
previous_cpu_time[uuid] = current_cpu_time
if log.isEnabledFor(logging.DEBUG):
log.debug('VM %s: CPU MHz %d', uuid, cpu_mhz[uuid])
for uuid in added_vms:
if added_vm_data[uuid]:
cpu_mhz[uuid] = added_vm_data[uuid][-1]
previous_cpu_time[uuid] = get_cpu_time(vir_connection, uuid)
return previous_cpu_time, cpu_mhz
@contract
def get_cpu_time(vir_connection, uuid):
""" Get the CPU time of a VM specified by the UUID using libvirt.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect
:param uuid: The UUID of a VM.
:type uuid: str[36]
:return: The CPU time of the VM.
:rtype: int,>=0
"""
try:
domain = vir_connection.lookupByUUIDString(uuid)
return int(domain.getCPUStats(True, 0)[0]['cpu_time'])
except libvirt.libvirtError:
return 0
@contract
def calculate_cpu_mhz(cpu_mhz, previous_time, current_time,
previous_cpu_time, current_cpu_time):
""" Calculate the average CPU utilization in MHz for a period of time.
:param cpu_mhz: The frequency of a core of the physical CPU in MHz.
:type cpu_mhz: int
:param previous_time: The previous time.
:type previous_time: float
:param current_time: The current time.
:type current_time: float
:param previous_cpu_time: The previous CPU time of the domain.
:type previous_cpu_time: int
:param current_cpu_time: The current CPU time of the domain.
:type current_cpu_time: int
:return: The average CPU utilization in MHz.
:rtype: int,>=0
"""
return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) / \
((current_time - previous_time) * 1000000000))
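# Worked example (illustrative numbers, not taken from the original code):
# with a 2400 MHz physical core, a 30-second interval and 6e9 ns of additional
# CPU time consumed by the domain, the estimate is
#   int(2400 * 6e9 / (30 * 1e9)) = 480 MHz,
# i.e. the VM kept roughly a fifth of one core busy over the interval.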
@contract
def get_host_cpu_mhz(cpu_mhz, previous_cpu_time_total, previous_cpu_time_busy):
""" Get the average CPU utilization in MHz for a set of VMs.
:param cpu_mhz: The total frequency of the physical CPU in MHz.
:type cpu_mhz: int
:param previous_cpu_time_total: The previous total CPU time.
:type previous_cpu_time_total: float
:param previous_cpu_time_busy: The previous busy CPU time.
:type previous_cpu_time_busy: float
:return: The current total and busy CPU time, and CPU utilization in MHz.
:rtype: tuple(float, float, int)
"""
cpu_time_total, cpu_time_busy = get_host_cpu_time()
cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \
(cpu_time_total - previous_cpu_time_total))
if cpu_usage < 0:
raise ValueError('The host CPU usage in MHz must be >=0, but it is: ' + str(cpu_usage) +
'; cpu_mhz=' + str(cpu_mhz) +
'; previous_cpu_time_total=' + str(previous_cpu_time_total) +
'; cpu_time_total=' + str(cpu_time_total) +
'; previous_cpu_time_busy=' + str(previous_cpu_time_busy) +
'; cpu_time_busy=' + str(cpu_time_busy))
return cpu_time_total, \
cpu_time_busy, \
cpu_usage
@contract()
def get_host_cpu_time():
""" Get the total and busy CPU time of the host.
:return: A tuple of the total and busy CPU time.
:rtype: tuple(float, float)
"""
with open('/proc/stat', 'r') as f:
values = [float(x) for x in f.readline().split()[1:8]]
return sum(values), sum(values[0:3])
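# For reference (based on the standard /proc/stat layout, not on code in this
# module): the first line looks like
#   cpu  user nice system idle iowait irq softirq steal ...
# The seven fields read above are user..softirq; their sum is treated as the
# total CPU time, and the first three (user, nice, system) as the busy time.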
@contract()
def get_host_characteristics(vir_connection):
""" Get the total CPU MHz and RAM of the host.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect
:return: A tuple of the total CPU MHz and RAM of the host.
:rtype: tuple(int, long)
"""
info = vir_connection.getInfo()
return info[2] * info[3], info[1]
@contract()
def log_host_overload(db, overload_threshold, hostname, previous_overload,
host_total_mhz, host_utilization_mhz):
""" Log to the DB whether the host is overloaded.
:param db: The database object.
:type db: Database
:param overload_threshold: The host overload threshold.
:type overload_threshold: float
:param hostname: The host name.
:type hostname: str
:param previous_overload: Whether the host has been overloaded.
:type previous_overload: int
:param host_total_mhz: The total frequency of the CPU in MHz.
:type host_total_mhz: int
:param host_utilization_mhz: The total CPU utilization in MHz.
:type host_utilization_mhz: int
:return: Whether the host is overloaded.
:rtype: int
"""
overload = overload_threshold * host_total_mhz < host_utilization_mhz
overload_int = int(overload)
if previous_overload != -1 and previous_overload != overload_int \
or previous_overload == -1:
db.insert_host_overload(hostname, overload)
if log.isEnabledFor(logging.DEBUG):
log.debug('Overload state logged: %s', str(overload))
return overload_int
|
|
import math
import nltk
import time
import sys
# Constants to be used by you when you fill the functions
START_SYMBOL = '*'
STOP_SYMBOL = 'STOP'
MINUS_INFINITY_SENTENCE_LOG_PROB = -1000
log2 = lambda x: math.log(x, 2)
# TODO: IMPLEMENT THIS FUNCTION
# Calculates unigram, bigram, and trigram probabilities given a training corpus
# training_corpus: is a list of the sentences. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function outputs three python dictionaries, where the keys are
# tuples expressing the ngram and the value is the log probability of that
# ngram
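# An illustration (hypothetical counts, not from any corpus): the returned
# values are maximum-likelihood estimates in log base 2, e.g. for a trigram
#   trigram_p[('*', 'the', 'cat')] = log2(count('* the cat')) - log2(count('* the'))
# so a context seen 8 times and followed by 'cat' twice scores log2(2/8) = -2.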
def calc_probabilities(training_corpus):
"""
Calculate unigram, bigram and trigram log (base 2) probabilities from a
training corpus and return them as three dictionaries keyed by n-gram tuples.
"""
# unigram_tuples = []
# bigram_tuples = []
# trigram_tuples = []
unigram_count = {}
bigram_count = {}
trigram_count = {}
unigram_count_pnodes = {}
bigram_count_pnodes = {}
trigram_count_pnodes = {}
unigram_total = 0
bigram_total = 0
trigram_total = 0
print 'total {} sentences'.format(len(training_corpus))
for i in xrange(0, len(training_corpus)):
if i % 3000 == 0:
print 'processing ', i, 'th sentence...'
training_corpus[i] = START_SYMBOL + ' ' + training_corpus[i]
training_corpus[i] = training_corpus[i] + ' ' + STOP_SYMBOL
# training_corpus[i].replace('.',' ' + STOP_SYMBOL)
tokens = training_corpus[i].split()
unigram_tuples_i = list((token,) for token in tokens)
bigram_tuples_i = list(nltk.bigrams(tokens))
trigram_tuples_i = list(nltk.trigrams(tokens))
unigram_total += len(unigram_tuples_i)
bigram_total += len(bigram_tuples_i)
trigram_total += len(trigram_tuples_i)
for item in unigram_tuples_i:
if item in [(START_SYMBOL,)]:
continue
unigram_count.setdefault(item, 0)
unigram_count_pnodes.setdefault(item[0:-1], 0)
unigram_count[item] = unigram_count[item] + 1
unigram_count_pnodes[
item[0:-1]] = unigram_count_pnodes[item[0:-1]] + 1
for item in bigram_tuples_i:
bigram_count.setdefault(item, 0)
bigram_count_pnodes.setdefault(item[0:-1], 0)
bigram_count[item] = bigram_count[item] + 1
bigram_count_pnodes[
item[0:-1]] = bigram_count_pnodes[item[0:-1]] + 1
for item in trigram_tuples_i:
trigram_count.setdefault(item, 0)
trigram_count_pnodes.setdefault(item[0:-1], 0)
trigram_count[item] = trigram_count[item] + 1
trigram_count_pnodes[
item[0:-1]] = trigram_count_pnodes[item[0:-1]] + 1
unigram_p = {item: math.log(unigram_count[item], 2) - math.log(unigram_count_pnodes[item[0:-1]], 2) for item in set(unigram_count)}
bigram_p = {item: math.log(bigram_count[item], 2) - math.log(bigram_count_pnodes[item[0:-1]], 2) for item in set(bigram_count)}
trigram_p = {item: math.log(trigram_count[item], 2) - math.log(trigram_count_pnodes[item[0:-1]], 2) for item in set(trigram_count)}
print "calc_probabilities finished!"
return unigram_p, bigram_p, trigram_p
# Prints the output for q1
# Each input is a python dictionary where keys are a tuple expressing the
# ngram, and the value is the log probability of that ngram
def q1_output(unigrams, bigrams, trigrams, filename):
# output probabilities
outfile = open(filename, 'w')
unigrams_keys = sorted(unigrams.keys())
for unigram in unigrams_keys:
outfile.write('UNIGRAM ' +
unigram[0] +
' ' +
str(unigrams[unigram]) +
'\n')
outfile.flush()
bigrams_keys = sorted(bigrams.keys())
for bigram in bigrams_keys:
outfile.write('BIGRAM ' +
bigram[0] +
' ' +
bigram[1] +
' ' +
str(bigrams[bigram]) +
'\n')
outfile.flush()
trigrams_keys = sorted(trigrams.keys())
for trigram in trigrams_keys:
outfile.write('TRIGRAM ' +
trigram[0] +
' ' +
trigram[1] +
' ' +
trigram[2] +
' ' +
str(trigrams[trigram]) +
'\n')
outfile.flush()
outfile.close()
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence
# ngram_p: python dictionary of probabilities of uni-, bi- and trigrams.
# n: size of the ngram you want to use to compute probabilities
# corpus: list of sentences to score. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function must return a python list of scores, where the first
# element is the score of the first sentence, etc.
def score(ngram_p, n, corpus):
print "scoring corpus for ", n, "-grams"
scores = []
for i, sentence in enumerate(corpus):
ngram_tuples = None
score_i = 0
if i % 10000 == 0:
print 'scoring ', i, 'th sentence...'
tokens = sentence.split()
if n == 1:
ngram_tuples = list([(token,) for token in tokens])
elif n == 2:
ngram_tuples = list(nltk.bigrams(tokens))
elif n == 3:
ngram_tuples = list(nltk.trigrams(tokens))
try:
score_i = sum([ngram_p[gram] for gram in ngram_tuples
if gram not in [(START_SYMBOL,)]])
except KeyError as error:
score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
print 'ngram_tuple ', gram, ' not in dict ', error.message
scores.append(score_i)
return scores
# Outputs a score to a file
# scores: list of scores
# filename: is the output file name
def score_output(scores, filename):
outfile = open(filename, 'w')
for score in scores:
outfile.write(str(score) + '\n')
outfile.close()
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence with a linearly interpolated model
# Each ngram argument is a python dictionary where the keys are tuples that express an ngram and the value is the log probability of that ngram
# Like score(), this function returns a python list of scores
# TODO: `EM` algorithm to find the optimal weights.
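# Sketch of the interpolation used below (the weights are fixed at 1/3 each;
# an EM search for better lambdas is left as the TODO above): for each trigram
# (w1, w2, w3) the sentence score accumulates
#   log2( l1 * 2**logP(w3|w1,w2) + l2 * 2**logP(w3|w2) + l3 * 2**logP(w3) )
# i.e. the stored log probabilities are converted back to linear space before
# being mixed.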
def linearscore(unigrams, bigrams, trigrams, corpus):
scores = []
weights = (1. / 3, 1. / 3, 1. / 3,)
for i, sentence in enumerate(corpus):
if i % 3000 == 0:
print 'linearscore ', i, 'th sentence...'
score_i = 0
tokens = sentence.split()
trigram_tuples = list(nltk.trigrams(tokens))
try:
for trigram in trigram_tuples:
score_i += log2(sum([weights[0] * 2 ** trigrams[trigram[0:]],
weights[1] * 2 ** bigrams[trigram[1:]],
weights[2] * 2 ** unigrams[trigram[2:]],
]))
except KeyError as e:
score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
print i, 'th sentence', 'ngram ', trigram, ' not in dict', e.message
scores.append(score_i)
return scores
DATA_PATH = 'data/'
OUTPUT_PATH = 'output/'
# DO NOT MODIFY THE MAIN FUNCTION
def main():
# start timer
time.clock()
# get data
infile = open(DATA_PATH + 'Brown_train.txt', 'r')
corpus = infile.readlines()
infile.close()
# calculate ngram probabilities (question 1)
unigrams, bigrams, trigrams = calc_probabilities(corpus)
# question 1 output
q1_output(unigrams, bigrams, trigrams, OUTPUT_PATH + 'A1.txt')
# score sentences (question 2)
uniscores = score(unigrams, 1, corpus)
biscores = score(bigrams, 2, corpus)
triscores = score(trigrams, 3, corpus)
# question 2 output
score_output(uniscores, OUTPUT_PATH + 'A2.uni.txt')
score_output(biscores, OUTPUT_PATH + 'A2.bi.txt')
score_output(triscores, OUTPUT_PATH + 'A2.tri.txt')
# linear interpolation (question 3)
linearscores = linearscore(unigrams, bigrams, trigrams, corpus)
# question 3 output
score_output(linearscores, OUTPUT_PATH + 'A3.txt')
# open Sample1 and Sample2 (question 5)
infile = open(DATA_PATH + 'Sample1.txt', 'r')
sample1 = infile.readlines()
infile.close()
infile = open(DATA_PATH + 'Sample2.txt', 'r')
sample2 = infile.readlines()
infile.close()
# score the samples
sample1scores = linearscore(unigrams, bigrams, trigrams, sample1)
sample2scores = linearscore(unigrams, bigrams, trigrams, sample2)
# question 5 output
score_output(sample1scores, OUTPUT_PATH + 'Sample1_scored.txt')
score_output(sample2scores, OUTPUT_PATH + 'Sample2_scored.txt')
# print total time to run Part A
print("Part A time: " + str(time.clock()) + ' sec')
if __name__ == "__main__":
main()
|
|
"""This module contains the general information for StorageLocalDisk ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class StorageLocalDiskConsts:
ADMIN_ACTION_DISABLE_SED_FOREIGN_DRIVES = "disable-sed-foreign-drives"
ADMIN_ACTION_DISABLE_SELF_ENCRYPT = "disable-self-encrypt"
ADMIN_ACTION_ENABLE_SELF_ENCRYPT = "enable-self-encrypt"
ADMIN_ACTION_LOCATOR_LED_OFF = "locator-led-off"
ADMIN_ACTION_LOCATOR_LED_ON = "locator-led-on"
ADMIN_ACTION_MAKE_DEDICATED_HOT_SPARE = "make-dedicated-hot-spare"
ADMIN_ACTION_MAKE_GLOBAL_HOT_SPARE = "make-global-hot-spare"
ADMIN_ACTION_MAKE_JBOD = "make-jbod"
ADMIN_ACTION_MAKE_UNCONFIGURED_GOOD = "make-unconfigured-good"
ADMIN_ACTION_PREPARE_FOR_REMOVAL = "prepare-for-removal"
ADMIN_ACTION_REMOVE_HOT_SPARE = "remove-hot-spare"
ADMIN_ACTION_SET_BOOT_DRIVE = "set-boot-drive"
ADMIN_ACTION_UNDO_PREPARE_FOR_REMOVAL = "undo-prepare-for-removal"
DEDICATED_HOT_SPARE_FOR_VDID_ = ""
LOCATOR_LEDSTATUS_OFF = "off"
LOCATOR_LEDSTATUS_ON = "on"
class StorageLocalDisk(ManagedObject):
"""This is StorageLocalDisk class."""
consts = StorageLocalDiskConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("StorageLocalDisk", "storageLocalDisk", "pd-[id]", VersionMeta.Version151f, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'storageController'], [u'faultInst', u'storageLocalDiskProps', u'storageOperation'], ["Get", "Set"]),
"modular": MoMeta("StorageLocalDisk", "storageLocalDisk", "pd-[id]", VersionMeta.Version2013e, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'storageController'], [u'faultInst', u'storageLocalDiskProps', u'storageOperation'], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["disable-sed-foreign-drives", "disable-self-encrypt", "enable-self-encrypt", "locator-led-off", "locator-led-on", "make-dedicated-hot-spare", "make-global-hot-spare", "make-jbod", "make-unconfigured-good", "prepare-for-removal", "remove-hot-spare", "set-boot-drive", "undo-prepare-for-removal"], []),
"dedicated_hot_spare_for_vd_id": MoPropertyMeta("dedicated_hot_spare_for_vd_id", "dedicatedHotSpareForVDId", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [""], ["0-4294967295"]),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version151f, MoPropertyMeta.NAMING, 0x10, 0, 510, None, [], ["0-256"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"coerced_size": MoPropertyMeta("coerced_size", "coercedSize", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_firmware": MoPropertyMeta("drive_firmware", "driveFirmware", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_serial_number": MoPropertyMeta("drive_serial_number", "driveSerialNumber", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_state": MoPropertyMeta("drive_state", "driveState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_association": MoPropertyMeta("enclosure_association", "enclosureAssociation", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_logical_id": MoPropertyMeta("enclosure_logical_id", "enclosureLogicalId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_sas_address0": MoPropertyMeta("enclosure_sas_address0", "enclosureSASAddress0", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_sas_address1": MoPropertyMeta("enclosure_sas_address1", "enclosureSASAddress1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"fde_capable": MoPropertyMeta("fde_capable", "fdeCapable", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"fde_enabled": MoPropertyMeta("fde_enabled", "fdeEnabled", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"foreign_locked": MoPropertyMeta("foreign_locked", "foreignLocked", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"health": MoPropertyMeta("health", "health", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"interface_type": MoPropertyMeta("interface_type", "interfaceType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"locator_led_status": MoPropertyMeta("locator_led_status", "locatorLEDStatus", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["off", "on"], []),
"locked": MoPropertyMeta("locked", "locked", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"media_type": MoPropertyMeta("media_type", "mediaType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"online": MoPropertyMeta("online", "online", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"pd_status": MoPropertyMeta("pd_status", "pdStatus", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"predictive_failure_count": MoPropertyMeta("predictive_failure_count", "predictiveFailureCount", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"product_id": MoPropertyMeta("product_id", "productId", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"secured": MoPropertyMeta("secured", "secured", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["disable-sed-foreign-drives", "disable-self-encrypt", "enable-self-encrypt", "locator-led-off", "locator-led-on", "make-dedicated-hot-spare", "make-global-hot-spare", "make-jbod", "make-unconfigured-good", "prepare-for-removal", "remove-hot-spare", "set-boot-drive", "undo-prepare-for-removal"], []),
"dedicated_hot_spare_for_vd_id": MoPropertyMeta("dedicated_hot_spare_for_vd_id", "dedicatedHotSpareForVDId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 510, None, [], ["0-4294967295"]),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, 0x10, 0, 510, None, [], ["0-256"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"coerced_size": MoPropertyMeta("coerced_size", "coercedSize", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_firmware": MoPropertyMeta("drive_firmware", "driveFirmware", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_serial_number": MoPropertyMeta("drive_serial_number", "driveSerialNumber", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_state": MoPropertyMeta("drive_state", "driveState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_association": MoPropertyMeta("enclosure_association", "enclosureAssociation", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_logical_id": MoPropertyMeta("enclosure_logical_id", "enclosureLogicalId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_sas_address0": MoPropertyMeta("enclosure_sas_address0", "enclosureSASAddress0", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"enclosure_sas_address1": MoPropertyMeta("enclosure_sas_address1", "enclosureSASAddress1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"fde_capable": MoPropertyMeta("fde_capable", "fdeCapable", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
"fde_enabled": MoPropertyMeta("fde_enabled", "fdeEnabled", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
"foreign_locked": MoPropertyMeta("foreign_locked", "foreignLocked", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
"health": MoPropertyMeta("health", "health", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"interface_type": MoPropertyMeta("interface_type", "interfaceType", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"locked": MoPropertyMeta("locked", "locked", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
"media_type": MoPropertyMeta("media_type", "mediaType", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"online": MoPropertyMeta("online", "online", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"pd_status": MoPropertyMeta("pd_status", "pdStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"predictive_failure_count": MoPropertyMeta("predictive_failure_count", "predictiveFailureCount", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"product_id": MoPropertyMeta("product_id", "productId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"secured": MoPropertyMeta("secured", "secured", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "no", "yes"], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"adminAction": "admin_action",
"dedicatedHotSpareForVDId": "dedicated_hot_spare_for_vd_id",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"coercedSize": "coerced_size",
"driveFirmware": "drive_firmware",
"driveSerialNumber": "drive_serial_number",
"driveState": "drive_state",
"enclosureAssociation": "enclosure_association",
"enclosureLogicalId": "enclosure_logical_id",
"enclosureSASAddress0": "enclosure_sas_address0",
"enclosureSASAddress1": "enclosure_sas_address1",
"fdeCapable": "fde_capable",
"fdeEnabled": "fde_enabled",
"foreignLocked": "foreign_locked",
"health": "health",
"interfaceType": "interface_type",
"linkSpeed": "link_speed",
"locatorLEDStatus": "locator_led_status",
"locked": "locked",
"mediaType": "media_type",
"online": "online",
"pdStatus": "pd_status",
"predictiveFailureCount": "predictive_failure_count",
"productId": "product_id",
"secured": "secured",
"vendor": "vendor",
},
"modular": {
"adminAction": "admin_action",
"dedicatedHotSpareForVDId": "dedicated_hot_spare_for_vd_id",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"coercedSize": "coerced_size",
"driveFirmware": "drive_firmware",
"driveSerialNumber": "drive_serial_number",
"driveState": "drive_state",
"enclosureAssociation": "enclosure_association",
"enclosureLogicalId": "enclosure_logical_id",
"enclosureSASAddress0": "enclosure_sas_address0",
"enclosureSASAddress1": "enclosure_sas_address1",
"fdeCapable": "fde_capable",
"fdeEnabled": "fde_enabled",
"foreignLocked": "foreign_locked",
"health": "health",
"interfaceType": "interface_type",
"linkSpeed": "link_speed",
"locked": "locked",
"mediaType": "media_type",
"online": "online",
"pdStatus": "pd_status",
"predictiveFailureCount": "predictive_failure_count",
"productId": "product_id",
"secured": "secured",
"vendor": "vendor",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.admin_action = None
self.dedicated_hot_spare_for_vd_id = None
self.status = None
self.child_action = None
self.coerced_size = None
self.drive_firmware = None
self.drive_serial_number = None
self.drive_state = None
self.enclosure_association = None
self.enclosure_logical_id = None
self.enclosure_sas_address0 = None
self.enclosure_sas_address1 = None
self.fde_capable = None
self.fde_enabled = None
self.foreign_locked = None
self.health = None
self.interface_type = None
self.link_speed = None
self.locator_led_status = None
self.locked = None
self.media_type = None
self.online = None
self.pd_status = None
self.predictive_failure_count = None
self.product_id = None
self.secured = None
self.vendor = None
ManagedObject.__init__(self, "StorageLocalDisk", parent_mo_or_dn, **kwargs)
|
|
import model as Model
import string
import math
import sys
###################################################################################################
def solve(problem):
nodes = nodes_by_max_size(problem)
computers = computers_by_capacity(problem)
allocation = Model.Allocation()
#--------------------------------------------------------------------------------------
# First step: allocate weakest setting of nodes with residence constraint (otherwise I could allocate other nodes and not leave any space for res constraints)
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
setting = settings[0]
if setting.residency != []:
computer = setting.residency[0]
if feasible(setting, computer, allocation, problem):
allocation.assign(setting,computer)
else:
print '\nTHERE IS NO SOLUTION,', node.id , 'cannot be allocated - residence\n'
sys.exit(0)
# Second step: allocate weakest setting of nodes with coresidence constraint
# (if one node with cores is allocated in the robot, I could allocate other nodes and not leave any space for other nodes with cores)
for node in nodes:
if not allocation.which_setting(node):
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
setting = settings[0]
if setting.node.coresidence != []:
done = False
for computer in computers:
if not allocation.which_setting(node):
if feasible_coRes(setting, computer, allocation, problem):
allocation.assign(setting,computer)
done = True
break
if not done:
print '\nTHERE IS NO SOLUTION,', node.id , 'cannot be allocated - coresidence\n'
sys.exit(0)
# Third step: allocate weakest setting of other nodes
for node in nodes:
if not allocation.which_setting(node):
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
setting = settings[0]
done = False
i = 0
for computer in computers:
i += 1
if not allocation.which_setting(node):
if feasible(setting, computer, allocation, problem):
allocation.assign(setting,computer)
done = True
break
if not done:
print '\nTHERE IS NO SOLUTION,', node.id , 'cannot be allocated - other\n'
sys.exit(0)
#--------------------------------------------------------------------------------------
#Fourth step: replace allocated setting by a more powerful one, when possible (CPU increment = 1)
gradients = {}
gradients_aux = {}
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
if len(settings) > 1 and settings[0].size < settings[1].size:
gradients[node.id] = node.ratio
# We increase nodes by ratio
upgrade_nodes(allocation, problem, computers, gradients)
#--------------------------------------------------------------------------------------
# Move nodes that didn't reach their limit from full computers to others with space
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
computer = allocation.computer_for_node(node)
if allocation.computer_util(computer) == computer.capacity:
if len(settings) > 1 and settings[0].size < settings[1].size and settings[0].residency == []:
gradients[node.id] = node.ratio
while len(gradients) > 0:
max_node = max(gradients, key=gradients.get)
node = problem.nodes[max_node]
computer = allocation.computer_for_node(node)
settings = sorted(node.settings, key=lambda setting: setting.v_index, reverse=True)
for computer_new in computers:
if computer != computer_new and feasible(settings[1], computer_new, allocation, problem):
allocation.assign(settings[0],computer_new)
for tcr in node.coresidence:
if feasible(tcr.settings[0], computer_new, allocation, problem):
allocation.assign(tcr.settings[0],computer_new)
else:
print '\nTHERE IS NO SOLUTION,', node.id , 'cannot be allocated - coresidence server full B\n'
sys.exit(0)
gradients_aux[node.id] = node.ratio
upgrade_nodes(allocation, problem, computers, gradients_aux)
break
del gradients[max_node]
#------------
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
if len(settings) > 1 and settings[0].size < settings[1].size:
gradients[node.id] = node.ratio
upgrade_nodes(allocation, problem, computers, gradients)
#--------------------------------------------------------------------------------------
# Move any node without parameters nor coRes constraint from full computers to others with space
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
computer = allocation.computer_for_node(node)
if allocation.computer_util(computer) == computer.capacity:
if len(settings) == 1 and settings[0].residency == [] and settings[0].node.coresidence == []:
setting = settings[0]
for computer_new in computers:
if computer != computer_new and feasible(setting, computer_new, allocation, problem):
allocation.assign(setting,computer_new)
break
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
if len(settings) > 1 and settings[0].size < settings[1].size:
gradients[node.id] = node.ratio
if gradients != {}:
upgrade_nodes(allocation, problem, computers, gradients)
#------------
# Move any node, reaching or not their limit, from full computers to others with space
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
computer = allocation.computer_for_node(node)
if allocation.computer_util(computer) == computer.capacity:
if len(settings) > 1 and settings[0].residency == []:
gradients[node.id] = node.ratio
while len(gradients) > 0:
max_node = min(gradients, key=gradients.get)
node = problem.nodes[max_node]
computer = allocation.computer_for_node(node)
settings = sorted(node.settings, key=lambda setting: setting.v_index, reverse=True)
for computer_new in computers:
if computer != computer_new and feasible(settings[0], computer_new, allocation, problem):
allocation.assign(settings[0],computer_new)
for tcr in node.coresidence:
if feasible(tcr.settings[0], computer_new, allocation, problem):
allocation.assign(tcr.settings[0],computer_new)
else:
print '\nTHERE IS NO SOLUTION,', node.id , 'cannot be allocated - coresidence server full B\n'
sys.exit(0)
gradients_aux[node.id] = node.ratio
upgrade_nodes(allocation, problem, computers, gradients_aux)
break
del gradients[max_node]
#------------
for node in nodes:
settings = sorted(node.settings, key=lambda setting: setting.size, reverse=False)
if len(settings) > 1 and settings[0].size < settings[1].size:
#print node.id, node.ratio
gradients[node.id] = node.ratio
#print gradients
if gradients != {}:
upgrade_nodes(allocation, problem, computers, gradients)
#--------------------------------------------------------------------------------------
print
for computer in computers:
print 'Computer ', computer.id, ' (capacity, used):', computer.capacity, ',', allocation.computer_util(computer)
print 'Performance:', allocation.perf()
print
return allocation
###################################################################################################
# Increments all the units possible per node by gradient value
def upgrade_nodes(allocation, problem, computers, gradients):
while len(gradients) != 0:
max_node = max(gradients, key=gradients.get)
node = problem.nodes[max_node]
settings = sorted(node.settings, key=lambda setting: setting.v_index, reverse=True)
computer = allocation.computer_for_node(node)
if settings[0].size + 1 <= settings[1].size:
if allocation.computer_util(computer) + 1 <= computer.capacity:
old_perf = settings[0].perf
settings[0].size += 1
y = float(settings[0].size) / 100
formula = string.replace(node.formula, 'x', 'y')
new_perf = round(eval(formula), 2)
settings[0].perf = new_perf
node.ratio = round(new_perf - old_perf, 2)
gradients[node.id] = node.ratio
else:
del gradients[max_node]
else:
del gradients[max_node]
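# Illustrative walk-through with a hypothetical node formula (not taken from
# the original model): if node.formula is '10*x + 2' and the chosen setting
# grows from size 40 to 41, then y = 0.41 and the new perf is
# round(10 * 0.41 + 2, 2) = 6.1, so the node's ratio (its gradient) becomes
# 6.1 - 6.0 = 0.1 and the node is reconsidered with that updated gradient.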
###################################################################################################
def nodes_by_max_size(problem):
return sorted(problem.nodes.values(), key=lambda node: max([setting.size for setting in node.settings]), reverse=True)
###################################################################################################
def computers_by_capacity(problem):
return sorted(problem.computers.values(), key=lambda computer: computer.id, reverse=True)
###################################################################################################
def feasible(setting, computer, allocation, problem):
# Don't overload
if setting.size + allocation.computer_util(computer) > computer.capacity:
return False
# Bandwidth
tmp_allocation = allocation.clone()
tmp_allocation.assign(setting, computer)
for link in problem.links.values():
if tmp_allocation.bandwidth_util(link) > link.bandwidth:
return False
return True
###################################################################################################
def feasible_coRes(setting, computer, allocation, problem):
# Coresidency
for other_node in setting.node.coresidence:
if allocation.which_setting(other_node):
if allocation.computer_for_node(other_node) != computer:
return False
# Don't overload
if setting.size + allocation.computer_util(computer) > computer.capacity:
return False
# Bandwidth
tmp_allocation = allocation.clone()
tmp_allocation.assign(setting, computer)
for link in problem.links.values():
if tmp_allocation.bandwidth_util(link) > link.bandwidth:
return False
return True
|
|
from main import db
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
coauthors = db.Table('coauthor',
db.Column('author_id', db.Integer,
db.ForeignKey('author.id')),
db.Column('coauthor_id', db.Integer,
db.ForeignKey('author.id'))
)
author_publications = db.Table('author_publication',
db.Column('author_id', db.Integer,
db.ForeignKey('author.id')),
db.Column('publication_id', db.Integer,
db.ForeignKey('publication.id'))
)
class Author(db.Model):
"""
A class that represents authors.
"""
__tablename__ = 'author'
"""
The name of the table where authors are stored.
"""
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
"""
The ID of the author.
"""
name = db.Column(db.String(256), nullable = False)
"""
The name of the author.
"""
title = db.Column(db.String(256), nullable = True)
"""
The title (e.g. associate professor) of the author.
"""
organization_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable = True)
"""
The ID of the organization where the author belongs.
"""
organization = db.relation('Organization', foreign_keys=[organization_id], backref = "authors")
"""
The organization where the author belongs.
"""
year_of_phd = db.Column(db.Integer, nullable = True)
"""
The year when the author received his/her Ph.D.
"""
tenured = db.Column(db.Boolean, nullable = True)
"""
Whether the author is tenured.
"""
scholar_id = db.Column(db.String(64), nullable = True, unique = True)
"""
The ID of the author in Google Scholar.
"""
website_url = db.Column(db.String(256), nullable = True)
"""
The URL of the website of the author.
"""
email_domain = db.Column(db.String(256), nullable = True)
"""
The domain of the email of the author.
"""
total_citations = db.Column(db.Integer, nullable = True)
"""
The total citations for the author.
"""
h_index = db.Column(db.Numeric(precision = 10, scale = 2), nullable = True)
"""
The value of the h-index metric for the author.
"""
i10_index = db.Column(db.Numeric(precision = 10, scale = 2), nullable = True)
"""
The value of the i10-index metric for the author.
"""
retrieved_at = db.Column(db.DateTime, nullable = True)
"""
The date and time when information about the author was last retrieved
from Google Scholar.
"""
coauthors = db.relationship("Author", secondary = coauthors,
primaryjoin = id == coauthors.c.author_id,
secondaryjoin = id == coauthors.c.coauthor_id)
"""
The co-authors of the author.
"""
publications = db.relationship("Publication",
secondary = author_publications,
backref = "authors")
"""
The publications of the author.
"""
citations_per_year = db.relationship("AuthorCitationsPerYear",
cascade="all, delete-orphan")
"""
The citations per year for the author.
"""
def organization_tree(self):
"""
Gets the names of the organization, where the author belongs, and all
its ancestors (starting from the root of the family tree) separated with
' :: '.
"""
if not self.organization:
return ''
organizations = [ self.organization ]
organizations.extend(self.organization.ancestors())
return ' :: '.join([ a.name for a in reversed(organizations) ])
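    # Example of the returned string (hypothetical data): an author in a
    # department two levels below the root would get something like
    # 'Example University :: School of Engineering :: Computer Science'.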
def organization_ids(self):
"""
Gets the ID's of the organization, where the author belongs, and all
its ancestors (starting from the root of the family tree).
"""
if not self.organization:
return []
organizations = [ self.organization ]
organizations.extend(self.organization.ancestors())
return [ a.id for a in reversed(organizations) ]
class AuthorCitationsPerYear(db.Model):
"""
A class that represents the citations for authors per year.
"""
__tablename__ = 'author_citations_per_year'
"""
The name of the table where citations per year are stored.
"""
author_id = db.Column(db.Integer, db.ForeignKey('author.id'), primary_key = True)
"""
The ID of the author.
"""
author = db.relation('Author')
"""
The author.
"""
year = db.Column(db.Integer, primary_key = True)
"""
The year.
"""
citations = db.Column(db.Integer, nullable = False)
"""
The citations for the author in the year.
"""
class Organization(db.Model):
"""
A class that represents organizations (e.g. universities, schools,
departments).
"""
__tablename__ = 'organization'
"""
The name of the table where organizations are stored.
"""
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
"""
The ID of the organization.
"""
name = db.Column(db.String(256), nullable = False)
"""
The name of the organization.
"""
parent_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable = True)
"""
The ID of the parent organization.
"""
parent = db.relation('Organization', remote_side = [id], backref = "children")
"""
The parent organization.
"""
location = db.Column(db.String(256), nullable = True)
"""
The location of the organization.
"""
website_url = db.Column(db.String(256), nullable = True)
"""
The URL of the website of the organization.
"""
children_source_url = db.Column(db.String(256), nullable = True)
"""
The URL where the children of the organization can be retrieved from.
"""
def children_ids(self):
"""
Gets the ID's of the children of the organization.
"""
return [ c.id for c in self.children ]
def ancestors(self):
"""
Gets the ancestors of the organization (starting from its parent and
ending at the root of the family tree).
"""
if self.parent is None:
return []
l = [ self.parent ]
l.extend(self.parent.ancestors())
return l
def ancestor_ids(self):
"""
Gets the ID's of the ancestors of the organization (starting from its
parent and ending at the root of the family tree).
"""
return [ a.id for a in self.ancestors() ]
def ancestor_tree(self):
"""
Gets the names of the ancestors of the organization (starting from the
root of the family tree) separated with ' :: '.
"""
ancestors = self.ancestors()
if not ancestors:
return None
return ' :: '.join([ a.name for a in reversed(ancestors) ])
def descendants(self):
"""
Gets the descendants of the organization (starting from its children and
ending at the leaves of the family tree).
"""
if not self.children:
return []
l = []
for c in self.children:
l.append(c)
l.extend(c.descendants())
return l
def descendant_tree(self):
"""
Gets the descendants of the organization as a tree (starting from the
children).
"""
descendants = []
for c in self.children:
descendants.append({ 'id': c.id, 'name': c.name, 'children': c.descendant_tree(), 'number_of_authors': c.number_of_authors() })
return descendants
def descendant_ids(self):
"""
Gets the ID's of the descendants of the organization (starting from its children and
ending at the leaves of the family tree).
"""
return [ d.id for d in self.descendants() ]
def number_of_authors(self):
"""
Gets the number of the authors that belong to the organization.
"""
return len(self.authors)
class Publication(db.Model):
"""
A class that represents publications.
"""
__tablename__ = 'publication'
"""
The name of the table where publications are stored.
"""
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
"""
The ID of the publication.
"""
type = db.Column(db.String(16), nullable = True)
"""
The type of the publication.
"""
title = db.Column(db.String(512), nullable = True)
"""
The title of the publication.
"""
author_names = db.Column('authors', db.String(512), nullable = True)
"""
The names of the authors of the publication separated with commas.
"""
scholar_id = db.Column(db.String(64), nullable = True, unique = True)
"""
The ID of the publication in Google Scholar.
"""
year_of_publication = db.Column(db.Integer, nullable = True)
"""
The year when the publication was published.
"""
total_citations = db.Column(db.Integer, nullable = True)
"""
The total citations for the publication.
"""
retrieved_at = db.Column(db.DateTime, nullable = True)
"""
The date and time when information about the publication was last retrieved
from Google Scholar.
"""
citations_per_year = db.relationship("PublicationCitationsPerYear",
cascade="all, delete-orphan")
"""
The citations per year for the publication.
"""
class PublicationCitationsPerYear(db.Model):
"""
A class that represents the citations for publications per year.
"""
__tablename__ = 'publication_citations_per_year'
"""
The name of the table where citations per year are stored.
"""
publication_id = db.Column(db.Integer, db.ForeignKey('publication.id'), primary_key = True)
"""
The ID of the publication.
"""
publication = db.relation('Publication')
"""
The publication.
"""
year = db.Column(db.Integer, primary_key = True)
"""
The year.
"""
citations = db.Column(db.Integer, nullable = False)
"""
The citations for the publication in the year.
"""
from main import app
from flask.ext.login import UserMixin
class User(UserMixin):
"""
A class that represents application users.
"""
def __init__(self, username, password):
"""
Constructs a user with the given username and password.
"""
self.id = username
self.password = password
@classmethod
def get(cls):
"""
Gets the only valid application user.
"""
return User(app.config['BASIC_AUTH_USERNAME'], app.config['BASIC_AUTH_PASSWORD'])
|
|
# Copyright (c) 2013 Mirantis Inc.
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for executing commands on nodes via SSH.
The main access point is method get_remote(instance), it returns
InstanceInteropHelper object which does the actual work. See the
class for the list of available methods.
It is a context manager, so it could be used with 'with' statement
like that:
with get_remote(instance) as r:
r.execute_command(...)
Note that the module offloads the ssh calls to a child process.
It was implemented that way because we found no way to run paramiko
and eventlet together. The private high-level module methods are
implementations which are run in a separate process.
"""
import logging
import os
import shlex
import time
import uuid
from eventlet.green import subprocess as e_subprocess
from eventlet import semaphore
from eventlet import timeout as e_timeout
from oslo.config import cfg
from oslo.utils import excutils
import paramiko
import requests
from requests import adapters
import six
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.utils import crypto
from sahara.utils import hashabledict as h
from sahara.utils.openstack import base
from sahara.utils.openstack import neutron
from sahara.utils import procutils
from sahara.utils import remote
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_ssh = None
_sessions = {}
INFRA = None
_global_remote_semaphore = None
def _connect(host, username, private_key, proxy_command=None):
global _ssh
LOG.debug('Creating SSH connection')
if type(private_key) in [str, unicode]:
private_key = crypto.to_paramiko_private_key(private_key)
_ssh = paramiko.SSHClient()
_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
proxy = None
if proxy_command:
LOG.debug('creating proxy using command: {0}'.format(proxy_command))
proxy = paramiko.ProxyCommand(proxy_command)
_ssh.connect(host, username=username, pkey=private_key, sock=proxy)
def _cleanup():
global _ssh
_ssh.close()
def _read_paramimko_stream(recv_func):
result = ''
buf = recv_func(1024)
while buf != '':
result += buf
buf = recv_func(1024)
return result
def _escape_quotes(command):
command = command.replace('\\', '\\\\')
command = command.replace('"', '\\"')
return command
def _execute_command(cmd, run_as_root=False, get_stderr=False,
raise_when_error=True):
global _ssh
chan = _ssh.get_transport().open_session()
if run_as_root:
chan.exec_command('sudo bash -c "%s"' % _escape_quotes(cmd))
else:
chan.exec_command(cmd)
# todo(dmitryme): that could hang if stderr buffer overflows
stdout = _read_paramimko_stream(chan.recv)
stderr = _read_paramimko_stream(chan.recv_stderr)
ret_code = chan.recv_exit_status()
if ret_code and raise_when_error:
raise ex.RemoteCommandException(cmd=cmd, ret_code=ret_code,
stdout=stdout, stderr=stderr)
if get_stderr:
return ret_code, stdout, stderr
else:
return ret_code, stdout
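# Illustrative expansion (hypothetical command, not from the original module):
# with run_as_root=True a command such as
#   cat /etc/passwd | grep "root"
# is wrapped as
#   sudo bash -c "cat /etc/passwd | grep \"root\""
# using _escape_quotes() above, so the inner quotes survive the outer bash -c.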
def _get_http_client(host, port, proxy_command=None):
global _sessions
_http_session = _sessions.get((host, port), None)
LOG.debug('cached HTTP session for {0}:{1} is {2}'.format(host, port,
_http_session))
if not _http_session:
if proxy_command:
# can return a new session here because it actually uses
# the same adapter (and same connection pools) for a given
# host and port tuple
_http_session = _get_proxied_http_session(
proxy_command, host, port=port)
LOG.debug('created proxied HTTP session for {0}:{1}'
.format(host, port))
else:
# need to cache the sessions that are not proxied through
# HTTPRemoteWrapper so that a new session with a new HTTPAdapter
# and associated pools is not recreated for each HTTP invocation
_http_session = requests.Session()
LOG.debug('created standard HTTP session for {0}:{1}'
.format(host, port))
adapter = requests.adapters.HTTPAdapter()
for prefix in ['http://', 'https://']:
_http_session.mount(prefix + '%s:%s' % (host, port),
adapter)
LOG.debug('caching session {0} for {1}:{2}'
.format(_http_session, host, port))
_sessions[(host, port)] = _http_session
return _http_session
def _write_fl(sftp, remote_file, data):
fl = sftp.file(remote_file, 'w')
fl.write(data)
fl.close()
def _append_fl(sftp, remote_file, data):
fl = sftp.file(remote_file, 'a')
fl.write(data)
fl.close()
def _write_file(sftp, remote_file, data, run_as_root):
if run_as_root:
temp_file = 'temp-file-%s' % six.text_type(uuid.uuid4())
_write_fl(sftp, temp_file, data)
_execute_command(
'mv %s %s' % (temp_file, remote_file), run_as_root=True)
else:
_write_fl(sftp, remote_file, data)
def _append_file(sftp, remote_file, data, run_as_root):
if run_as_root:
temp_file = 'temp-file-%s' % six.text_type(uuid.uuid4())
_write_fl(sftp, temp_file, data)
_execute_command(
'cat %s >> %s' % (temp_file, remote_file), run_as_root=True)
_execute_command('rm -f %s' % temp_file)
else:
_append_fl(sftp, remote_file, data)
def _write_file_to(remote_file, data, run_as_root=False):
global _ssh
_write_file(_ssh.open_sftp(), remote_file, data, run_as_root)
def _write_files_to(files, run_as_root=False):
global _ssh
sftp = _ssh.open_sftp()
for fl, data in six.iteritems(files):
_write_file(sftp, fl, data, run_as_root)
def _append_to_file(remote_file, data, run_as_root=False):
global _ssh
_append_file(_ssh.open_sftp(), remote_file, data, run_as_root)
def _append_to_files(files, run_as_root=False):
global _ssh
sftp = _ssh.open_sftp()
for fl, data in six.iteritems(files):
_append_file(sftp, fl, data, run_as_root)
def _read_file(sftp, remote_file):
fl = sftp.file(remote_file, 'r')
data = fl.read()
fl.close()
return data
def _read_file_from(remote_file, run_as_root=False):
global _ssh
fl = remote_file
if run_as_root:
fl = 'temp-file-%s' % (six.text_type(uuid.uuid4()))
_execute_command('cp %s %s' % (remote_file, fl), run_as_root=True)
try:
return _read_file(_ssh.open_sftp(), fl)
except IOError:
LOG.error(_LE('Can\'t read file "%s"') % remote_file)
raise
finally:
if run_as_root:
_execute_command(
'rm %s' % fl, run_as_root=True, raise_when_error=False)
def _replace_remote_string(remote_file, old_str, new_str):
old_str = old_str.replace("\'", "\''")
new_str = new_str.replace("\'", "\''")
cmd = "sudo sed -i 's,%s,%s,g' %s" % (old_str, new_str, remote_file)
_execute_command(cmd)
def _execute_on_vm_interactive(cmd, matcher):
global _ssh
buf = ''
channel = _ssh.invoke_shell()
LOG.debug('channel is {0}'.format(channel))
try:
LOG.debug('sending cmd {0}'.format(cmd))
channel.send(cmd + '\n')
while not matcher.is_eof(buf):
buf += channel.recv(4096)
response = matcher.get_response(buf)
if response is not None:
channel.send(response + '\n')
buf = ''
finally:
LOG.debug('closing channel')
channel.close()
def _acquire_remote_semaphore():
context.current().remote_semaphore.acquire()
_global_remote_semaphore.acquire()
def _release_remote_semaphore():
_global_remote_semaphore.release()
context.current().remote_semaphore.release()
def _get_proxied_http_session(proxy_command, host, port=None):
session = requests.Session()
adapter = ProxiedHTTPAdapter(proxy_command, host, port)
session.mount('http://{0}:{1}'.format(host, adapter.port), adapter)
return session
class ProxiedHTTPAdapter(adapters.HTTPAdapter):
port = None
host = None
def __init__(self, proxy_command, host, port):
super(ProxiedHTTPAdapter, self).__init__()
LOG.debug('HTTP adapter created with cmd {0}'.format(proxy_command))
self.cmd = shlex.split(proxy_command)
self.port = port
self.host = host
def get_connection(self, url, proxies=None):
pool_conn = (
super(ProxiedHTTPAdapter, self).get_connection(url, proxies))
if hasattr(pool_conn, '_get_conn'):
http_conn = pool_conn._get_conn()
if http_conn.sock is None:
if hasattr(http_conn, 'connect'):
sock = self._connect()
LOG.debug('HTTP connection {0} getting new '
'netcat socket {1}'.format(http_conn, sock))
http_conn.sock = sock
else:
if hasattr(http_conn.sock, 'is_netcat_socket'):
LOG.debug('pooled http connection has existing '
'netcat socket. resetting pipe...')
http_conn.sock.reset()
pool_conn._put_conn(http_conn)
return pool_conn
def close(self):
LOG.debug('Closing HTTP adapter for {0}:{1}'
.format(self.host, self.port))
super(ProxiedHTTPAdapter, self).close()
def _connect(self):
LOG.debug('Returning netcat socket with command {0}'
.format(self.cmd))
rootwrap_command = CONF.rootwrap_command if CONF.use_rootwrap else ''
return NetcatSocket(self.cmd, rootwrap_command)
class NetcatSocket(object):
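    # Duck-types the small subset of the socket interface that requests/urllib3
    # rely on (send/sendall, recv, makefile, fileno, settimeout, close) on top
    # of a subprocess's stdin/stdout, so HTTP traffic can be tunnelled through
    # an arbitrary command such as netcat run inside a network namespace.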
def _create_process(self):
self.process = e_subprocess.Popen(self.cmd,
stdin=e_subprocess.PIPE,
stdout=e_subprocess.PIPE,
stderr=e_subprocess.PIPE)
def __init__(self, cmd, rootwrap_command=None):
self.cmd = cmd
self.rootwrap_command = rootwrap_command
self._create_process()
def send(self, content):
try:
self.process.stdin.write(content)
self.process.stdin.flush()
except IOError as e:
raise ex.SystemError(e)
return len(content)
def sendall(self, content):
return self.send(content)
def makefile(self, mode, *arg):
if mode.startswith('r'):
return self.process.stdout
if mode.startswith('w'):
return self.process.stdin
raise ex.IncorrectStateError(_("Unknown file mode %s") % mode)
def recv(self, size):
try:
return os.read(self.process.stdout.fileno(), size)
except IOError as e:
raise ex.SystemError(e)
def _terminate(self):
if self.rootwrap_command:
os.system('{0} kill {1}'.format(self.rootwrap_command,
self.process.pid))
else:
self.process.terminate()
def close(self):
LOG.debug('Socket close called')
self._terminate()
def settimeout(self, timeout):
pass
def fileno(self):
return self.process.stdin.fileno()
def is_netcat_socket(self):
return True
def reset(self):
self._terminate()
self._create_process()
class InstanceInteropHelper(remote.Remote):
def __init__(self, instance):
self.instance = instance
def __enter__(self):
_acquire_remote_semaphore()
try:
self.bulk = BulkInstanceInteropHelper(self.instance)
return self.bulk
except Exception:
with excutils.save_and_reraise_exception():
_release_remote_semaphore()
def __exit__(self, *exc_info):
try:
self.bulk.close()
finally:
_release_remote_semaphore()
def get_neutron_info(self):
neutron_info = h.HashableDict()
neutron_info['network'] = (
self.instance.node_group.cluster.neutron_management_network)
ctx = context.current()
neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
neutron_info['token'] = ctx.token
neutron_info['tenant'] = ctx.tenant_name
neutron_info['host'] = self.instance.management_ip
LOG.debug('Returning neutron info: {0}'.format(neutron_info))
return neutron_info
def _build_proxy_command(self, command, host=None, port=None, info=None,
rootwrap_command=None):
# Accepted keywords in the proxy command template:
# {host}, {port}, {tenant_id}, {network_id}, {router_id}
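        # For illustration (hypothetical values): a template such as
        #   'ip netns exec qrouter-{router_id} nc {host} {port}'
        # is expanded by the format() call below into something like
        #   'ip netns exec qrouter-1234 nc 192.168.0.5 22'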
keywords = {}
if not info:
info = self.get_neutron_info()
keywords['tenant_id'] = context.current().tenant_id
keywords['network_id'] = info['network']
# Query Neutron only if needed
if '{router_id}' in command:
client = neutron.NeutronClient(info['network'], info['uri'],
info['token'], info['tenant'])
keywords['router_id'] = client.get_router()
keywords['host'] = host
keywords['port'] = port
try:
command = command.format(**keywords)
except KeyError as e:
LOG.error(_('Invalid keyword in proxy_command: %s'), str(e))
# Do not give more details to the end-user
raise ex.SystemError('Misconfiguration')
if rootwrap_command:
command = '{0} {1}'.format(rootwrap_command, command)
return command
def _get_conn_params(self):
proxy_command = None
if CONF.proxy_command:
# Build a session through a user-defined socket
proxy_command = CONF.proxy_command
elif CONF.use_namespaces and not CONF.use_floating_ips:
# Build a session through a netcat socket in the Neutron namespace
proxy_command = (
'ip netns exec qrouter-{router_id} nc {host} {port}')
# proxy_command is currently a template, turn it into a real command
# i.e. dereference {host}, {port}, etc.
if proxy_command:
rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else ''
proxy_command = self._build_proxy_command(
proxy_command, host=self.instance.management_ip, port=22,
info=None, rootwrap_command=rootwrap)
return (self.instance.management_ip,
self.instance.node_group.image_username,
self.instance.node_group.cluster.management_private_key,
proxy_command)
def _run(self, func, *args, **kwargs):
proc = procutils.start_subprocess()
try:
procutils.run_in_subprocess(proc, _connect,
self._get_conn_params())
return procutils.run_in_subprocess(proc, func, args, kwargs)
except Exception:
with excutils.save_and_reraise_exception():
procutils.shutdown_subprocess(proc, _cleanup)
finally:
procutils.shutdown_subprocess(proc, _cleanup)
def _run_with_log(self, func, timeout, *args, **kwargs):
start_time = time.time()
try:
with e_timeout.Timeout(timeout, ex.TimeoutException(timeout)):
return self._run(func, *args, **kwargs)
finally:
self._log_command('%s took %.1f seconds to complete' % (
func.__name__, time.time() - start_time))
def _run_s(self, func, timeout, *args, **kwargs):
_acquire_remote_semaphore()
try:
return self._run_with_log(func, timeout, *args, **kwargs)
finally:
_release_remote_semaphore()
def get_http_client(self, port, info=None):
self._log_command('Retrieving HTTP session for {0}:{1}'.format(
self.instance.management_ip, port))
proxy_command = None
if CONF.proxy_command:
# Build a session through a user-defined socket
proxy_command = CONF.proxy_command
elif info or (CONF.use_namespaces and not CONF.use_floating_ips):
# need neutron info
if not info:
info = self.get_neutron_info()
# Build a session through a netcat socket in the Neutron namespace
proxy_command = (
'ip netns exec qrouter-{router_id} nc {host} {port}')
# proxy_command is currently a template, turn it into a real command
# i.e. dereference {host}, {port}, etc.
if proxy_command:
rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else ''
proxy_command = self._build_proxy_command(
proxy_command, host=self.instance.management_ip, port=port,
info=info, rootwrap_command=rootwrap)
return _get_http_client(self.instance.management_ip, port,
proxy_command)
def close_http_session(self, port):
global _sessions
host = self.instance.management_ip
self._log_command(_("Closing HTTP session for %(host)s:%(port)s") % {
'host': host, 'port': port})
session = _sessions.get((host, port), None)
if session is None:
raise ex.NotFoundException(
_('Session for %(host)s:%(port)s not cached') % {
'host': host, 'port': port})
session.close()
del _sessions[(host, port)]
def execute_command(self, cmd, run_as_root=False, get_stderr=False,
raise_when_error=True, timeout=300):
self._log_command('Executing "%s"' % cmd)
return self._run_s(_execute_command, timeout, cmd, run_as_root,
get_stderr, raise_when_error)
def write_file_to(self, remote_file, data, run_as_root=False, timeout=120):
self._log_command('Writing file "%s"' % remote_file)
self._run_s(_write_file_to, timeout, remote_file, data, run_as_root)
def write_files_to(self, files, run_as_root=False, timeout=120):
self._log_command('Writing files "%s"' % files.keys())
self._run_s(_write_files_to, timeout, files, run_as_root)
def append_to_file(self, r_file, data, run_as_root=False, timeout=120):
self._log_command('Appending to file "%s"' % r_file)
self._run_s(_append_to_file, timeout, r_file, data, run_as_root)
def append_to_files(self, files, run_as_root=False, timeout=120):
self._log_command('Appending to files "%s"' % files.keys())
self._run_s(_append_to_files, timeout, files, run_as_root)
def read_file_from(self, remote_file, run_as_root=False, timeout=120):
self._log_command('Reading file "%s"' % remote_file)
return self._run_s(_read_file_from, timeout, remote_file, run_as_root)
def replace_remote_string(self, remote_file, old_str, new_str,
timeout=120):
self._log_command('In file "%s" replacing string "%s" '
'with "%s"' % (remote_file, old_str, new_str))
self._run_s(_replace_remote_string, timeout, remote_file, old_str,
new_str)
def execute_on_vm_interactive(self, cmd, matcher, timeout=1800):
"""Runs given command and responds to prompts.
'cmd' is a command to execute.
        'matcher' is an object which provides responses to the command's
        prompts. It should implement two methods:
* get_response(buf) - returns response on prompt if it is
found in 'buf' string, which is a part of command output.
If no prompt is found, the method should return None.
* is_eof(buf) - returns True if current 'buf' indicates that
the command is finished. False should be returned
otherwise.
"""
self._log_command('Executing interactively "%s"' % cmd)
self._run_s(_execute_on_vm_interactive, timeout, cmd, matcher)
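    # A minimal matcher sketch (illustrative only, not part of this module)
    # that satisfies the interface described in the docstring above:
    #
    #     class YesMatcher(object):
    #         def get_response(self, buf):
    #             # answer a trailing "[y/n]" prompt with "y"
    #             return 'y' if buf.rstrip().endswith('[y/n]') else None
    #
    #         def is_eof(self, buf):
    #             # treat a trailing shell prompt as end of output
    #             return buf.rstrip().endswith('$')
    #
    #     remote.execute_on_vm_interactive('some-interactive-command',
    #                                      YesMatcher())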
def _log_command(self, str):
LOG.debug('[%s] %s' % (self.instance.instance_name, str))
class BulkInstanceInteropHelper(InstanceInteropHelper):
def __init__(self, instance):
super(BulkInstanceInteropHelper, self).__init__(instance)
self.proc = procutils.start_subprocess()
try:
procutils.run_in_subprocess(self.proc, _connect,
self._get_conn_params())
except Exception:
with excutils.save_and_reraise_exception():
procutils.shutdown_subprocess(self.proc, _cleanup)
def close(self):
procutils.shutdown_subprocess(self.proc, _cleanup)
def _run(self, func, *args, **kwargs):
return procutils.run_in_subprocess(self.proc, func, args, kwargs)
def _run_s(self, func, timeout, *args, **kwargs):
return self._run_with_log(func, timeout, *args, **kwargs)
class SshRemoteDriver(remote.RemoteDriver):
def get_type_and_version(self):
return "ssh.1.0"
def setup_remote(self, engine):
global _global_remote_semaphore
global INFRA
_global_remote_semaphore = semaphore.Semaphore(
CONF.global_remote_threshold)
INFRA = engine
def get_remote(self, instance):
return InstanceInteropHelper(instance)
def get_userdata_template(self):
# SSH does not need any instance customization
return ""
|
|
"""
Band Selective Homonuclear CP calculator
Created on 22.05.2014
Modified 02.07.2014 W.T. Franks FMP Berlin
@author: Venita Daebel
@copyright: Bruker Biospin Corporation
"""
# Import Important stuff
import de.bruker.nmr.mfw.root as root
import os, re, string, math, sys, copy
sys.path.append(root.UtilPath.getTopspinHome()+ '/exp/stan/nmr/py/BioPY/modules/')
import TopCmds, Setup
import IntShape
import FREQ as fq
import GetNUCs as NUC
import DREAM
from GetLib import pul
# Dictionary keys (from GetLib) that point to the actual parameter names
#'aCbsh' :'SP 26' ,'aHbshDc':'PL 13' ,'pCbsh' :'P 50' ,'sCbsh' :'SPNAM 44',\
#'pCbshFlp':'P 28' ,'pCbsh2kFlp':'P 29'
# define special characters. (I wonder if we can put the "<html><small><sup>13</sup></small>C 90° pulse length</html>" string here.)
deg=u"\u00b0"
ret=u"\u000D"
spc=u"\u0020"
unb=u"\u005f"
crt=u"\u005e"
"""
Now we define the "wrapper" function. This is so we can re-use the same calculations,
to keep track of the variable names, and to have a convenient place to modify the text
that the user will see.
This is probably not needed in this case, but it is how most of the code is written
Note, the variable "units" was "WdB" in the user script
"""
def Cmatch():
aa, App, Bpp = DREAM.Nucl()
#TopCmds.MSG(str(aa)+"\n"+str(App)+"\n"+str(Bpp))
SUM=0
for val in App:
SUM=SUM+val
AvgA=SUM/len(App)
#TopCmds.MSG(str(AvgA))
SUM=0
for val in Bpp:
SUM=SUM+val
AvgB=SUM/len(Bpp)
#TopCmds.MSG(str(AvgB))
return math.fabs(AvgA-AvgB)
def CACO(units):
Title="CA-CO BSH Homonuclear CP - Input"
SuTit="Band C-C Selective CP"
Label=["Match Field","Ramp Shape","Offset","Contact",
"Decoupling Field"]
In =Title,SuTit,Label
Title="BSH CP - Output"
Label="13C","1H"
Out =Title,Label
ppm=Cmatch()
CalBSH('pC90',ppm,'aCbsh','aHbshDc','pCbsh','sCbsh','ramp.100','pCbshFlp','pCbsh2kFlp',units,In,Out)
def CalBSH(p90,ppm,amp,ampD,Cnct,shp,dfltramp,pflp,pflp2k,units,In,Out):
"""
p90 : Dictionary Key for Nucleus 90 degree pulse; determines Nuc (Decoupling flag)
ppm : float of ppm difference
amp : dict key for CP amp
ampD : dict key for Decoupler (assumed to be 1H) or "empty"
Cnct : dict key for CP contact
shp : dict key of CP shape file
    dfltramp: Default ramp shape file name
pflp : dict key for trim pulse
pflp2k: dict key for flip back pulse
units : Watts (W) or decibel (dB)
In : Title, Subtitle, and Label for Input Dialog
Out : Title and Label for Selection/Confirmation Window
"""
P90 =pul.GetPar(p90,"")
P90D=pul.GetPar('pH90',"")
if p90.find('H') >= 0:Amp=pul.GetPar('aH',units); nuc="1H"
if p90.find('C') >= 0:Amp=pul.GetPar('aC',units); nuc="13C"
if p90.find('N') >= 0:Amp=pul.GetPar('aN',units); nuc="15N"
frq=fq.fq(nuc,1)
AmpD =pul.GetPar('aH',units)
i=0
Nucs=NUC.list()
for label in Nucs:
if label==nuc:frq=fq.fq(nuc,i+1)
i=i+1
SP =pul.GetPar(shp,"")
MAS =pul.GetPar('MAS',"")/1000. #kHz not Hz
CNCT=pul.GetPar(Cnct,"")
    ## Clamp the contact time to a sensible range (1000-10000 us, i.e. 1-10 ms)
if CNCT <= 1. : CNCT = 1000.
if CNCT >= 10000.: CNCT = 10000.
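    # Maximum B1 field in Hz from a 90-degree pulse length given in us:
    # B1max = 1/(4*P90) = 1e6/(4*P90[us])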
MaxB1 = 1000000./4./P90
MaxB1D = 1000000./4./P90D
##Set Decoupler if Appropriate
if nuc!="1H":
AmpD0=pul.GetPar(ampD,"dB")
B1_0 = MaxB1D*(math.pow(10,(AmpD-AmpD0)/20.))
if B1_0 > 100. : Dcond='% .1f' % B1_0
if B1_0 > MaxB1D: Dcond='85000.0'
if B1_0 <= 100. : Dcond='85000.0'
#Use a reasonable ramp name (passed in as dfltramp)
if SP == "gauss" or SP == "None" or SP == "" or SP == "0" :
pul.SetPar(shp,dfltramp,"")
TopCmds.XCMD(pul.xcmd_name(pul.pulDict[shp]))
SP=pul.GetPar(shp,"")
## change everything into dB for calculations.
if units == "W":
Amp =Setup.WtodB(Amp)
AmpD=Setup.WtodB(AmpD)
bf = math.floor(float(frq.bf))
ppm=float(str('%.0f' %ppm))
changedPPM='y'
while changedPPM=='y':
#TopCmds.MSG(str(ppm)+":ppm bf:"+str(bf))
DHz = ( float(ppm) * bf )/1000.
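        # BSH-CP matching RF field (kHz), computed here from the spinning rate
        # MAS (kHz) and half the shift difference DHz (kHz): (MAS^2 - (DHz/2)^2)/MAS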
bshkHz = round(float(((MAS*MAS)-((DHz * DHz)/4))/MAS),2)
if nuc != "1H":
index=TopCmds.INPUT_DIALOG(In[0],In[1],In[2],\
[str('%.2f' %bshkHz),SP,str('%.0f' %ppm),\
str('%.0f' %(CNCT/1000.)),str('%.2f' %(float(Dcond)/1000.))],\
["kHz","","ppm","ms","kHz"],\
["1","1","1","1","1"],\
["Accept","Close"], [spc,ret], 10)
Dcond=float(index[4])*1000.
if nuc == "1H":
index=TopCmds.INPUT_DIALOG(In[0],In[1],In[2],\
[str('%.2f' %bshkHz),str(SP),str('%.2f' %ppm),\
                str('%.0f' %(CNCT/1000.))],\
["kHz","","ppm","ms"],\
["1","1","1","1"],\
["Accept","Close"], [spc,ret], 10)
bshkHz=float(index[0])
SP=index[1]
Cnct=float(index[3])*1000.
if str('%.2f' %ppm) == str('%.2f' %float(index[2])): changedPPM='n'
if str('%.2f' %ppm) != str('%.2f' %float(index[2])): changedPPM='y'; ppm=float(index[2])
DHz = ( float(ppm) * bf )/1000.
flip = (math.atan(DHz / bshkHz) *180) / 3.1415
pflip = round( ((P90 * flip) / 90.) , 2)
#TopCmds.MSG(str(P90)+" "+str(flip)+" "+str(pflip)+" "+str(DHz)+" "+str(bshkHz))
flip2k = (DHz * DHz) / bshkHz
pflip2k= round( ( (0.25 / flip2k) * 1000),2)
w1bsh=float(index[0])
adjust= 20*(math.log10(w1bsh*1000/MaxB1))
AmpBsh= Amp-adjust
# Adjust for the ramp.
if SP == "Unused":
AvgAmp=1.
else:
AvgAmp=IntShape.Integrate(SP)/100.
AmpBsh = AmpBsh - 20*(math.log10(1./AvgAmp))
# For the Decoupling
if nuc != "1H":
AmpDec= Setup.DecSafely(Dcond,ampD,MaxB1D,150000.,AmpD,units)
# Convert to Watts, if wanted
if units == "W":
AmpBsh=Setup.dBtoW(AmpBsh)
if nuc!="1H":Damp=Setup.dBtoW(AmpDec)
if nuc == "1H" :
value = TopCmds.SELECT(Out[0],"This will set\n "+\
Out[1]+" power ("+pul.pulDict[amp]+") to: " + str('%3.2f' %AmpBsh)+" "+ units+"\n"+\
"With shape (" + pul.pulDict[shp] + ") of "+ str(SP) +"\n"\
"Flip pulse (" + pul.pulDict[pflp] + ") of "+ str(pflip) +"us\n"\
"Flip pulse 2k(" + pul.pulDict[pflp2k] + ") of "+ str(pflip2k) +"us\n",\
["Update", "Keep Previous"],[spc,ret])
else:
value = TopCmds.SELECT(Out[0],\
"This will set\n "+\
Out[1][0]+" power ("+pul.pulDict[amp]+") to: " + str('%3.2f' %AmpBsh)+" "+ units+"\n"+\
Out[1][1]+" power ("+pul.pulDict[ampD]+") to: " + str('%3.2f' %AmpD)+" "+ units+"\n"+\
"With shape (" + pul.pulDict[shp] + ") of "+ str(SP) +"\n"\
"Flip pulse (" + pul.pulDict[pflp] + ") of "+ str(pflip) +"us\n"\
"Flip pulse 2k(" + pul.pulDict[pflp2k] + ") of "+ str(pflip2k) +"us\n",\
["Update", "Keep Previous"],[spc,ret])
if value !=1:
pul.SetPar(amp,AmpBsh,units)
pul.SetPar(shp,SP,units)
pul.SetPar(pflp,pflip,units)
pul.SetPar(pflp2k,pflip2k,units)
if nuc == "1H" :
pul.SetPar(ampD,AmpD,units)
return
|
|
# extdiff.py - external diff program support for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or a revision with the working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type "hg extdiff -p kdiff3" every time. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
You can use -I/-X and a list of file or directory names as with the
normal "hg diff" command. The extdiff extension makes snapshots of only
the needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import cmdutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
def snapshot(ui, repo, files, node, tmproot):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files.'''
dirname = os.path.basename(repo.root)
if dirname == "":
dirname = "root"
if node is not None:
dirname = '%s.%s' % (dirname, short(node))
base = os.path.join(tmproot, dirname)
os.mkdir(base)
if node is not None:
ui.note(_('making snapshot of %d files from rev %s\n') %
(len(files), short(node)))
else:
ui.note(_('making snapshot of %d files from working directory\n') %
(len(files)))
wopener = util.opener(base)
fns_and_mtime = []
ctx = repo[node]
for fn in files:
wfn = util.pconvert(fn)
if not wfn in ctx:
# File doesn't exist; could be a bogus modify
continue
ui.note(' %s\n' % wfn)
dest = os.path.join(base, wfn)
fctx = ctx[wfn]
data = repo.wwritedata(wfn, fctx.data())
if 'l' in fctx.flags():
wopener.symlink(data, wfn)
else:
wopener(wfn, 'w').write(data)
if 'x' in fctx.flags():
util.set_flags(dest, False, True)
if node is None:
fns_and_mtime.append((dest, repo.wjoin(fn), os.path.getmtime(dest)))
return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
args = ' '.join(diffopts)
do3way = '$parent2' in args
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = repo.lookup(change)
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = cmdutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.parents()[1]
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
matcher = cmdutil.match(repo, pats, opts)
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
if do3way:
mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
else:
dir1b = None
fns_and_mtime = []
        # If node2 is not the wc or there is >1 change, copy it
dir2root = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
elif len(common) > 1:
#we only actually need to get the files to copy back to
#the working dir in this case (because the other cases
#are: diffing 2 revisions or single file -- in which case
#the file is already directly passed to the diff tool).
dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(dir1a, common_file)
if not os.path.isfile(os.path.join(tmproot, dir1a)):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(dir1b, common_file)
if not os.path.isfile(os.path.join(tmproot, dir1b)):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b, child=dir2)
def quote(match):
key = match.group()[1:]
if not do3way and key == 'parent2':
return ''
return util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = '\$(parent2|parent1?|child)'
if not do3way and not re.search(regex, args):
args += ' $parent1 $child'
args = re.sub(regex, quote, args)
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
util.system(cmdline, cwd=tmproot)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.path.getmtime(copy_fn) != mtime:
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
return dodiff(ui, repo, program, option, pats, opts)
cmdtable = {
"extdiff":
(extdiff,
[('p', 'program', '', _('comparison program to run')),
('o', 'option', [], _('pass option to comparison program')),
('r', 'rev', [], _('revision')),
('c', 'change', '', _('change made by revision')),
] + commands.walkopts,
_('hg extdiff [OPT]... [FILE]...')),
}
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
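        # three config forms are accepted (see the module docstring):
        #   cmd.X = program  (+ optional opts.X)  -- the old style
        #   opts.X = ...     -- handled together with cmd.X, so skipped here
        #   X = program [options]                 -- the one-line shorthand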
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
path = cmd
diffopts = ui.config('extdiff', 'opts.' + cmd, '')
diffopts = diffopts and [diffopts] or []
elif cmd.startswith('opts.'):
continue
else:
# command = path opts
if path:
diffopts = shlex.split(path)
path = diffopts.pop(0)
else:
path, diffopts = cmd, []
def save(cmd, path, diffopts):
'''use closure to save diff command to use'''
def mydiff(ui, repo, *pats, **opts):
return dodiff(ui, repo, path, diffopts + opts['option'],
pats, opts)
doc = _('''\
use %(path)s to diff repository (or selected files)
Show differences between revisions for the specified files, using
the %(path)s program.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.\
''') % dict(path=util.uirepr(path))
# We must translate the docstring right away since it is
# used as a format string. The string will unfortunately
# be translated again in commands.helpcmd and this will
# fail when the docstring contains non-ASCII characters.
# Decoding the string to a Unicode string here (using the
# right encoding) prevents that.
mydiff.__doc__ = doc.decode(encoding.encoding)
return mydiff
cmdtable[cmd] = (save(cmd, path, diffopts),
cmdtable['extdiff'][1][1:],
_('hg %s [OPTION]... [FILE]...') % cmd)
|
|
import pandas as pd
from collections import defaultdict
import math
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import os
#SDSS MOC4 data file
path = 'ADR4.dat'
#solar colors (reverse calculated from Carvano)
#reference to g
solar_color_ug = 3.81
solar_color_rg = 2.04
solar_color_ig = 1.94
solar_color_zg = 1.90
solar_color_gg = 2.5 #to make LRgg = 1
#4.27, 2.96, 2.5, 2.4, 2.36
#2.32, 0.46, 0, -0.1, -0.14
#reference to r
solar_color_ur = solar_color_ug - solar_color_rg
solar_color_gr = solar_color_gg - solar_color_rg
solar_color_rr = 0.0
solar_color_ir = solar_color_ig - solar_color_rg
solar_color_zr = solar_color_zg - solar_color_rg
#print solar_color_ur, solar_color_gr, solar_color_rr, solar_color_ir, solar_color_zr
#os.sys.exit(1)
#sdss wavelengths (microns)
#0.354, 0.477, 0.6230, 0.7630 and 0.913 um
u_wavelength=0.3543
g_wavelength=0.4770
r_wavelength=0.6231
i_wavelength=0.7625
z_wavelength=0.9134
#carvano taxonomy limits
#TAX LRug LRgg LRrg LRig LRzg CGguL CGguU CGrgL CGrgU CGirL CGirU CGziL CGziU
# O 0.884 1.000 1.057 1.053 0.861 0.784 1.666 0.175 0.505 -0.143 0.106 -0.833 -0.467
# V 0.810 1.000 1.099 1.140 0.854 1.087 2.095 0.511 2.374 -0.077 0.445 -2.018 -0.683
# Q 0.842 1.000 1.082 1.094 0.989 0.757 2.122 0.421 0.967 -0.032 0.229 -0.719 -0.200
# S 0.839 1.000 1.099 1.148 1.096 0.868 1.960 0.379 0.910 0.148 0.601 -0.530 -0.047
# A 0.736 1.000 1.156 1.209 1.137 1.264 4.210 0.937 1.342 0.151 0.505 -0.521 -0.089
# C 0.907 1.000 1.008 1.011 1.021 0.385 1.990 -0.140 0.403 -0.203 0.202 -0.221 0.259
# X 0.942 1.000 1.029 1.063 1.073 0.178 1.081 -0.089 0.481 0.136 0.478 -0.182 0.187
# L 0.858 1.000 1.071 1.109 1.116 0.913 2.089 0.253 0.871 0.136 0.622 -0.125 0.160
# D 0.942 1.000 1.075 1.135 1.213 0.085 1.717 -0.080 0.589 0.142 0.625 0.121 0.502
LR_means = {}
LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
LR_means['V'] = {'LRug': 0.810, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.140, 'LRzg': 0.854}
LR_means['Q'] = {'LRug': 0.842, 'LRgg': 1.000, 'LRrg': 1.082, 'LRig': 1.094, 'LRzg': 0.989}
LR_means['S'] = {'LRug': 0.839, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.148, 'LRzg': 1.096}
LR_means['A'] = {'LRug': 0.736, 'LRgg': 1.000, 'LRrg': 1.156, 'LRig': 1.209, 'LRzg': 1.137}
LR_means['C'] = {'LRug': 0.907, 'LRgg': 1.000, 'LRrg': 1.008, 'LRig': 1.011, 'LRzg': 1.021}
LR_means['X'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.029, 'LRig': 1.063, 'LRzg': 1.073}
LR_means['L'] = {'LRug': 0.858, 'LRgg': 1.000, 'LRrg': 1.071, 'LRig': 1.109, 'LRzg': 1.116}
LR_means['D'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.075, 'LRig': 1.135, 'LRzg': 1.213}
#K type calc from Wabash 2453
LR_means['K'] = {'LRug': 0.871, 'LRgg': 1.000, 'LRrg': 1.053, 'LRig': 1.088, 'LRzg': 1.077}
#calc slope and bd (Carvano 2015) for the mean taxonomic shapes (Carvano 2011)
#LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
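#for each mean class shape: convert the log reflectances (relative to g) back to
#colors, re-reference them to the r band, exponentiate to linear reflectance
#ratios, then compute the Carvano 2015 slope and bd parameters for that class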
log_mean=open('moc4.mean.txt', 'w')
log_mean.write('%s,%f,%f,%f,%f,%f\n'%('space', u_wavelength, g_wavelength, r_wavelength, i_wavelength, z_wavelength))
log_mean.write('%s,%s,%s,%s,%s,%s,%s,%s\n'%('class', 'Rur', 'Rgr', 'Rrr', 'Rir', 'Rzr', 'slope', 'bd'))
for key in LR_means:
LRug = LR_means[key]['LRug']
LRgg = LR_means[key]['LRgg']
LRrg = LR_means[key]['LRrg']
LRig = LR_means[key]['LRig']
LRzg = LR_means[key]['LRzg']
#
Cug = -2.5*LRug
Cgg = -2.5*LRgg
Crg = -2.5*LRrg
Cig = -2.5*LRig
Czg = -2.5*LRzg
#
Cur = Cug - Crg
Cgr = Cgg - Crg
Crr = 0.0
Cir = Cig - Crg
Czr = Czg - Crg
#
LRur = -Cur/2.5
LRgr = -Cgr/2.5
LRrr = -Crr/2.5
LRir = -Cir/2.5
LRzr = -Czr/2.5
#
Rur = pow(10,LRur)
Rgr = pow(10,LRgr)
Rrr = pow(10,LRrr)
Rir = pow(10,LRir)
Rzr = pow(10,LRzr)
#Carvano 2015 parameters
slope = (Rir-Rgr)/(i_wavelength-g_wavelength)
bd = Rzr - Rir
log_mean.write('%s,%f,%f,%f,%f,%f,%f,%f\n'%(key, Rur, Rgr, Rrr, Rir, Rzr, slope, bd))
log_mean.close()
CG_limits = {}
CG_limits['O'] = {'CGguL': 0.784, 'CGguU': 1.666, 'CGrgL': 0.175, 'CGrgU': 0.505, 'CGirL':-0.143, 'CGirU': 0.106, 'CGziL': -0.833, 'CGziU': -0.467}
CG_limits['V'] = {'CGguL': 1.087, 'CGguU': 2.095, 'CGrgL': 0.511, 'CGrgU': 2.374, 'CGirL':-0.077, 'CGirU': 0.445, 'CGziL': -2.018, 'CGziU': -0.683}
CG_limits['Q'] = {'CGguL': 0.757, 'CGguU': 2.122, 'CGrgL': 0.421, 'CGrgU': 0.967, 'CGirL':-0.032, 'CGirU': 0.229, 'CGziL': -0.719, 'CGziU': -0.200}
CG_limits['S'] = {'CGguL': 0.868, 'CGguU': 1.960, 'CGrgL': 0.379, 'CGrgU': 0.910, 'CGirL': 0.148, 'CGirU': 0.601, 'CGziL': -0.530, 'CGziU': -0.047}
CG_limits['A'] = {'CGguL': 1.264, 'CGguU': 4.210, 'CGrgL': 0.937, 'CGrgU': 1.342, 'CGirL': 0.151, 'CGirU': 0.505, 'CGziL': -0.521, 'CGziU': -0.089}
CG_limits['C'] = {'CGguL': 0.385, 'CGguU': 1.990, 'CGrgL':-0.140, 'CGrgU': 0.403, 'CGirL':-0.203, 'CGirU': 0.202, 'CGziL': -0.221, 'CGziU': 0.259}
CG_limits['X'] = {'CGguL': 0.178, 'CGguU': 1.081, 'CGrgL':-0.089, 'CGrgU': 0.481, 'CGirL': 0.136, 'CGirU': 0.478, 'CGziL': -0.182, 'CGziU': 0.187}
CG_limits['L'] = {'CGguL': 0.913, 'CGguU': 2.089, 'CGrgL': 0.253, 'CGrgU': 0.871, 'CGirL': 0.136, 'CGirU': 0.622, 'CGziL': -0.125, 'CGziU': 0.160}
CG_limits['D'] = {'CGguL': 0.085, 'CGguU': 1.717, 'CGrgL':-0.080, 'CGrgU': 0.589, 'CGirL': 0.142, 'CGirU': 0.625, 'CGziL': 0.121, 'CGziU': 0.502}
#1 x sigma
#1.243181211 0.516802843 0.357449432 0.074183133
#0.870581826 0.209380322 0.137706511 -0.216456472
#CG_limits['K'] = {'CGguL': 0.870581826, 'CGguU': 1.243181211, 'CGrgL':0.209380322, 'CGrgU': 0.516802843, 'CGirL': 0.137706511, 'CGirU': 0.357449432, 'CGziL': -0.216456472, 'CGziU': 0.074183133}
#2x sigma
#1.429480904 0.670514103 0.467320892 0.219502936
#0.684282133 0.055669061 0.027835051 -0.361776275
CG_limits['K'] = {'CGguL': 0.684282133, 'CGguU': 1.429480904, 'CGrgL':0.055669061, 'CGrgU': 0.670514103, 'CGirL': 0.027835051, 'CGirU': 0.467320892, 'CGziL': -0.361776275, 'CGziU': 0.219502936}
#asteroid dictionary
asteroids = defaultdict(dict)
#===============================================================================
# 1 1 - 7 moID Unique SDSS moving-object ID
# 2 8 - 13 Run SDSS object IDs, for details see SDSS EDR paper
# 3 14 - 15 Col
# 4 16 - 20 Field
# 5 21 - 26 Object
# 6 27 - 35 rowc Pixel row
# 7 36 - 44 colc Pixel col
# -- Astrometry --
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 9 60 - 70 R.A. J2000 right ascension of the object at the time of the (r band) SDSS observation
# 10 71 - 81 Dec J2000 declination of the object at the time of the (r band) SDSS observation
# 11 82 - 92 Lambda Ecliptic longitude at the time of observation
# 12 93 - 103 Beta Ecliptic latitude at the time of observation
# 13 104 - 115 Phi Distance from the opposition at the time of observation
# 14 117 - 124 vMu The velocity component parallel to the SDSS scanning direction, and its error (deg/day)
# 15 125 - 131 vMu Error
# 16 132 - 139 vNu The velocity component perpendicular to the SDSS scanning direction, and its error (deg/day)
# 17 140 - 146 vNu Error
# 18 147 - 154 vLambda The velocity component parallel to the Ecliptic (deg/day)
# 19 155 - 162 vBeta The velocity component perpendicular to the Ecliptic (deg/day)
# -- Photometry --
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthetized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthetized from SDSS magnitudes
# -- Identification --
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 37 274 - 276 Detection Counter Detection counter of this object in SDSS data
# 38 277 - 279 Total Detection Count Total number of SDSS observations of this asteroid
# 39 280 - 288 Flags Flags that encode SDSSMOC processing information (internal)
# -- Matching information --
# 40 290 - 300 Computed R.A. Predicted position and magnitude at the time of SDSS observation for an associated known object computed using ASTORB data See a note about an error in the first three releases
# 41 301 - 311 Computed Dec
# 42 312 - 317 Computed App. Mag.
# 43 319 - 326 R Heliocentric distance at the time of observation
# 44 327 - 334 Geocentric Geocentric distance at the time of observation
# 45 335 - 340 Phase Phase angle at the time of observation
# -- Osculating elements --
# 46 342 - 352 Catalog ID Identification of the catalog from which the osculating elements and (H, G) values were extracted
# 47 363 - 368 H Absolute magnitude and slope parameter
# 48 369 - 373 G
# 49 374 - 379 Arc Arc of observations used to derive the elements
# 50 380 - 393 Epoch Osculating elements
# 51 394 - 406 a
# 52 407 - 417 e
# 53 418 - 428 i
# 54 429 - 439 Lon. of asc. node
# 55 440 - 450 Arg. of perihelion
# 56 451 - 461 M
# -- Proper elements --
# 57 463 - 483 Proper elements catalog ID Identification of the catalog from which the proper elements were extracted
# 58 484 - 496 a' Proper elements
# 59 497 - 507 e'
# 60 508 - 518 sin(i')
# 61-124 519 - 646 binary processing flags Only since the 3rd release!!
#===============================================================================
#using pandas with a column specification defined above
col_specification =[ (0, 6), (7, 12), (13, 14), (15, 19), (20, 25), (26, 34), (35, 43), (46, 58), (59, 69), (70, 80), (81, 91), (92, 102), (103, 114), (116, 123), (124, 130), (131, 138), (139, 145), (146, 153), (154, 161), (163, 168), (169, 173), (174, 179), (180, 184), (185, 190), (191, 195), (196, 201), (202, 206), (207, 212), (213, 217), (218, 223), (224, 228), (230, 235), (236, 241), (242, 243), (244, 251), (252, 272), (273, 275), (276, 278), (279, 287), (289, 299), (300, 310), (311, 316), (318, 325), (326, 333), (334, 339), (341, 351), (362, 367), (368, 372), (373, 378), (379, 392), (393, 405), (406, 416), (417, 427), (428, 438), (439, 449), (450, 460), (462, 482), (483, 495), (496, 506), (507, 517), (518, 645)]
print 'Reading SDSS MOC data from %s...'%path
#read all lines from MOC 4 data file
#variables to process big ole MOC4 data file
skipRows = 0
nRowsMax = 100000
nRows=nRowsMax
#is this a known moving object?
id_flag = 0
#track observation and unique asteroid count
asteroid_count = 0
observation_count = 0
#log files
log=open('moc4.log.txt', 'w')
log_tax=open('moc4.tax.txt', 'w')
log_tax_final=open('moc4.tax.final.txt', 'w')
#organize the observations by asteroid
observation={}
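#read the fixed-width file in chunks of nRowsMax rows; skipRows advances each
#pass and a short (or failed) read ends the loop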
while nRows >= nRowsMax:
try:
data = pd.read_fwf(path, colspecs=col_specification, skiprows=skipRows, nrows=nRowsMax, header=None)
except:
break
nRows = data.shape[0]
for irow in range(0,nRows):
id_flag = data.iat[irow, 33]
#is this a known asteroid?
if id_flag == 1:
designation = data.iat[irow, 35]
            if designation not in asteroids:
asteroids[designation]={}
asteroids[designation]['numeration'] = data.iat[irow, 34]
asteroids[designation]['observations'] = []
asteroid_count += 1
#add a new observation to this asteroid
observation={}
observation['moID'] = data.iat[irow, 0]
observation['mjd'] = float(data.iat[irow, 7])
observation['u'] = float(data.iat[irow, 19])
observation['uErr'] = float(data.iat[irow, 20])
observation['g'] = float(data.iat[irow, 21])
observation['gErr'] = float(data.iat[irow, 22])
observation['r'] = float(data.iat[irow, 23])
observation['rErr'] = float(data.iat[irow, 24])
observation['i'] = float(data.iat[irow, 25])
observation['iErr'] = float(data.iat[irow, 26])
observation['z'] = float(data.iat[irow, 27])
observation['zErr'] = float(data.iat[irow, 28])
observation['a'] = float(data.iat[irow, 29])
observation['aErr'] = float(data.iat[irow, 30])
observation['V'] = float(data.iat[irow, 31])
observation['B'] = float(data.iat[irow, 32])
observation['Phase'] = float(data.iat[irow, 44])
#print observation['moID'], observation['Phase']
#calc asteroid colors, relative to g-band and with solar color subtracted
#Cxg = mx - mg - (C(solar)x - C(solar)g)
observation['Cug'] = observation['u'] - observation['g'] - solar_color_ug
observation['Cgg'] = -solar_color_gg
observation['Crg'] = observation['r'] - observation['g'] - solar_color_rg
observation['Cig'] = observation['i'] - observation['g'] - solar_color_ig
observation['Czg'] = observation['z'] - observation['g'] - solar_color_zg
#calc asteroid color error
##propagate errors using quadrature, e.g. for Cug, error is sqrt(uErr*uErr+gErr*gErr)??
##observation['CugErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
##observation['CggErr'] = observation['gErr']
##observation['CrgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['rErr']*observation['rErr'])
##observation['CigErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['iErr']*observation['iErr'])
##observation['CzgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['zErr']*observation['zErr'])
#from the Carvano data, this is what it seems they are doing
observation['CugErr'] = observation['uErr']
observation['CggErr'] = observation['gErr']
observation['CrgErr'] = observation['rErr']
observation['CigErr'] = observation['iErr']
observation['CzgErr'] = observation['zErr']
#calc asteroid log reflectance, relative to g-band
#Cxg = -2.5(logRx-logRg) = -2.5(log(Rx/Rg)) = -2.5*LRx
#LRx = LRxg = -Cxg/2.5
observation['LRug'] = -observation['Cug']/2.5
observation['LRgg'] = 1.0
observation['LRrg'] = -observation['Crg']/2.5
observation['LRig'] = -observation['Cig']/2.5
observation['LRzg'] = -observation['Czg']/2.5
#calc asteroid log reflectance errors by propagating the Cxg errors
observation['LRugErr'] = observation['CugErr']/2.5
observation['LRggErr'] = observation['CggErr']/2.5
observation['LRrgErr'] = observation['CrgErr']/2.5
observation['LRigErr'] = observation['CigErr']/2.5
observation['LRzgErr'] = observation['CzgErr']/2.5
#calc asteroid color gradients, basis of Carvano taxonomy
#CGx = -0.4*(Cxg-C(x-1)g)/(lambdax-lambda(x-1))
observation['CGgu'] = -0.4*(observation['Cgg']-observation['Cug'])/(g_wavelength-u_wavelength)
observation['CGrg'] = -0.4*(observation['Crg']-observation['Cgg'])/(r_wavelength-g_wavelength)
observation['CGir'] = -0.4*(observation['Cig']-observation['Crg'])/(i_wavelength-r_wavelength)
observation['CGzi'] = -0.4*(observation['Czg']-observation['Cig'])/(z_wavelength-i_wavelength)
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])
#observation['CGguErr'] = observation['gErr'] + observation['uErr']
#observation['CGrgErr'] = observation['rErr'] + observation['gErr']
#observation['CGirErr'] = observation['iErr'] + observation['rErr']
#observation['CGziErr'] = observation['zErr'] + observation['iErr']
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])*0.4/(z_wavelength-i_wavelength)
observation['CGguErr'] = math.sqrt(observation['LRggErr']*observation['LRggErr']+observation['LRugErr']*observation['LRugErr'])/(g_wavelength-u_wavelength)
observation['CGrgErr'] = math.sqrt(observation['LRrgErr']*observation['LRrgErr']+observation['LRggErr']*observation['LRggErr'])/(r_wavelength-g_wavelength)
observation['CGirErr'] = math.sqrt(observation['LRigErr']*observation['LRigErr']+observation['LRrgErr']*observation['LRrgErr'])/(i_wavelength-r_wavelength)
observation['CGziErr'] = math.sqrt(observation['LRzgErr']*observation['LRzgErr']+observation['LRigErr']*observation['LRigErr'])/(z_wavelength-i_wavelength)
#observation['CGguErr'] = (observation['gErr']+observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = (observation['rErr']+observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = (observation['iErr']+observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = (observation['zErr']+observation['iErr'])*0.4/(z_wavelength-i_wavelength)
#
#this is for phase angle analysis (Carvano et al. 2015)
#color gradients based on r'
observation['Cur'] = observation['u'] - observation['r'] - solar_color_ur
observation['Cgr'] = observation['g'] - observation['r'] - solar_color_gr
observation['Crr'] = 0.0 #-solar_color_rr
observation['Cir'] = observation['i'] - observation['r'] - solar_color_ir
observation['Czr'] = observation['z'] - observation['r'] - solar_color_zr
#from the Carvano data, this is what it seems they are doing
observation['CurErr'] = observation['uErr']
observation['CgrErr'] = observation['gErr']
observation['CrrErr'] = observation['rErr']
observation['CirErr'] = observation['iErr']
observation['CzrErr'] = observation['zErr']
#calc asteroid reflectance, relative to r-band
#Cxr = -2.5(logRx-logRr) = -2.5(log(Rx/Rr))
#Rx/Rr = 10^(-Cxr/2.5)
observation['Rur'] = pow(10,-observation['Cur']/2.5)
observation['Rgr'] = pow(10, -observation['Cgr']/2.5)
observation['Rrr'] = 1.0
observation['Rir'] = pow(10, -observation['Cir']/2.5)
observation['Rzr'] = pow(10, -observation['Czr']/2.5)
#calc slope and bd parameters from Carvano et al. 2015
#eq 1: Rir-Rgr/(lambdai-lambdag)
#eq 2: Rzr-Rir
observation['slope'] = (observation['Rir']-observation['Rgr'])/(i_wavelength-g_wavelength)
observation['bd'] = observation['Rzr'] - observation['Rir']
#calc asteroid log reflectance errors by propagating the Cxg errors
#observation['RurErr'] = ?
#observation['RgrErr'] = ?
#observation['RrrErr'] = ?
#observation['RirErr'] = ?
#observation['RzrErr'] = ?
#
asteroids[designation]['observations'].append(observation)
#print asteroids[designation]
skipRows += nRows
print 'Read %d row(s).'%(skipRows)
print 'Found %d asteroid(s).'%asteroid_count
print 'Calculating taxonomic classes for each observation...'
log_tax.write('%s,%s,%s,%s,%s,%s,%s,%s\n'%('designation', 'moid', 'phase', 'slope', 'bd', 'class', 'score', 'type'))
for designation in asteroids:
log.write('%s\n'%designation)
print 'Processing observations for %s...'%designation
for observation in asteroids[designation]['observations']:
log.write('\t%s\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n'%('moID', 'LRug', 'LRugErr', 'LRgg', 'LRggErr', 'LRrg', 'LRrgErr', 'LRig', 'LRigErr', 'LRzg', 'LRzgErr'))
log.write('\t%s\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n'%(observation['moID'], observation['LRug'], observation['LRugErr'], observation['LRgg'], observation['LRggErr'], observation['LRrg'], observation['LRrgErr'], observation['LRig'], observation['LRigErr'], observation['LRzg'], observation['LRzgErr']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGgu', 'CGrg', 'CGir', 'CGzi'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGgu'], observation['CGrg'], observation['CGir'], observation['CGzi']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGguErr', 'CGrgErr', 'CGirErr', 'CGziErr'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGguErr'], observation['CGrgErr'], observation['CGirErr'], observation['CGziErr']))
#for this observation, loop through the limits for each taxonomic type
CG_cdf={}
CG_cdf_sum = 0
log.write('\t%s\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n'%('tax', 'score', 'scoregu', 'scorerg', 'scoreir', 'scorezi'))
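        #score this observation against each taxonomic class: model every color
        #gradient as a normal distribution (mean = measured CG, sigma = its
        #propagated error), take the probability mass falling inside the class
        #limits, and multiply the four per-gradient probabilities into one score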
for taxclass in CG_limits:
CGgu_cdf = CGrg_cdf = CGir_cdf = CGzi_cdf = 0.0
#create normal probability density functions for each color gradient, CG; mean is CG value and stddev is error; cdf = cumulative density function
if observation['CGguErr'] > 0:
CGgu_cdf = stats.norm.cdf(CG_limits[taxclass]['CGguU'], loc=observation['CGgu'], scale=observation['CGguErr'])-stats.norm.cdf(CG_limits[taxclass]['CGguL'], loc=observation['CGgu'], scale=observation['CGguErr'])
#print observation['CGgu'], observation['CGguErr'], CG_limits[taxclass]['CGguL'], CG_limits[taxclass]['CGguU'], stats.norm.cdf(CG_limits[taxclass]['CGguL'], loc=observation['CGgu'], scale=observation['CGguErr']), stats.norm.cdf(CG_limits[taxclass]['CGguU'], loc=observation['CGgu'], scale=observation['CGguErr'])
if observation['CGrgErr'] > 0:
CGrg_cdf = stats.norm.cdf(CG_limits[taxclass]['CGrgU'], loc=observation['CGrg'], scale=observation['CGrgErr'])-stats.norm.cdf(CG_limits[taxclass]['CGrgL'], loc=observation['CGrg'], scale=observation['CGrgErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGrgU'], loc=observation['CGrg'], scale=observation['CGrgErr']), stats.norm.cdf(CG_limits[taxclass]['CGrgL'], loc=observation['CGrg'], scale=observation['CGrgErr'])
if observation['CGirErr'] > 0:
CGir_cdf = stats.norm.cdf(CG_limits[taxclass]['CGirU'], loc=observation['CGir'], scale=observation['CGirErr'])-stats.norm.cdf(CG_limits[taxclass]['CGirL'], loc=observation['CGir'], scale=observation['CGirErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGirU'], loc=observation['CGir'], scale=observation['CGirErr']), stats.norm.cdf(CG_limits[taxclass]['CGirL'], loc=observation['CGir'], scale=observation['CGirErr'])
if observation['CGziErr'] > 0:
CGzi_cdf = stats.norm.cdf(CG_limits[taxclass]['CGziU'], loc=observation['CGzi'], scale=observation['CGziErr'])-stats.norm.cdf(CG_limits[taxclass]['CGziL'], loc=observation['CGzi'], scale=observation['CGziErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGziU'], loc=observation['CGzi'], scale=observation['CGziErr']), stats.norm.cdf(CG_limits[taxclass]['CGziL'], loc=observation['CGzi'], scale=observation['CGziErr'])
CG_cdf[taxclass] = CGgu_cdf * CGrg_cdf * CGir_cdf * CGzi_cdf
CG_cdf_sum += CG_cdf[taxclass]
log.write('\t%s\t%f\t%f\t%f\t%f\t%f\n'%(taxclass, CG_cdf[taxclass], CGgu_cdf, CGrg_cdf, CGir_cdf, CGzi_cdf))
#plt.text(0, 0, '%s\t%s'%(observation['moID'],taxclass))
#uncomment to show plots!
#plt.show()
CG_cdf_max = 0.0
CG_cdf_max_taxclass = ''
log.write('\t%s\t%s\ttype\n'%('tax', '%score'))
for taxclass in CG_cdf:
if CG_cdf[taxclass] > CG_cdf_max:
CG_cdf_max_taxclass = taxclass
CG_cdf_max = CG_cdf[taxclass]
#print taxclass, CG_cdf[taxclass]/CG_cdf_sum*100
try:
if CG_cdf_sum > 0:
log.write('\t%s\t%f\n'%(taxclass, (CG_cdf[taxclass]/CG_cdf_sum*100)))
else:
log.write('\t%s\t%f\n'%(taxclass, 0.0))
except:
log.write('ERROR')
if CG_cdf_sum > 0 and CG_cdf_max/CG_cdf_sum >= 0.6:
#CGzi_ave = (CG_limits[CG_cdf_max_taxclass]['CGziU']+CG_limits[CG_cdf_max_taxclass]['CGziL'])/2.0
log_tax.write('%s,%s,%s,%f,%f,%s,%f,single\n'%(designation, observation['moID'], observation['Phase'], observation['slope'], observation['bd'], CG_cdf_max_taxclass, CG_cdf_max))
log.write('\t%s\t%s\n'%('tax', 'score'))
log.write('\t%s\t%f\n'%(CG_cdf_max_taxclass, CG_cdf_max))
#save final tax and score
observation['class'] = CG_cdf_max_taxclass
observation['score'] = CG_cdf_max
else:
comboclass = ''
combocount = 0
comboscoresum = 0.0
comboscore = 0.0
CGzi_ave = 0.0
for taxclass in CG_cdf:
if CG_cdf_sum > 0 and CG_cdf[taxclass]/CG_cdf_sum >= 0.3:
comboclass += taxclass
combocount += 1
comboscoresum += CG_cdf[taxclass]
CGzi_ave += (CG_limits[taxclass]['CGziU']+CG_limits[taxclass]['CGziL'])/2.0
if combocount > 0:
comboscore = comboscoresum/combocount
CGzi_ave = CGzi_ave/combocount
else:
comboclass = 'U'
log_tax.write('%s,%s,%s,%f,%f,%s,%f,combo\n'%(designation, observation['moID'], observation['Phase'], observation['slope'], observation['bd'], comboclass, comboscore))
#log_tax.write('%s\t%s\t%s\t%s\t%f\tcombo\n'%(designation, observation['moID'], observation['Phase'], comboclass, comboscore))
log.write('\tcombo\n')
log.write('\t%s\t%s\n'%('tax', 'score'))
log.write('\t%s\t%f\n'%(comboclass, comboscore))
#save final tax and score
observation['class'] = comboclass
observation['score'] = comboscore
log.write('\t***************************************\n')
#create dictionary to hold asteroid taxonomy counts and high scores
#include U class too
tax_classes = {}
for key in CG_limits:
tax_classes[key] = {}
tax_classes['U'] = {}
print 'Calculating final taxonomic classes for each asteroid...'
for designation in asteroids:
#init this asteroid's counts and high scores
for key in tax_classes:
tax_classes[key]['count'] = 0
tax_classes[key]['high_score'] = 0.0
pearson_rank_slope = None
pearson_rank_bd = None
if len(asteroids[designation]['observations']) > 2:
phase = []
slope = []
bd = []
for observation in asteroids[designation]['observations']:
phase.append(observation['Phase'])
slope.append(observation['slope'])
bd.append(observation['bd'])
#pearson_rank_slope = stats.pearsonr(phase, slope)
#pearson_rank_bd = stats.pearsonr(phase, bd)
#print pearson_rank_slope, pearson_rank_bd
for observation in asteroids[designation]['observations']:
for tax_class in observation['class']:
tax_classes[tax_class]['count'] += 1
if observation['score'] > tax_classes[tax_class]['high_score']:
tax_classes[tax_class]['high_score'] = observation['score']
#print designation, observation['class'], tax_classes
max_count = 0
for key in tax_classes:
#print key, tax_classes[key]
if tax_classes[key]['count'] > max_count:
max_count = tax_classes[key]['count']
#print max_count
max_high_score = 0
final_tax_class = ''
for key in tax_classes:
if tax_classes[key]['count'] == max_count:
final_tax_class += key
if tax_classes[key]['high_score'] > max_high_score:
max_high_score = tax_classes[key]['high_score']
log_tax_final.write('%s\t%s\t%f\n'%(designation, final_tax_class, max_high_score))
log.close()
log_tax.close()
log_tax_final.close()
# 1 1 - 7 moID Unique SDSS moving-object ID
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthetized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthetized from SDSS magnitudes
|
|
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import unittest
import mock
from opencensus.ext.azure import log_exporter
TEST_FOLDER = os.path.abspath('.test.logs')
def setUpModule():
os.makedirs(TEST_FOLDER)
def tearDownModule():
shutil.rmtree(TEST_FOLDER)
def throw(exc_type, *args, **kwargs):
def func(*_args, **_kwargs):
raise exc_type(*args, **kwargs)
return func
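# CustomLogHandler routes export() through a user-supplied callback so the
# tests below can observe exports (or deliberately make them fail) without
# touching the network.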
class CustomLogHandler(log_exporter.BaseLogHandler):
def __init__(self, max_batch_size, callback):
self.export_interval = 1
self.max_batch_size = max_batch_size
self.callback = callback
super(CustomLogHandler, self).__init__(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
)
def export(self, batch):
return self.callback(batch)
class TestBaseLogHandler(unittest.TestCase):
def setUp(self):
os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"] = "true"
return super(TestBaseLogHandler, self).setUp()
def tearDown(self):
del os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"]
return super(TestBaseLogHandler, self).tearDown()
def test_basic(self):
logger = logging.getLogger(self.id())
handler = CustomLogHandler(10, lambda batch: None)
logger.addHandler(handler)
logger.warning('TEST')
handler.flush()
logger.warning('TEST')
logger.removeHandler(handler)
handler.close()
def test_export_exception(self):
logger = logging.getLogger(self.id())
handler = CustomLogHandler(1, throw(Exception))
logger.addHandler(handler)
logger.warning('TEST')
logger.removeHandler(handler)
handler.flush()
handler.close()
class TestAzureLogHandler(unittest.TestCase):
def setUp(self):
os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"] = "true"
return super(TestAzureLogHandler, self).setUp()
def tearDown(self):
del os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"]
return super(TestAzureLogHandler, self).tearDown()
def test_ctor(self):
self.assertRaises(ValueError, lambda: log_exporter.AzureLogHandler(connection_string="", instrumentation_key="")) # noqa: E501
def test_invalid_sampling_rate(self):
with self.assertRaises(ValueError):
log_exporter.AzureLogHandler(
enable_stats_metrics=False,
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=4.0,
)
def test_init_handler_with_proxies(self):
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
proxies='{"https":"https://test-proxy.com"}',
)
self.assertEqual(
handler.options.proxies,
'{"https":"https://test-proxy.com"}',
)
def test_init_handler_with_queue_capacity(self):
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
queue_capacity=500,
)
self.assertEqual(
handler.options.queue_capacity,
500
)
self.assertEqual(
handler._worker._src._queue.maxsize,
500
)
@mock.patch('requests.post', return_value=mock.Mock())
def test_exception(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
try:
return 1 / 0 # generate a ZeroDivisionError
except Exception:
logger.exception('Captured an exception.')
handler.close()
self.assertEqual(len(requests_mock.call_args_list), 1)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('ZeroDivisionError' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_exception_with_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
try:
return 1 / 0 # generate a ZeroDivisionError
except Exception:
properties = {
'custom_dimensions':
{
'key_1': 'value_1',
'key_2': 'value_2'
}
}
logger.exception('Captured an exception.', extra=properties)
handler.close()
self.assertEqual(len(requests_mock.call_args_list), 1)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('ZeroDivisionError' in post_body)
self.assertTrue('key_1' in post_body)
self.assertTrue('key_2' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_export_empty(self, request_mock):
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
handler._export([])
self.assertEqual(len(os.listdir(handler.storage.path)), 0)
handler.close()
@mock.patch('opencensus.ext.azure.log_exporter'
'.AzureLogHandler.log_record_to_envelope')
def test_export_failure(self, log_record_to_envelope_mock):
log_record_to_envelope_mock.return_value = ['bar']
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
with mock.patch('opencensus.ext.azure.log_exporter'
'.AzureLogHandler._transmit') as transmit:
transmit.return_value = 10
handler._export(['foo'])
self.assertEqual(len(os.listdir(handler.storage.path)), 1)
self.assertIsNone(handler.storage.get())
handler.close()
def test_log_record_to_envelope(self):
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
envelope = handler.log_record_to_envelope(mock.MagicMock(
exc_info=None,
levelno=10,
))
self.assertEqual(
envelope.iKey,
'12345678-1234-5678-abcd-12345678abcd')
handler.close()
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_with_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
logger.warning('action', extra={
'custom_dimensions':
{
'key_1': 'value_1',
'key_2': 'value_2'
}
})
handler.close()
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('action' in post_body)
self.assertTrue('key_1' in post_body)
self.assertTrue('key_2' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_with_invalid_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
logger.warning('action_1_%s', None)
logger.warning('action_2_%s', 'arg', extra={
'custom_dimensions': 'not_a_dict'
})
logger.warning('action_3_%s', 'arg', extra={
'notcustom_dimensions': {'key_1': 'value_1'}
})
handler.close()
self.assertEqual(len(os.listdir(handler.storage.path)), 0)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('action_1_' in post_body)
self.assertTrue('action_2_arg' in post_body)
self.assertTrue('action_3_arg' in post_body)
self.assertFalse('not_a_dict' in post_body)
self.assertFalse('key_1' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_sampled(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=1.0,
)
logger.addHandler(handler)
logger.warning('Hello_World')
logger.warning('Hello_World2')
logger.warning('Hello_World3')
logger.warning('Hello_World4')
handler.close()
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('Hello_World' in post_body)
self.assertTrue('Hello_World2' in post_body)
self.assertTrue('Hello_World3' in post_body)
self.assertTrue('Hello_World4' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_not_sampled(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureLogHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=0.0,
)
logger.addHandler(handler)
logger.warning('Hello_World')
logger.warning('Hello_World2')
logger.warning('Hello_World3')
logger.warning('Hello_World4')
handler.close()
self.assertFalse(requests_mock.called)
class TestAzureEventHandler(unittest.TestCase):
def setUp(self):
os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"] = "true"
return super(TestAzureEventHandler, self).setUp()
def tearDown(self):
del os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"]
        return super(TestAzureEventHandler, self).tearDown()
def test_ctor(self):
self.assertRaises(ValueError, lambda: log_exporter.AzureEventHandler(connection_string="", instrumentation_key="")) # noqa: E501
def test_invalid_sampling_rate(self):
with self.assertRaises(ValueError):
log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=4.0,
)
def test_init_handler_with_proxies(self):
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
proxies='{"https":"https://test-proxy.com"}',
)
self.assertEqual(
handler.options.proxies,
'{"https":"https://test-proxy.com"}',
)
def test_init_handler_with_queue_capacity(self):
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
queue_capacity=500,
)
self.assertEqual(
handler.options.queue_capacity,
500
)
# pylint: disable=protected-access
self.assertEqual(
handler._worker._src._queue.maxsize,
500
)
@mock.patch('requests.post', return_value=mock.Mock())
def test_exception(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
try:
return 1 / 0 # generate a ZeroDivisionError
except Exception:
logger.exception('Captured an exception.')
handler.close()
self.assertEqual(len(requests_mock.call_args_list), 1)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('ZeroDivisionError' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_exception_with_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
try:
return 1 / 0 # generate a ZeroDivisionError
except Exception:
properties = {
'custom_dimensions':
{
'key_1': 'value_1',
'key_2': 'value_2'
}
}
logger.exception('Captured an exception.', extra=properties)
handler.close()
self.assertEqual(len(requests_mock.call_args_list), 1)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('ZeroDivisionError' in post_body)
self.assertTrue('key_1' in post_body)
self.assertTrue('key_2' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_export_empty(self, request_mock):
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
handler._export([])
self.assertEqual(len(os.listdir(handler.storage.path)), 0)
handler.close()
@mock.patch('opencensus.ext.azure.log_exporter'
'.AzureEventHandler.log_record_to_envelope')
def test_export_failure(self, log_record_to_envelope_mock):
log_record_to_envelope_mock.return_value = ['bar']
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
with mock.patch('opencensus.ext.azure.log_exporter'
'.AzureEventHandler._transmit') as transmit:
transmit.return_value = 10
handler._export(['foo'])
self.assertEqual(len(os.listdir(handler.storage.path)), 1)
self.assertIsNone(handler.storage.get())
handler.close()
def test_log_record_to_envelope(self):
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
envelope = handler.log_record_to_envelope(mock.MagicMock(
exc_info=None,
levelno=10,
))
self.assertEqual(
envelope.iKey,
'12345678-1234-5678-abcd-12345678abcd')
handler.close()
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_with_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
logger.warning('action', extra={
'custom_dimensions':
{
'key_1': 'value_1',
'key_2': 'value_2'
}
})
handler.close()
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('action' in post_body)
self.assertTrue('key_1' in post_body)
self.assertTrue('key_2' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_with_invalid_custom_properties(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
storage_path=os.path.join(TEST_FOLDER, self.id()),
)
logger.addHandler(handler)
logger.warning('action_1_%s', None)
logger.warning('action_2_%s', 'arg', extra={
'custom_dimensions': 'not_a_dict'
})
logger.warning('action_3_%s', 'arg', extra={
'notcustom_dimensions': {'key_1': 'value_1'}
})
handler.close()
self.assertEqual(len(os.listdir(handler.storage.path)), 0)
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('action_1_' in post_body)
self.assertTrue('action_2_arg' in post_body)
self.assertTrue('action_3_arg' in post_body)
self.assertFalse('not_a_dict' in post_body)
self.assertFalse('key_1' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_sampled(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=1.0,
)
logger.addHandler(handler)
logger.warning('Hello_World')
logger.warning('Hello_World2')
logger.warning('Hello_World3')
logger.warning('Hello_World4')
handler.close()
post_body = requests_mock.call_args_list[0][1]['data']
self.assertTrue('Hello_World' in post_body)
self.assertTrue('Hello_World2' in post_body)
self.assertTrue('Hello_World3' in post_body)
self.assertTrue('Hello_World4' in post_body)
@mock.patch('requests.post', return_value=mock.Mock())
def test_log_record_not_sampled(self, requests_mock):
logger = logging.getLogger(self.id())
handler = log_exporter.AzureEventHandler(
instrumentation_key='12345678-1234-5678-abcd-12345678abcd',
logging_sampling_rate=0.0,
)
logger.addHandler(handler)
logger.warning('Hello_World')
logger.warning('Hello_World2')
logger.warning('Hello_World3')
logger.warning('Hello_World4')
handler.close()
self.assertFalse(requests_mock.called)
|
|
# encoding=utf-8
# Generated by cpy
# 2015-04-20 14:38:56.338378
import os, sys
from sys import stdin, stdout
import socket
class SSDB_Response(object):
pass
def __init__(this, code='', data_or_message=None):
pass
this.code = code
this.data = None
this.message = None
if code=='ok':
pass
this.data = data_or_message
else:
pass
if isinstance(data_or_message, list):
pass
if len(data_or_message)>0:
pass
this.message = data_or_message[0]
else:
pass
this.message = data_or_message
def __repr__(this):
pass
return ((((str(this.code) + ' ') + str(this.message)) + ' ') + str(this.data))
def ok(this):
pass
return this.code=='ok'
def not_found(this):
pass
return this.code=='not_found'
class SSDB(object):
pass
def __init__(this, host, port):
pass
this.recv_buf = ''
this._closed = False
this.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
this.sock.connect(tuple([host, port]))
this.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def close(this):
pass
if not (this._closed):
pass
this.sock.close()
this._closed = True
def closed(this):
pass
return this._closed
def request(this, cmd, params=None):
pass
if params==None:
pass
params = []
params = ([cmd] + params)
this.send(params)
resp = this.recv()
if resp==None:
pass
return SSDB_Response('error', 'Unknown error')
if len(resp)==0:
pass
return SSDB_Response('disconnected', 'Connection closed')
# {{{ switch: cmd
_continue_1 = False
while True:
if False or ((cmd) == 'ping') or ((cmd) == 'qset') or ((cmd) == 'set') or ((cmd) == 'zset') or ((cmd) == 'hset') or ((cmd) == 'qpush') or ((cmd) == 'qpush_front') or ((cmd) == 'qpush_back') or ((cmd) == 'del') or ((cmd) == 'zdel') or ((cmd) == 'hdel') or ((cmd) == 'multi_set') or ((cmd) == 'multi_del') or ((cmd) == 'multi_hset') or ((cmd) == 'multi_hdel') or ((cmd) == 'multi_zset') or ((cmd) == 'multi_zdel') or ((cmd) == 'scan_del'):
pass
if resp[0]=='ok':
pass
if len(resp)>1:
pass
return SSDB_Response(resp[0], int(resp[1]))
else:
pass
return SSDB_Response(resp[0], 1)
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'substr') or ((cmd) == 'get') or ((cmd) == 'getset') or ((cmd) == 'hget') or ((cmd) == 'qfront') or ((cmd) == 'qback') or ((cmd) == 'qget'):
pass
if resp[0]=='ok':
pass
if len(resp)==2:
pass
return SSDB_Response('ok', resp[1])
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'qpop') or ((cmd) == 'qpop_front') or ((cmd) == 'qpop_back'):
pass
if resp[0]=='ok':
pass
size = 1
try:
pass
size = int(params[2])
                    except Exception as e:
pass
if size==1:
pass
if len(resp)==2:
pass
return SSDB_Response('ok', resp[1])
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response('ok', resp[1 : ])
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'dbsize') or ((cmd) == 'getbit') or ((cmd) == 'setbit') or ((cmd) == 'countbit') or ((cmd) == 'bitcount') or ((cmd) == 'strlen') or ((cmd) == 'ttl') or ((cmd) == 'expire') or ((cmd) == 'setnx') or ((cmd) == 'incr') or ((cmd) == 'decr') or ((cmd) == 'zincr') or ((cmd) == 'zdecr') or ((cmd) == 'hincr') or ((cmd) == 'hdecr') or ((cmd) == 'hsize') or ((cmd) == 'zsize') or ((cmd) == 'qsize') or ((cmd) == 'zget') or ((cmd) == 'zrank') or ((cmd) == 'zrrank') or ((cmd) == 'zsum') or ((cmd) == 'zcount') or ((cmd) == 'zavg') or ((cmd) == 'zremrangebyrank') or ((cmd) == 'zremrangebyscore') or ((cmd) == 'hclear') or ((cmd) == 'zclear') or ((cmd) == 'qclear') or ((cmd) == 'qpush') or ((cmd) == 'qpush_front') or ((cmd) == 'qpush_back') or ((cmd) == 'qtrim_front') or ((cmd) == 'qtrim_back'):
pass
if resp[0]=='ok':
pass
if len(resp)==2:
pass
try:
pass
if cmd=='zavg':
pass
val = float(resp[1])
else:
pass
val = int(resp[1])
return SSDB_Response('ok', val)
                        except Exception as e:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'keys') or ((cmd) == 'rkeys') or ((cmd) == 'zkeys') or ((cmd) == 'zrkeys') or ((cmd) == 'hkeys') or ((cmd) == 'hrkeys') or ((cmd) == 'list') or ((cmd) == 'hlist') or ((cmd) == 'hrlist') or ((cmd) == 'zlist') or ((cmd) == 'zrlist'):
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'scan') or ((cmd) == 'rscan') or ((cmd) == 'hgetall') or ((cmd) == 'hscan') or ((cmd) == 'hrscan'):
pass
if resp[0]=='ok':
pass
if len(resp) % 2==1:
pass
data = {'index': [],'items': {},}
i = 1
while i<len(resp):
pass
k = resp[i]
v = resp[(i + 1)]
data['index'].append(k)
data['items'][k] = v
pass
i += 2
return SSDB_Response('ok', data)
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'zscan') or ((cmd) == 'zrscan') or ((cmd) == 'zrange') or ((cmd) == 'zrrange') or ((cmd) == 'zpop_front') or ((cmd) == 'zpop_back'):
pass
if resp[0]=='ok':
pass
if len(resp) % 2==1:
pass
data = {'index': [],'items': {},}
i = 1
while i<len(resp):
pass
k = resp[i]
v = resp[(i + 1)]
try:
pass
v = int(v)
                            except Exception as e:
pass
v = - (1)
data['index'].append(k)
data['items'][k] = v
pass
i += 2
return SSDB_Response('ok', data)
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'auth') or ((cmd) == 'exists') or ((cmd) == 'hexists') or ((cmd) == 'zexists'):
pass
if resp[0]=='ok':
pass
data = False
if len(resp)>=2:
pass
if resp[1]=='1':
pass
data = True
return SSDB_Response(resp[0], data)
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'multi_exists') or ((cmd) == 'multi_hexists') or ((cmd) == 'multi_zexists'):
pass
if resp[0]=='ok':
pass
data = {}
if len(resp) % 2==1:
pass
i = 1
while i<len(resp):
pass
k = resp[i]
if resp[(i + 1)]=='1':
pass
v = True
else:
pass
v = False
data[k] = v
pass
i += 2
return SSDB_Response('ok', data)
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'multi_get') or ((cmd) == 'multi_hget'):
pass
if resp[0]=='ok':
pass
if len(resp) % 2==1:
pass
data = {}
i = 1
while i<len(resp):
pass
k = resp[i]
v = resp[(i + 1)]
data[k] = v
pass
i += 2
return SSDB_Response('ok', data)
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
if False or ((cmd) == 'multi_hsize') or ((cmd) == 'multi_zsize') or ((cmd) == 'multi_zget'):
pass
if resp[0]=='ok':
pass
if len(resp) % 2==1:
pass
data = {}
i = 1
while i<len(resp):
pass
k = resp[i]
v = int(resp[(i + 1)])
data[k] = v
pass
i += 2
return SSDB_Response('ok', data)
else:
pass
return SSDB_Response('server_error', 'Invalid response')
else:
pass
return SSDB_Response(resp[0], resp[1 : ])
break
### default
return SSDB_Response(resp[0], resp[1 : ])
break
break
if _continue_1:
continue
# }}} switch
return SSDB_Response('error', 'Unknown error')
def send(this, data):
pass
ps = []
_cpy_r_0 = _cpy_l_1 = data
if type(_cpy_r_0).__name__ == 'dict': _cpy_b_3=True; _cpy_l_1=_cpy_r_0.iterkeys()
else: _cpy_b_3=False;
for _cpy_k_2 in _cpy_l_1:
if _cpy_b_3: p=_cpy_r_0[_cpy_k_2]
else: p=_cpy_k_2
pass
p = str(p)
ps.append(str(len(p)))
ps.append(p)
nl = '\n'
s = (nl.join(ps) + '\n\n')
try:
pass
while True:
pass
ret = this.sock.send(s)
if ret==0:
pass
return - (1)
s = s[ret : ]
if len(s)==0:
pass
break
        except socket.error as e:
pass
return - (1)
return ret
def net_read(this):
pass
try:
pass
data = this.sock.recv(1024 * 8)
        except Exception as e:
pass
data = ''
if data=='':
pass
this.close()
return 0
this.recv_buf += data
return len(data)
def recv(this):
pass
while True:
pass
ret = this.parse()
if ret==None:
pass
if this.net_read()==0:
pass
return []
else:
pass
return ret
def parse(this):
pass
ret = []
spos = 0
epos = 0
while True:
pass
spos = epos
epos = this.recv_buf.find('\n', spos)
if epos==- (1):
pass
break
epos += 1
line = this.recv_buf[spos : epos]
spos = epos
if line.strip()=='':
pass
if len(ret)==0:
pass
continue
else:
pass
this.recv_buf = this.recv_buf[spos : ]
return ret
try:
pass
num = int(line)
            except Exception as e:
pass
return []
epos = (spos + num)
if epos>len(this.recv_buf):
pass
break
data = this.recv_buf[spos : epos]
ret.append(data)
spos = epos
epos = this.recv_buf.find('\n', spos)
if epos==- (1):
pass
break
epos += 1
return None
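# Hedged usage sketch (not generated by cpy): it assumes an SSDB server is
# reachable on localhost:8888; the host, port, key and value below are
# illustrative placeholders only.
if __name__ == '__main__':
    ssdb = SSDB('127.0.0.1', 8888)
    set_resp = ssdb.request('set', ['greeting', 'hello'])
    if set_resp.ok():
        get_resp = ssdb.request('get', ['greeting'])
        print(get_resp)
    ssdb.close()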
|
|
"""
Collection of functions to simulate the dynamics of water penetration
into a multi-layer package.
The user input (typically from a GUI) defines the types of layers, such as
bottle, bag, drum, ...etc. and the components inside each layer such as
desiccant (e.g. silica), tablets, powder, ...etc. The module looks up auxiliary
data from a system file to calculate the permeability of each layer and the
moisture isotherms for the components. With the extra auxiliary data, the
moisture dynamics are simulated in the package. A minimal usage sketch is
included at the bottom of this module.
Functions:
simulate(package, rh_ext, duration) - Main function to simulate dynamics.
make_package(layers, temperature) - Creates a variable with the minimal
necessary parameters for the simulation.
get_package_constants(package) - Retrieves properties for defined packages.
get_GAB_constants(materials) - Retrieves isotherm parameters for known
materials.
_calc_permeability(layer, temperature) - Calculates the permeability for a
particular material.
_calc_isotherm(layer, temperature) - Calculate the moisture isotherm for the
contents in a package.
_calc_product_water(product, aw) - Calculate the amount of water in a product
composition at a given water activity.
_calc_desiccant_water(desiccant, aw) - Calculate the amount of water in a
desiccant inside a package layer.
_calc_vapor_water(layer, aw, temperature) - Calculate the amount of water
in the head space of a package layer.
_calc_layer_water(layer, aw, temperature) - Calculate the total amount of
water inside a package layer.
_calc_content_volume(layer) - Calculate the volume of contents in a package
layer.
_calc_package_volume(layer) - Calculate the volume of a package layer.
_GAB(aw, Wm, C, K) - Calculate the moisture content of a material for given
isotherm parameters.
_calc_initial_water(layer) - Calculate the initial water in a package layer.
"""
import numpy as np
import pandas as pd
from scipy import interpolate
def get_package():
"""
Create a dictionary that represents the data supplied from
the web based user interface.
"""
package = {
"start_date": "2016-04-01",
"end_date": "2017-04-01",
"rh": "40",
"temperature": "30",
"layers": [{
"container": {
"ID": "BOTTLE_45CC",
"seal": "UNACTIVATED",
"volume": "None",
"area": "None",
"initial_relative_humidity": "30"
},
"desiccant": {
"type": "SILICA",
"mass": "10",
"initial_water_content": "20",
"initial_water_activity": "None",
"density": "1.0"
},
"product": {
"units": "100",
"unit_mass": "20.",
"bulk_density": "1.",
"initial_water_content": "None",
"initial_water_activity": "0.1",
"components": [
{"name": "GENERIC_API",
"frac": "0.1"},
{"name": "MANNITOL",
"frac": "0.3"},
{"name": "LACTOSE_REGULAR",
"frac": "0.6"}
]
}
}],
"events": [
{
"date": "2016-04-01",
"layer": "0",
"ID": "replace_desiccant",
"desiccantWater": "0.2",
"frequency": "WEEKLY"
},
{
"date": "2016-04-03",
"layer": "0",
"ID": "remove_product",
"units": 1,
"frequency": "DAILY"
}
]
}
return package
def get_package_constants(package):
"""
Get the constants for the package from a parameter file residing on the
server.
Args:
package (string): Name of package.
Returns:
pandas DataFrame: parameters for package stored from server data
"""
store = pd.HDFStore("simulation_constants.hdf", mode="r")
package_constants = store["package_properties"]
store.close()
return package_constants.loc[package]
def get_GAB_constants(materials):
"""
Get the GAB constants for list of materials from a parameter file
residing on the server.
Args:
materials (list): List of string names of materials.
Returns:
pandas DataFrame: GAB parameters for materials stored from server data.
"""
store = pd.HDFStore("simulation_constants.hdf", mode="r")
gab_constants = store["GAB_constants"]
store.close()
return gab_constants.loc[materials]
def _calc_permeability(layer, temperature):
"""
Calculate the permeability of a layer provided the layer details and
the temperature.
Args:
layer (dict): Dictionary of layer properties.
temperature (float): Temperature of the layer in degrees C.
Returns:
float: The permeability of the container (mg/day/deltaRH)
"""
container = layer["container"]
product = layer["product"]
desiccant = layer["desiccant"]
# Get transport coefficient from data file.
transport_coef = get_package_constants([container["ID"]])[
["intercept", "slope"]]
# Need some extra parameters for LDPE (i.e. area, thickness) to
# calculate the permeability of LDPE.
if container["ID"] == "LDPE":
# Check or calculate the area
if container["area"] != "None":
area = float(container["area"])
elif container["volume"] != "None":
# Calculate the area from volume assuming sphere.
volume = float(container["volume"])
area = 4 * np.pi * (3 * volume / (4 * np.pi))**(2./3.)
else:
# Calculate area from product and desiccant volume.
            volume = _calc_content_volume(layer)
area = 4 * np.pi * (3 * volume / (4 * np.pi))**(2./3.)
# Check or assign a thickness.
if container["thickness"] != "None":
thickness = float(container["thickness"])
else:
thickness = 4.
permeability = np.exp(transport_coef["intercept"] +
transport_coef["slope"] / float(temperature)) * \
area * 4. / (10. * thickness)
# Calculate the permeability of non-LDPE containers.
else:
        # See if the container has an extra parameter describing the seal;
        # without a seal entry, fall back to no seal correction.
        f1 = 1.
        f2 = 0.
        if container["seal"] != "None":
            if container["seal"] == "UNACTIVATED":
                f1 = 1.20923913
                f2 = 0.
            elif container["seal"] == "BROACHED":
                f1 = 1.
                f2 = 0.02072
permeability = np.exp(transport_coef["intercept"] +
transport_coef["slope"] / float(temperature)) * \
f1 + f2
return permeability
def _calc_isotherm(layer, temperature):
"""
Calculate the isotherm of the layer - the total mass of water
as a function of water activity.
Args:
layer (dict): A dictionary containing the contents of the layer.
temperature (float): Temperature of the layer (C).
Returns:
tuple: A pair of functions that calculate the layer isotherm:
first function takes water activity and returns total water (mg).
second function takes total water (mg) and returns water activity.
"""
# Define a range of water activities.
aw = np.arange(0.01, 0.99, 0.01)
# Calculate the total water in the layer.
total_water = [_calc_layer_water(layer, aw_i, temperature) for aw_i in aw]
# Calculate an interpolation function to get water for a given aw.
isotherm = interpolate.interp1d(aw, total_water)
# Calculate an interpolation function to get aw for a given water amount.
reverse_isotherm = interpolate.interp1d(total_water, aw)
return (isotherm, reverse_isotherm)
def _calc_product_water(product, aw):
"""
Calculate the total water in the product
Args:
product (dict): Dictionary of product properties.
aw (float): water activity in of the product (unitless).
Returns:
float: total water in the product (mg)
"""
# Get the GAB parameters.
gab = get_GAB_constants([c["name"] for c in product["components"]])
# Unweighted water content.
water = [_GAB(aw, g["Wm"], g["C"], g["K"]) for index, g in gab.iterrows()]
# Get the component fractions.
frac = [float(c["frac"]) for c in product["components"]]
# Calculate the water per unit mass product.
water = sum([f * w for w, f in zip(water, frac)])
water *= float(product["units"]) * float(product["unit_mass"])
return water
def _calc_desiccant_water(desiccant, aw):
"""
Calculate the total water in the desiccant.
Args:
desiccant (dict): Dictionary of desiccant properties.
aw (float): water activity of the desiccant (unitless).
Returns:
float: total water in the desiccant (mg)
"""
# Calculate the water in the desiccant (mg).
gab = get_GAB_constants([desiccant["type"]]).iloc[0]
desiccant_water = _GAB(aw, gab.Wm, gab.C, gab.K) * \
float(desiccant["mass"]) * 1.e3
return desiccant_water
def _calc_vapor_water(layer, aw, temperature):
"""
Calculate the total water in the head space.
Args:
layer (dict): Dictionary of layer properties.
aw (float): water activity in the layer (unitless).
temperature (float): Temperature of the layer (C).
Returns:
float: total water in the head space (mg)
"""
# Calculate the mass of water vapor (mg).
T = float(temperature)
mass_sat = 5.079e-3 + 2.5547e-4*T + 1.6124e-5*T**2 + 3.6608e-9*T**3 + \
3.9911e-9*T**4
head_space = _calc_package_volume(layer) - _calc_content_volume(layer)
vapor_water = mass_sat * head_space * aw
return vapor_water
def _calc_layer_water(layer, aw, temperature):
"""
Calculate the total water in a layer.
Args:
layer (dict): Dictionary of layer properties.
aw (float): water activity in the layer (unitless).
temperature (float): Temperature of the layer (C).
Returns:
float: total water in the layer (mg)
"""
total_water = _calc_product_water(layer["product"], aw) + \
_calc_desiccant_water(layer["desiccant"], aw) + \
_calc_vapor_water(layer, aw, temperature)
return total_water
def _calc_content_volume(layer):
"""
    Calculate the total volume of the product and desiccant inside the layer,
from the product and desiccant mass and densities.
Args:
layer (dict): Dictionary of layer properties.
Returns:
float: Total volume of contents in the layer (cm^3).
"""
product = layer["product"]
desiccant = layer["desiccant"]
# Calculate the mass of product and desiccant
product_mass = float(product["units"]) * float(product["unit_mass"]) / 1.e3
desiccant_mass = float(desiccant["mass"])
# If product bulk density is not supplied define a default value.
if product["bulk_density"] != "None":
product_bulk_density = float(product["bulk_density"])
else:
product_bulk_density = 1.3
# If desiccant density is not supplied define a default value.
if desiccant["density"] != "None":
desiccant_density = float(desiccant["density"])
else:
desiccant_density = 1.0
# Calculate the total volume of contents.
content_volume = product_mass / product_bulk_density + \
desiccant_mass / desiccant_density
return content_volume
def _calc_package_volume(layer):
"""
Calculate the package volume from either user supplied volume, a container
ID that the volume is known, or calculated from the contents volume.
Args:
layer (dict): dictionary of layer properties
Returns:
float: The volume of the package (cm^3)
"""
container = layer["container"]
# If the volume is specified by the user use that value.
if container["volume"] != "None":
        package_volume = float(container["volume"])
# If the ID is not LDPE (which doesn't have a specific volume), use
# the ID to lookup the package volume.
elif container["ID"] != "LDPE":
package_volume = get_package_constants([container["ID"]])
package_volume = package_volume.iloc[0]["volume"]
# If the package is LDPE and no volume was specified, use the contents
# volume as an approximation.
elif container["ID"] == "LDPE":
package_volume = _calc_content_volume(layer)
return package_volume
def _GAB(aw, Wm, C, K):
"""
Calculate the water content of the substance mixture at activity, aw, based
on the Guggenheim, Anderson and de Boer (GAB) three-parameter isotherm
model. See "GAB Generalized Equation for Sorption Phenomena", Food and
Bioprocess Technology March 2008 Vol 1, Issue 1, pp 82--90
w = (Wm*C*K*aw) / ((1-K*aw)*(1-K*aw+C*K*aw))
Args:
aw (float): water activity (unitless)
Wm (float): GAB constant (unitless)
C (float): GAB constant (unitless)
K (float): GAB constant (unitless)
Returns:
    float: mass of water per unit mass of material (unitless).
"""
water_content = (Wm*C*K*aw) / ((1-K*aw) * (1-K*aw+C*K*aw)) / 100.
return water_content
def _calc_initial_water(layer):
"""
Get the initial water for the layer that is either given by the
user or calculated from other user data.
Args:
layer (dict): Dictionary of layer properties.
Returns:
float: total water in the layer (mg)
"""
product = layer["product"]
desiccant = layer["desiccant"]
container = layer["container"]
# Calculate the product water (mg).
if product["initial_water_content"] != "None":
product_water = float(product["initial_water_content"]) * \
float(product["units"]) * float(product["unit_mass"])
else:
aw = float(product["initial_water_activity"])
product_water = _calc_product_water(product, aw)
# Calculate the water in the desiccant (mg).
if desiccant["initial_water_content"] != "None":
desiccant_water = float(desiccant["initial_water_content"]) * \
float(desiccant["mass"])
else:
aw = float(desiccant["initial_water_activity"])
desiccant_water = _calc_desiccant_water(desiccant, aw)
# Calculate the mass of water in the vapor (mg).
    # The initial relative humidity is supplied as a percentage; convert it
    # to a water activity fraction.
    aw = float(container["initial_relative_humidity"]) / 100.
    # Assume the package was sealed at 25 C.
temperature = 25.
vapor_water = _calc_vapor_water(layer, aw, temperature)
total_water = product_water + desiccant_water + vapor_water
return total_water
def make_package(layers, temperature):
"""
    Make a package object that contains only the information required for
    simulating the package dynamics. Specifically, a list of dictionaries
    that contains the initial conditions (mass of water, rh), the transport
    properties (permeability), and the equilibrium relationship (isotherm)
    for each layer in the package.
Args:
layers (list): User supplied list of dictionaries describing the
parameters for each layer (e.g. contents, desiccant, container,...etc.)
Note that it is expected that the first item in layers is the inner
most layer, the second item is the second inner most layer, ...and the
last item is the outer most layer.
temperature (float): Temperature of the package (needed for isotherm
and permeability calculations).
Returns:
list: list of dictionaries. Each list item contains a dictionary with
four keys: mass, rh, permeability, isotherm.
"""
# Calculate the total initial water for the layers
total_water = [_calc_initial_water(layer) for layer in layers]
# Calculate the isotherm for the layers
isotherm = [_calc_isotherm(layer, temperature) for layer in layers]
# Use the total water and isotherm to calculate the starting rh for each
# layer.
rh = [isotherm[i][1](total_water[i]) for i in range(len(layers))]
# Calculate the permeability and isotherm for the layers
permeability = [_calc_permeability(layer, temperature) for layer in layers]
# Create the package each item represents a layer (item 1 is the inner
# most layer), with items mass of water, rh, permeability and isotherm,
# which is sufficient for calculating the dynamics.
package = [{"mass_water": mw, "rh": rh, "permeability": perm,
"isotherm": iso} for mw, rh, perm, iso in
zip(total_water, rh, permeability, isotherm)]
return package
def simulate(package, rh_ext, duration):
"""
Simulate the dynamics for a package over some duration.
Args:
package (list): list of dictionaries describing the package (i.e.
the initial mass of water and initial water activity,
the permeability and the isotherm for each layer
of the package). It is assumed the first package item is the inner-most
layer, and the last item is the outer-most layer.
    rh_ext (float): the external environment relative humidity (unitless)
duration (float): number of seconds to simulate.
Returns:
pandas DataFrame: The water activity of each layer. DataFrame
index corresponds to the time points, and each column corresponds
to a layer, where the first column is the innermost layer.
"""
    # Discretize the duration into a fixed number of explicit Euler steps.
    pts = 10000
    dt = duration / float(pts)
    # Define a dictionary of numpy arrays for the results.
    # The left most column in the array corresponds to the first package item,
    # which is the inner most layer.
    results = {"time": np.linspace(0, duration, pts),
               "mass": np.zeros((pts, len(package))),
               "rh": np.zeros((pts, len(package)))}
    # Set the initial conditions.
    results["mass"][0] = [l["mass_water"] for l in package]
    results["rh"][0] = [l["rh"] for l in package]
    # Begin loop dynamics
    for i in range(1, pts):
        # Calculate the flux through the individual inner layers, driven by
        # the rh difference with the layer directly outside.
        for j in range(len(package) - 1):
            results["mass"][i, j] = results["mass"][i-1, j] + \
                (results["rh"][i-1, j+1] - results["rh"][i-1, j]) * \
                package[j]["permeability"] * dt
        # The outer-most layer exchanges water with the external environment.
        j = len(package) - 1
        results["mass"][i, j] = results["mass"][i-1, j] + \
            (rh_ext - results["rh"][i-1, j]) * \
            package[j]["permeability"] * dt
        # Equilibrate the layers: the reverse isotherm maps total water back
        # to water activity.
        for j in range(len(package)):
            results["rh"][i, j] = package[j]["isotherm"][1](results["mass"][i, j])
    # Return the water activity of each layer over time; the first column is
    # the inner-most layer.
    return pd.DataFrame(results["rh"], index=results["time"])
|
|
#
# The Python Imaging Library.
# $Id$
#
# optional color management support, based on Kevin Cazabon's PyCMS
# library.
#
# History:
# 2009-03-08 fl Added to PIL.
#
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
#
# See the README file for information on usage and redistribution. See
# below for the original description.
#
from __future__ import print_function
DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
kevin@cazabon.com
http://www.cazabon.com
pyCMS home page: http://www.cazabon.com/pyCMS
littleCMS home page: http://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
1.0.0 pil Oct 2013 Port to LCMS 2.
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
              Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
VERSION = "1.0.0 pil"
# --------------------------------------------------------------------.
from PIL import Image
from PIL import _imagingcms
from PIL._util import isStringType
core = _imagingcms
#
# intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
DIRECTION_INPUT = 0
DIRECTION_OUTPUT = 1
DIRECTION_PROOF = 2
#
# flags
FLAGS = {
"MATRIXINPUT": 1,
"MATRIXOUTPUT": 2,
"MATRIXONLY": (1|2),
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
"NOPRELINEARIZATION": 16, # Don't create prelinearization tables on precalculated transforms (internal use)
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
"NOTCACHE": 64, # Inhibit 1-pixel cache
"NOTPRECALC": 256,
"NULLTRANSFORM": 512, # Don't transform anyway
"HIGHRESPRECALC": 1024, # Use more memory to give better accurancy
"LOWRESPRECALC": 2048, # Use less memory to minimize resouces
"WHITEBLACKCOMPENSATION": 8192,
"BLACKPOINTCOMPENSATION": 8192,
"GAMUTCHECK": 4096, # Out of Gamut alarm
"SOFTPROOFING": 16384, # Do softproofing
"PRESERVEBLACK": 32768, # Black preservation
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
}
_MAX_FLAG = 0
for flag in FLAGS.values():
if isinstance(flag, int):
_MAX_FLAG = _MAX_FLAG | flag
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile:
def __init__(self, profile):
# accepts a string (filename), a file-like object, or a low-level
# profile object
if isStringType(profile):
self._set(core.profile_open(profile), profile)
elif hasattr(profile, "read"):
self._set(core.profile_frombytes(profile.read()))
else:
self._set(profile) # assume it's already a profile
def _set(self, profile, filename=None):
self.profile = profile
self.filename = filename
if profile:
self.product_name = None #profile.product_name
self.product_info = None #profile.product_info
else:
self.product_name = None
self.product_info = None
##
# Transform. This can be used with the procedural API, or with the
# standard {@link Image.point} method.
class ImageCmsTransform(Image.ImagePointHandler):
def __init__(self, input, output, input_mode, output_mode,
intent=INTENT_PERCEPTUAL,
proof=None, proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
if proof is None:
self.transform = core.buildTransform(
input.profile, output.profile,
input_mode, output_mode,
intent,
flags
)
else:
self.transform = core.buildProofTransform(
input.profile, output.profile, proof.profile,
input_mode, output_mode,
intent, proof_intent,
flags
)
# Note: inputMode and outputMode are for pyCMS compatibility only
self.input_mode = self.inputMode = input_mode
self.output_mode = self.outputMode = output_mode
def point(self, im):
return self.apply(im)
def apply(self, im, imOut=None):
im.load()
if imOut is None:
imOut = Image.new(self.output_mode, im.size, None)
result = self.transform.apply(im.im.id, imOut.im.id)
return imOut
def apply_in_place(self, im):
im.load()
if im.mode != self.output_mode:
raise ValueError("mode mismatch") # wrong output mode
result = self.transform.apply(im.im.id, im.im.id)
return im
##
# (experimental) Fetches the profile for the current display device.
# @return None if the profile is not known.
def get_display_profile(handle=None):
import sys
if sys.platform == "win32":
from PIL import ImageWin
if isinstance(handle, ImageWin.HDC):
profile = core.get_display_profile_win32(handle, 1)
else:
profile = core.get_display_profile_win32(handle or 0)
else:
try:
get = _imagingcms.get_display_profile
except AttributeError:
return None
else:
profile = get()
return ImageCmsProfile(profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
##
# (pyCMS) Exception class. This is used for all errors in the pyCMS API.
class PyCMSError(Exception):
pass
##
# (pyCMS) Applies an ICC transformation to a given image, mapping from
# inputProfile to outputProfile.
#
# If the input or output profiles specified are not valid filenames, a
# PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
# a PyCMSError will be raised. If an error occurs during application of
# the profiles, a PyCMSError will be raised. If outputMode is not a mode
# supported by the outputProfile (or by pyCMS), a PyCMSError will be
# raised.
#
# This function applies an ICC transformation to im from inputProfile's
# color space to outputProfile's color space using the specified rendering
# intent to decide how to handle out-of-gamut colors.
#
# OutputMode can be used to specify that a color mode conversion is to
# be done using these profiles, but the specified profiles must be able
# to handle that mode. I.e., if converting im from RGB to CMYK using
# profiles, the input profile must handle RGB data, and the output
# profile must handle CMYK data.
#
# @param im An open PIL image object (i.e. Image.new(...) or Image.open(...), etc.)
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this image, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# profile you wish to use for this image, or a profile object
# @param renderingIntent Integer (0-3) specifying the rendering intent you wish
# to use for the transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param outputMode A valid PIL mode for the output image (i.e. "RGB", "CMYK",
# etc.). Note: if rendering the image "inPlace", outputMode MUST be the
# same mode as the input, or omitted completely. If omitted, the outputMode
# will be the same as the mode of the input image (im.mode)
# @param inPlace Boolean (1 = True, None or 0 = False). If True, the original
# image is modified in-place, and None is returned. If False (default), a
# new Image object is returned with the transform applied.
# @param flags Integer (0-...) specifying additional flags
# @return Either None or a new PIL image object, depending on value of inPlace
# @exception PyCMSError
def profileToProfile(im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, outputMode=None, inPlace=0, flags=0):
if outputMode is None:
outputMode = im.mode
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        raise PyCMSError("renderingIntent must be an integer between 0 and 3")
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
transform = ImageCmsTransform(
inputProfile, outputProfile, im.mode, outputMode, renderingIntent, flags=flags
)
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
##
# (pyCMS) Opens an ICC profile file.
#
# The PyCMSProfile object can be passed back into pyCMS for use in creating
# transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
#
# If profileFilename is not a valid filename for an ICC profile, a PyCMSError
# will be raised.
#
# @param profileFilename String, as a valid filename path to the ICC profile you
# wish to open, or a file-like object.
# @return A CmsProfile class object.
# @exception PyCMSError
def getOpenProfile(profileFilename):
try:
return ImageCmsProfile(profileFilename)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile. Use applyTransform to apply the transform to a given
# image.
#
# If the input or output profiles specified are not valid filenames, a
# PyCMSError will be raised. If an error occurs during creation of the
# transform, a PyCMSError will be raised.
#
# If inMode or outMode are not a mode supported by the outputProfile (or
# by pyCMS), a PyCMSError will be raised.
#
# This function builds and returns an ICC transform from the inputProfile
# to the outputProfile using the renderingIntent to determine what to do
# with out-of-gamut colors. It will ONLY work for converting images that
# are in inMode to images that are in outMode color format (PIL mode,
# i.e. "RGB", "RGBA", "CMYK", etc.).
#
# Building the transform is a fair part of the overhead in
# ImageCms.profileToProfile(), so if you're planning on converting multiple
# images using the same input/output settings, this can save you time.
# Once you have a transform object, it can be used with
# ImageCms.applyProfile() to convert images without the need to re-compute
# the lookup table for the transform.
#
# The reason pyCMS returns a class object rather than a handle directly
# to the transform is that it needs to keep track of the PIL input/output
# modes that the transform is meant for. These attributes are stored in
# the "inMode" and "outMode" attributes of the object (which can be
# manually overridden if you really want to, but I don't know of any
# time that would be of use, or would even work).
#
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this transform, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# profile you wish to use for this transform, or a profile object
# @param inMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param outMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param renderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for the transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param flags Integer (0-...) specifying additional flags
# @return A CmsTransform class object.
# @exception PyCMSError
def buildTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, flags=0):
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        raise PyCMSError("renderingIntent must be an integer between 0 and 3")
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile, but tries to simulate the result that would be
# obtained on the proofProfile device.
#
# If the input, output, or proof profiles specified are not valid
# filenames, a PyCMSError will be raised.
#
# If an error occurs during creation of the transform, a PyCMSError will
# be raised.
#
# If inMode or outMode are not a mode supported by the outputProfile
# (or by pyCMS), a PyCMSError will be raised.
#
# This function builds and returns an ICC transform from the inputProfile
# to the outputProfile, but tries to simulate the result that would be
# obtained on the proofProfile device using renderingIntent and
# proofRenderingIntent to determine what to do with out-of-gamut
# colors. This is known as "soft-proofing". It will ONLY work for
# converting images that are in inMode to images that are in outMode
# color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
#
# Usage of the resulting transform object is exactly the same as with
# ImageCms.buildTransform().
#
# Proof profiling is generally used when using an output device to get a
# good idea of what the final printed/displayed image would look like on
# the proofProfile device when it's quicker and easier to use the
# output device for judging color. Generally, this means that the
# output device is a monitor, or a dye-sub printer (etc.), and the simulated
# device is something more expensive, complicated, or time consuming
# (making it difficult to make a real print for color judgement purposes).
#
# Soft-proofing basically functions by adjusting the colors on the
# output device to match the colors of the device being simulated. However,
# when the simulated device has a much wider gamut than the output
# device, you may obtain marginal results.
#
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this transform, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# (monitor, usually) profile you wish to use for this transform, or a
# profile object
# @param proofProfile String, as a valid filename path to the ICC proof profile
# you wish to use for this transform, or a profile object
# @param inMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param outMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param renderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for the input->proof (simulated) transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param proofRenderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for proof->output transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param flags Integer (0-...) specifying additional flags
# @return A CmsTransform class object.
# @exception PyCMSError
def buildProofTransform(inputProfile, outputProfile, proofProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, flags=FLAGS["SOFTPROOFING"]):
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        raise PyCMSError("renderingIntent must be an integer between 0 and 3")
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
if not isinstance(proofProfile, ImageCmsProfile):
proofProfile = ImageCmsProfile(proofProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, proofProfile, proofRenderingIntent, flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
##
# (pyCMS) Applies a transform to a given image.
#
# If im.mode != transform.inMode, a PyCMSError is raised.
#
# If inPlace == TRUE and transform.inMode != transform.outMode, a
# PyCMSError is raised.
#
# If im.mode, transfer.inMode, or transfer.outMode is not supported by
# pyCMSdll or the profiles you used for the transform, a PyCMSError is
# raised.
#
# If an error occurs while the transform is being applied, a PyCMSError
# is raised.
#
# This function applies a pre-calculated transform (from
# ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) to an
# image. The transform can be used for multiple images, saving
# considerable calculation time if doing the same conversion multiple times.
#
# If you want to modify im in-place instead of receiving a new image as
# the return value, set inPlace to TRUE. This can only be done if
# transform.inMode and transform.outMode are the same, because we can't
# change the mode in-place (the buffer sizes for some modes are
# different). The default behavior is to return a new Image object of
# the same dimensions in mode transform.outMode.
#
# @param im A PIL Image object, and im.mode must be the same as the inMode
# supported by the transform.
# @param transform A valid CmsTransform class object
# @param inPlace Bool (1 == True, 0 or None == False). If True, im is modified
# in place and None is returned, if False, a new Image object with the
# transform applied is returned (and im is not changed). The default is False.
# @return Either None, or a new PIL Image object, depending on the value of inPlace
# @exception PyCMSError
def applyTransform(im, transform, inPlace=0):
try:
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
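# Example (illustrative sketch): reusing one pre-computed transform for many
# images, which is the main point of the buildTransform/applyTransform split.
# The profile paths and the "frames" iterable are hypothetical.
#
#   transform = ImageCms.buildTransform(
#       "/path/to/input.icc", "/path/to/output.icc", "RGB", "RGB")
#   for frame in frames:                  # frames: any iterable of RGB images
#       ImageCms.applyTransform(frame, transform, inPlace=1)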
##
# (pyCMS) Creates a profile.
#
# If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
#
# If using LAB and colorTemp != a positive integer, a PyCMSError is raised.
#
# If an error occurs while creating the profile, a PyCMSError is raised.
#
# Use this function to create common profiles on-the-fly instead of
# having to supply a profile on disk and knowing the path to it. It
# returns a normal CmsProfile object that can be passed to
# ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
# to images.
#
# @param colorSpace String, the color space of the profile you wish to create.
# Currently only "LAB", "XYZ", and "sRGB" are supported.
# @param colorTemp Positive integer for the white point for the profile, in
# degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50
# illuminant if omitted (5000k). colorTemp is ONLY applied to LAB profiles,
# and is ignored for XYZ and sRGB.
# @return A CmsProfile class object
# @exception PyCMSError
def createProfile(colorSpace, colorTemp=-1):
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
raise PyCMSError("Color space not supported for on-the-fly profile creation (%s)" % colorSpace)
if colorSpace == "LAB":
try:
colorTemp = float(colorTemp)
except (TypeError, ValueError):
raise PyCMSError("Color temperature must be numeric, \"%s\" not valid" % colorTemp)
try:
return core.createProfile(colorSpace, colorTemp)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
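# Example (illustrative sketch): a transform built entirely from on-the-fly
# profiles, so no ICC files are needed on disk.  Assumes a PIL build with
# "LAB" mode support; the white point is a made-up value.
#
#   lab = ImageCms.createProfile("LAB", 6500)   # colorTemp only used for LAB
#   srgb = ImageCms.createProfile("sRGB")
#   rgb2lab = ImageCms.buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")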
##
# (pyCMS) Gets the internal product name for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised. If an error occurs while trying to obtain the
# name tag, a PyCMSError is raised.
#
# Use this function to obtain the INTERNAL name of the profile (stored
# in an ICC tag in the profile itself), usually the one used when the
# profile was originally created. Sometimes this tag also contains
# additional information supplied by the creator.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal name of the profile as stored in an
# ICC tag.
# @exception PyCMSError
def getProfileName(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# do it in python, not c.
# // name was "%s - %s" (model, manufacturer) || Description ,
# // but if the Model and Manufacturer were the same or the model
# // was long, Just the model, in 1.x
model = profile.profile.product_model
manufacturer = profile.profile.product_manufacturer
if not (model or manufacturer):
return profile.profile.product_description+"\n"
if not manufacturer or len(model) > 30:
return model + "\n"
return "%s - %s\n" % (model, manufacturer)
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the internal product information for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the info tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# info tag. This often contains details about the profile, and how it
# was created, as supplied by the creator.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileInfo(profile):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# add an extra newline to preserve pyCMS compatibility
# Python, not C. the white point bits weren't working well, so skipping.
# // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
description = profile.profile.product_description
cpright = profile.profile.product_copyright
arr = []
for elt in (description, cpright):
if elt:
arr.append(elt)
return "\r\n\r\n".join(arr)+"\r\n\r\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the copyright for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the copyright tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# copyright tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileCopyright(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_copyright + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the manufacturer for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the manufacturer tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# manufacturer tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileManufacturer(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_manufacturer + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the model for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the model tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# model tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileModel(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_model + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the description for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the description tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# description tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileDescription(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_description + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
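# Example (illustrative sketch): the tag getters above all accept either an
# open profile or a filename and return a newline-terminated string.  The
# profile path below is a placeholder.
#
#   profile = ImageCms.ImageCmsProfile("/path/to/profile.icc")
#   print(ImageCms.getProfileName(profile).strip())
#   print(ImageCms.getProfileDescription(profile).strip())
#   print(ImageCms.getProfileInfo(profile).strip())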
##
# (pyCMS) Gets the default intent name for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the default intent, a
# PyCMSError is raised.
#
# Use this function to determine the default (and usually best optimized)
# rendering intent for this profile. Most profiles support multiple
# rendering intents, but are intended mostly for one type of conversion.
# If you wish to use a different intent than returned, use
# ImageCms.isIntentSupported() to verify it will work first.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return Integer 0-3 specifying the default rendering intent for this profile.
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @exception PyCMSError
def getDefaultIntent(profile):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.rendering_intent
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Checks if a given intent is supported.
#
# Use this function to verify that you can use your desired
# renderingIntent with profile, and that profile can be used for the
# input/output/proof profile as you desire.
#
# Some profiles are created specifically for one "direction", and cannot
# be used for others. Some profiles can only be used for certain
# rendering intents... so it's best to either verify this before trying
# to create a transform with them (using this function), or catch the
# potential PyCMSError that will occur if they don't support the modes
# you select.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @param intent Integer (0-3) specifying the rendering intent you wish to use
# with this profile
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param direction Integer specifying if the profile is to be used for input,
# output, or proof
#
# INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
# OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
# PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
#
# @return 1 if the intent/direction are supported, -1 if they are not.
# @exception PyCMSError
def isIntentSupported(profile, intent, direction):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# FIXME: I get different results for the same data w. different
# compilers. Bug in LittleCMS or in the binding?
if profile.profile.is_intent_supported(intent, direction):
return 1
else:
return -1
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
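# Example (illustrative sketch): checking a profile's default intent and only
# using a different one when the profile supports it for output use.  The
# profile path is a placeholder.
#
#   profile = ImageCms.ImageCmsProfile("/path/to/printer.icc")
#   intent = ImageCms.INTENT_RELATIVE_COLORIMETRIC
#   if ImageCms.isIntentSupported(profile, intent,
#                                 ImageCms.DIRECTION_OUTPUT) != 1:
#       intent = ImageCms.getDefaultIntent(profile)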
##
# (pyCMS) Fetches versions.
def versions():
import sys
return (
VERSION, core.littlecms_version, sys.version.split()[0], Image.VERSION
)
# --------------------------------------------------------------------
if __name__ == "__main__":
# create a cheap manual from the __doc__ strings for the functions above
from PIL import ImageCms
print(__doc__)
for f in dir(ImageCms):
print("=" * 80)
print("%s" % f)
doc = getattr(getattr(ImageCms, f), "__doc__", None)
if doc and "pyCMS" in doc:
# so we don't get the __doc__ string for imported modules
print(doc)
|
|
"""
Support for interfacing to the Logitech SqueezeBox API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.squeezebox/
"""
import asyncio
import json
import logging
import urllib.parse
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_PLAYER_SCHEMA, MEDIA_TYPE_MUSIC,
PLATFORM_SCHEMA, SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
ATTR_COMMAND, CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME,
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 9000
TIMEOUT = 10
SUPPORT_SQUEEZEBOX = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_SEEK | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | \
SUPPORT_PLAY | SUPPORT_SHUFFLE_SET | SUPPORT_CLEAR_PLAYLIST
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
})
SERVICE_CALL_METHOD = 'squeezebox_call_method'
DATA_SQUEEZEBOX = 'squeezebox'
KNOWN_SERVERS = 'squeezebox_known_servers'
ATTR_PARAMETERS = 'parameters'
SQUEEZEBOX_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMETERS):
vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]),
})
SERVICE_TO_METHOD = {
SERVICE_CALL_METHOD: {
'method': 'async_call_method',
'schema': SQUEEZEBOX_CALL_METHOD_SCHEMA},
}
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the squeezebox platform."""
import socket
known_servers = hass.data.get(KNOWN_SERVERS)
if known_servers is None:
hass.data[KNOWN_SERVERS] = known_servers = set()
if DATA_SQUEEZEBOX not in hass.data:
hass.data[DATA_SQUEEZEBOX] = []
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
if discovery_info is not None:
host = discovery_info.get("host")
port = discovery_info.get("port")
else:
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
# In case the port is not discovered
if port is None:
port = DEFAULT_PORT
# Get IP of host, to prevent duplication of same host (different DNS names)
try:
ipaddr = socket.gethostbyname(host)
except OSError as error:
_LOGGER.error(
"Could not communicate with %s:%d: %s", host, port, error)
return False
if ipaddr in known_servers:
return
known_servers.add(ipaddr)
_LOGGER.debug("Creating LMS object for %s", ipaddr)
lms = LogitechMediaServer(hass, host, port, username, password)
players = yield from lms.create_players()
hass.data[DATA_SQUEEZEBOX].extend(players)
async_add_entities(players)
@asyncio.coroutine
def async_service_handler(service):
"""Map services to methods on MediaPlayerDevice."""
method = SERVICE_TO_METHOD.get(service.service)
if not method:
return
params = {key: value for key, value in service.data.items()
if key != 'entity_id'}
entity_ids = service.data.get('entity_id')
if entity_ids:
target_players = [player for player in hass.data[DATA_SQUEEZEBOX]
if player.entity_id in entity_ids]
else:
target_players = hass.data[DATA_SQUEEZEBOX]
update_tasks = []
for player in target_players:
yield from getattr(player, method['method'])(**params)
update_tasks.append(player.async_update_ha_state(True))
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service]['schema']
hass.services.async_register(
DOMAIN, service, async_service_handler,
schema=schema)
return True
class LogitechMediaServer:
"""Representation of a Logitech media server."""
def __init__(self, hass, host, port, username, password):
"""Initialize the Logitech device."""
self.hass = hass
self.host = host
self.port = port
self._username = username
self._password = password
@asyncio.coroutine
def create_players(self):
"""Create a list of devices connected to LMS."""
result = []
data = yield from self.async_query('players', 'status')
if data is False:
return result
for players in data.get('players_loop', []):
player = SqueezeBoxDevice(
self, players['playerid'], players['name'])
yield from player.async_update()
result.append(player)
return result
@asyncio.coroutine
def async_query(self, *command, player=""):
"""Abstract out the JSON-RPC connection."""
auth = None if self._username is None else aiohttp.BasicAuth(
self._username, self._password)
url = "http://{}:{}/jsonrpc.js".format(
self.host, self.port)
data = json.dumps({
"id": "1",
"method": "slim.request",
"params": [player, command]
})
_LOGGER.debug("URL: %s Data: %s", url, data)
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
response = yield from websession.post(
url,
data=data,
auth=auth)
if response.status != 200:
_LOGGER.error(
"Query failed, response code: %s Full message: %s",
response.status, response)
return False
data = yield from response.json()
except (asyncio.TimeoutError, aiohttp.ClientError) as error:
_LOGGER.error("Failed communicating with LMS: %s", type(error))
return False
try:
return data['result']
except AttributeError:
_LOGGER.error("Received invalid response: %s", data)
return False
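# For reference (values below are made up): a call such as
#   async_query('mixer', 'volume', '50', player='00:11:22:33:44:55')
# posts a JSON-RPC body of roughly this shape to /jsonrpc.js:
#   {"id": "1", "method": "slim.request",
#    "params": ["00:11:22:33:44:55", ["mixer", "volume", "50"]]}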
class SqueezeBoxDevice(MediaPlayerDevice):
"""Representation of a SqueezeBox device."""
def __init__(self, lms, player_id, name):
"""Initialize the SqueezeBox device."""
super(SqueezeBoxDevice, self).__init__()
self._lms = lms
self._id = player_id
self._status = {}
self._name = name
self._last_update = None
_LOGGER.debug("Creating SqueezeBox object: %s, %s", name, player_id)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def state(self):
"""Return the state of the device."""
if 'power' in self._status and self._status['power'] == 0:
return STATE_OFF
if 'mode' in self._status:
if self._status['mode'] == 'pause':
return STATE_PAUSED
if self._status['mode'] == 'play':
return STATE_PLAYING
if self._status['mode'] == 'stop':
return STATE_IDLE
return STATE_UNKNOWN
def async_query(self, *parameters):
"""Send a command to the LMS.
This method must be run in the event loop and returns a coroutine.
"""
return self._lms.async_query(
*parameters, player=self._id)
@asyncio.coroutine
def async_update(self):
"""Retrieve the current state of the player."""
tags = 'adKl'
response = yield from self.async_query(
"status", "-", "1", "tags:{tags}"
.format(tags=tags))
if response is False:
return
last_media_position = self.media_position
self._status = {}
try:
self._status.update(response["playlist_loop"][0])
except KeyError:
pass
try:
self._status.update(response["remoteMeta"])
except KeyError:
pass
self._status.update(response)
if self.media_position != last_media_position:
_LOGGER.debug('Media position updated for %s: %s',
self, self.media_position)
self._last_update = utcnow()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if 'mixer volume' in self._status:
return int(float(self._status['mixer volume'])) / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
if 'mixer volume' in self._status:
return str(self._status['mixer volume']).startswith('-')
@property
def media_content_id(self):
"""Content ID of current playing media."""
if 'current_title' in self._status:
return self._status['current_title']
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if 'duration' in self._status:
return int(float(self._status['duration']))
@property
def media_position(self):
"""Duration of current playing media in seconds."""
if 'time' in self._status:
return int(float(self._status['time']))
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_update
@property
def media_image_url(self):
"""Image url of current playing media."""
if 'artwork_url' in self._status:
media_url = self._status['artwork_url']
elif 'id' in self._status:
media_url = ('/music/{track_id}/cover.jpg').format(
track_id=self._status['id'])
else:
media_url = ('/music/current/cover.jpg?player={player}').format(
player=self._id)
# pylint: disable=protected-access
if self._lms._username:
base_url = 'http://{username}:{password}@{server}:{port}/'.format(
username=self._lms._username,
password=self._lms._password,
server=self._lms.host,
port=self._lms.port)
else:
base_url = 'http://{server}:{port}/'.format(
server=self._lms.host,
port=self._lms.port)
url = urllib.parse.urljoin(base_url, media_url)
return url
@property
def media_title(self):
"""Title of current playing media."""
if 'title' in self._status:
return self._status['title']
if 'current_title' in self._status:
return self._status['current_title']
@property
def media_artist(self):
"""Artist of current playing media."""
if 'artist' in self._status:
return self._status['artist']
@property
def media_album_name(self):
"""Album of current playing media."""
if 'album' in self._status:
return self._status['album']
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
if 'playlist_shuffle' in self._status:
return self._status['playlist_shuffle'] == 1
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SQUEEZEBOX
def async_turn_off(self):
"""Turn off media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('power', '0')
def async_volume_up(self):
"""Volume up media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('mixer', 'volume', '+5')
def async_volume_down(self):
"""Volume down media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('mixer', 'volume', '-5')
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
volume_percent = str(int(volume*100))
return self.async_query('mixer', 'volume', volume_percent)
def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
"""
mute_numeric = '1' if mute else '0'
return self.async_query('mixer', 'muting', mute_numeric)
def async_media_play_pause(self):
"""Send pause command to media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('pause')
def async_media_play(self):
"""Send play command to media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('play')
def async_media_pause(self):
"""Send pause command to media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('pause', '1')
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('playlist', 'index', '+1')
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('playlist', 'index', '-1')
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('time', position)
def async_turn_on(self):
"""Turn the media player on.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_query('power', '1')
def async_play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the current playlist.
This method must be run in the event loop and returns a coroutine.
"""
if kwargs.get(ATTR_MEDIA_ENQUEUE):
return self._add_uri_to_playlist(media_id)
return self._play_uri(media_id)
def _play_uri(self, media_id):
"""Replace the current play list with the uri."""
return self.async_query('playlist', 'play', media_id)
def _add_uri_to_playlist(self, media_id):
"""Add an item to the existing playlist."""
return self.async_query('playlist', 'add', media_id)
def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
return self.async_query('playlist', 'shuffle', int(shuffle))
def async_clear_playlist(self):
"""Send the media player the command for clear playlist."""
return self.async_query('playlist', 'clear')
def async_call_method(self, command, parameters=None):
"""
Call Squeezebox JSON/RPC method.
Escaped optional parameters are added to the command to form the list
of positional parameters (p0, p1..., pN) passed to JSON/RPC server.
"""
all_params = [command]
if parameters:
for parameter in parameters:
all_params.append(urllib.parse.quote(parameter, safe=':=/?'))
return self.async_query(*all_params)
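# For reference, the squeezebox_call_method service registered above routes to
# async_call_method.  A hypothetical service call payload (entity id, command
# and parameters are illustrative only) would look like:
#   {"entity_id": "media_player.squeezebox_radio",
#    "command": "playlist",
#    "parameters": ["loadtracks", "album.titlesearch=Revolver"]}
# which reaches the targeted player as
#   async_call_method('playlist', ['loadtracks', 'album.titlesearch=Revolver'])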
|
|
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
$ python runtests.py --durations 20
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
from __future__ import division, print_function
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "numpy"
PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py::test_byteorder_check"
SAMPLE_SUBMODULE = "linalg"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide (doctest) check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
parser.add_argument("--durations", action="store", default=-1, type=int,
help=("Time N slowest tests, time all if 0, time none if < 0"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster, constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
help="Number of parallel jobs during build")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="store", metavar="COMMIT",
help=("Compare benchmark results of current HEAD to "
"BEFORE. Use an additional "
"--bench-compare=COMMIT to override HEAD with "
"COMMIT. Note that you need to commit your "
"changes first!"))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
if args.durations < 0:
args.durations = -1
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug "
"version; remove -g flag ***")
if not args.no_build:
# we need the noarch path in case the package is pure python.
site_dir, site_dir_noarch = build_project(args)
sys.path.insert(0, site_dir)
sys.path.insert(0, site_dir_noarch)
os.environ['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
else:
_temp = __import__(PROJECT_MODULE)
site_dir = os.path.sep.join(_temp.__file__.split(os.path.sep)[:-2])
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
# Debugging issues with warnings is much easier if you can see them
print("Enabling display of all warnings")
import warnings
import types
warnings.filterwarnings("always")
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = types.ModuleType('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec_(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
# Debugging issues with warnings is much easier if you can see them
print("Enabling display of all warnings and pre-importing numpy as np")
import warnings; warnings.filterwarnings("always")
import IPython
import numpy as np
IPython.embed(user_ns={"np": np})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'cmd' if os.name == 'nt' else 'sh')
print("Spawning a shell ({})...".format(shell))
subprocess.call([shell] + extra_argv)
sys.exit(0)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
items = extra_argv
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
bench_args = []
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
cmd = ['asv', 'run', '-n', '-e', '--python=same'] + bench_args
ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
sys.exit(ret)
else:
commits = [x.strip() for x in args.bench_compare.split(',')]
if len(commits) == 1:
commit_a = commits[0]
commit_b = 'HEAD'
elif len(commits) == 2:
commit_a, commit_b = commits
else:
p.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
out = subprocess.check_output(['git', 'rev-parse', commit_b])
commit_b = out.strip().decode('ascii')
out = subprocess.check_output(['git', 'rev-parse', commit_a])
commit_a = out.strip().decode('ascii')
cmd = ['asv', 'continuous', '-e', '-f', '1.05',
commit_a, commit_b] + bench_args
ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
sys.exit(ret)
if args.build_only:
sys.exit(0)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
# Run the tests under build/test
if not args.no_build:
test_dir = site_dir
else:
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage,
durations=args.durations,
tests=tests)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
import distutils.sysconfig
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
cvars = distutils.sysconfig.get_config_vars()
compiler = env.get('CC') or cvars.get('CC', '')
if 'gcc' in compiler:
# Check that this isn't clang masquerading as gcc.
if sys.platform != 'darwin' or 'gnu-gcc' in compiler:
# add flags used as werrors
warnings_as_errors = ' '.join([
# from tools/travis-test.sh
'-Werror=vla',
'-Werror=nonnull',
'-Werror=pointer-arith',
'-Wlogical-op',
# from sysconfig
'-Werror=unused-function',
])
env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ["build"]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + os.path.join(dst_dir, 'tmp_install_log.txt')]
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
site_dir_noarch = get_python_lib(prefix=dst_dir, plat_specific=False)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
if not os.path.exists(site_dir_noarch):
os.makedirs(site_dir_noarch)
env['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except:
p.kill()
p.wait()
raise
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
return site_dir, site_dir_noarch
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try: os.unlink(LCOV_OUTPUT_FILE)
except OSError: pass
try: shutil.rmtree(LCOV_HTML_DIR)
except OSError: pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
|
|
import requests
import json
import base64
import datetime
import collections
import re
# Local imports
import sabre_utils
import sabre_exceptions
from sabre_endpoints import sabre_endpoints
class SabreDevStudio(object):
def __init__(self, environment='test', return_obj=True):
self.auth_headers = None
self.client_id = None
self.client_secret = None
self.token = None
self.token_expiry = None
self.return_obj = return_obj
if environment == 'test':
self.host = 'https://api.test.sabre.com'
elif environment == 'prod':
self.host = 'https://api.sabre.com'
else: # default to test
self.host = 'https://api.test.sabre.com'
# init_with_config
# () -> ()
# Initializes the class with an ID and secret from a config file
# Useful for testing and interactive mode
def init_with_config(self, config_file='config.json'):
raw_data = open(config_file).read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
self.set_credentials(client_id, client_secret)
self.authenticate()
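# Example (illustrative sketch): a minimal config.json for init_with_config.
# The credential values are placeholders, not working keys.
#
#   {
#       "sabre_client_id": "V1:abc123:DEVCENTER:EXT",
#       "sabre_client_secret": "s3cr3t"
#   }
#
#   sds = SabreDevStudio(environment='test')
#   sds.init_with_config('config.json')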
# make_endpoint
# String -> String
# Converts a relative endpoint to an absolute URI
def make_endpoint(self, endpoint):
return self.host + endpoint
# set_credentials
# String -> String -> ()
# Sets the Sabre Dev Studio Client ID and Secret to the instance
# Must be done before token is requested
def set_credentials(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
# authenticate
# () -> ()
# This method uses the client ID and client secret provided in set_credentials
# to request the token from Sabre. The token is then saved in the internal state
# of the instance in self.token
def authenticate(self):
if not self.client_id or not self.client_secret:
raise sabre_exceptions.NoCredentialsProvided
token_resp = self.get_token_data(self.client_id, self.client_secret)
self.verify_response(token_resp)
token_json = token_resp.json()
self.token = token_json.get('access_token')
self.token_expiry = datetime.datetime.now() + datetime.timedelta(0, token_json.get('expires_in'))
def get_token_data(self, client_id, client_secret):
encoded = base64.b64encode(client_id.encode('utf-8')).decode('ascii') + ':' + \
base64.b64encode(client_secret.encode('utf-8')).decode('ascii')
encoded = base64.b64encode(encoded.encode('ascii')).decode('ascii')
headers = {
'Authorization': 'Basic ' + encoded,
'Content-Type': 'application/x-www-form-urlencoded'
}
payload = {
'grant_type': 'client_credentials'
}
data = requests.post(self.make_endpoint('/v2/auth/token/'),
headers=headers,
data=payload)
return data
# request
# String -> String -> Dictionary? -> Dictionary? -> (ResponseData or dict)
# The generic request function -- all API requests go through here
# Should be called by a higher-level wrapper like instaflights(...)
# method is a String, 'GET', 'PUT', 'PATCH', 'POST', or 'DELETE'
# endpoint is a relative endpoint
# payload is the data -- added as query params for GET
# Returns an object with the properties of the response data
def request(self, method, endpoint, payload=None, additional_headers=None):
now = datetime.datetime.now()
# Check for token
if not self.token:
raise sabre_exceptions.NotAuthorizedError
if not self.token_expiry:
pass
elif self.token_expiry < now:
# Authenticate again
self.authenticate()
endpoint = self.make_endpoint(endpoint)
auth_header = {
'Authorization': 'Bearer ' + self.token
}
headers = additional_headers.copy() if additional_headers else {}
headers.update(auth_header)
if method == 'GET':
resp = requests.get(endpoint, headers=headers, params=payload)
elif method == 'PUT':
resp = requests.put(endpoint, headers=headers, data=payload)
elif method == 'PATCH':
resp = requests.patch(endpoint, headers=headers, data=payload)
elif method == 'POST':
resp = requests.post(endpoint, headers=headers, data=payload)
elif method == 'DELETE':
resp = requests.delete(endpoint, headers=headers)
else:
raise ValueError('Unsupported HTTP method: %s' % method)
self.verify_response(resp)
if self.return_obj:
resp_data = self.process_response(resp.json())
else:
resp_data = resp.json()
return resp_data
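# Example (illustrative sketch): calling the generic request method directly;
# the higher-level wrappers below do exactly this.  "sds" is an authenticated
# SabreDevStudio instance and the query parameters are illustrative only.
#
#   resp = sds.request('GET', sabre_endpoints['instaflights'],
#                      payload={'origin': 'JFK', 'destination': 'LAX',
#                               'departuredate': '2025-01-01',
#                               'returndate': '2025-01-08'})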
# verify_response
# Response -> ()
# Checks the status code of a response and raises the appropriate exception
# if the status code is invalid (not in the 2xx range)
def verify_response(self, resp):
if 200 <= resp.status_code < 300:
pass
else:
if resp.status_code == 400:
raise sabre_exceptions.SabreErrorBadRequest(resp.json())
elif resp.status_code == 401:
raise sabre_exceptions.SabreErrorUnauthenticated(resp.json())
elif resp.status_code == 403:
raise sabre_exceptions.SabreErrorForbidden(resp.json())
elif resp.status_code == 404:
raise sabre_exceptions.SabreErrorNotFound(resp.json())
elif resp.status_code == 405:
raise sabre_exceptions.SabreErrorMethodNotAllowed()
elif resp.status_code == 406:
raise sabre_exceptions.SabreErrorNotAcceptable(resp.json())
elif resp.status_code == 429:
raise sabre_exceptions.SabreErrorRateLimited(resp.json())
elif resp.status_code == 500:
print(resp.text)
raise sabre_exceptions.SabreInternalServerError(resp.text)
elif resp.status_code == 503:
raise sabre_exceptions.SabreErrorServiceUnavailable
elif resp.status_code == 504:
raise sabre_exceptions.SabreErrorGatewayTimeout
# process_response
# JSON Dictionary -> ResponseData
# Converts a dictionary into a python object with Pythonic names
def process_response(self, json_obj):
sabre_utils.convert_keys(json_obj)
json_str = json.dumps(json_obj)
obj = json.loads(json_str,
object_hook=lambda d: collections.namedtuple('ResponseData', d.keys())(*d.values()))
return obj
# instaflights
# Dictionary -> ResponseData
# Executes a request to Sabre's instaflights endpoint with the options specified
def instaflights(self, options):
resp = self.request('GET', sabre_endpoints['instaflights'], options)
return resp
# flights_to
# String -> String? -> ResponseData
# Executes a request to Sabre's "Flights To" endpoint with the options specified
# Returns 20 of the lowest published fares available for a given destination
# Defaults to 'US' as point of sale
def flights_to(self, city_code, point_of_sale=None):
opts = {
'pointofsalecountry': point_of_sale
}
resp = self.request('GET',
sabre_endpoints['flights_to'] + '/' + city_code,
opts)
return resp
# lead_price
# String -> String -> [Number] -> String? -> Date? -> Number? ->
# Number? -> ResponseData
# Executes a request to Sabre's "Lead Price" endpoint with the arguments specified
# Gives the cheapest dates and fare for the specified origin, destination
# and length of stay
def lead_price(self, origin, destination, length_of_stay,
point_of_sale=None, departure_date=None, min_fare=None,
max_fare=None, other_opts={}):
opts = other_opts.copy()
opts['origin'] = origin
opts['destination'] = destination
if point_of_sale:
opts['pointofsalecountry'] = point_of_sale
else:
# Get point of sale country for origin
result = self.country_code_lookup(origin)
opts['pointofsalecountry'] = result if result else 'US'
if length_of_stay is not None and isinstance(length_of_stay, list):
opts['lengthofstay'] = ','.join(map(str, length_of_stay))
elif length_of_stay is not None:
opts['lengthofstay'] = length_of_stay
if departure_date:
opts['departuredate'] = self.convert_date(departure_date)
if min_fare:
opts['minfare'] = min_fare
if max_fare:
opts['maxfare'] = max_fare
resp = self.request('GET',
sabre_endpoints['lead_price'],
opts)
return resp
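# Example (illustrative sketch): cheapest dates and fares for a three or four
# night stay; the city codes are illustrative.
#
#   prices = sds.lead_price('SFO', 'LAS', [3, 4], point_of_sale='US')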
# lead_price_opts
# Dictionary -> ResponseData
# Executes a request to Sabre's "Lead Price" endpoint with the arguments specified
# Gives the cheapest dates and fare for the specified origin, destination
# and length of stay
def lead_price_opts(self, opts):
resp = self.request('GET',
sabre_endpoints['lead_price'],
opts)
return resp
# destination_finder
# Executes a request to Sabre's "Destination Finder" endpoint with the
# arguments specified
# Gives the cheapest fares from the specified origin, optionally filtered by
# destination, dates, length of stay, theme, region, and price
def destination_finder(self, origin, destination=None, length_of_stay=None,
point_of_sale=None,
departure_date=None, return_date=None,
earliest_departure_date=None, latest_departure_date=None,
min_fare=None, max_fare=None,
region=None, theme=None, location=None,
cost_per_mile=None,
other_opts={}):
opts = other_opts.copy()
opts['origin'] = origin
if point_of_sale:
opts['pointofsalecountry'] = point_of_sale
else:
# Get point of sale country for origin
result = self.country_code_lookup(origin)
opts['pointofsalecountry'] = result if result else 'US'
if destination:
opts['destination'] = destination
if length_of_stay is not None and isinstance(length_of_stay, list):
opts['lengthofstay'] = ','.join(map(str, length_of_stay))
elif length_of_stay is not None:
opts['lengthofstay'] = length_of_stay
if departure_date:
opts['departuredate'] = self.convert_date(departure_date)
if return_date:
opts['returndate'] = self.convert_date(return_date)
if earliest_departure_date:
opts['earliestdeparturedate'] = self.convert_date(earliest_departure_date)
if latest_departure_date:
opts['latestdeparturedate'] = self.convert_date(latest_departure_date)
if min_fare:
opts['minfare'] = min_fare
if max_fare:
opts['maxfare'] = max_fare
if region:
opts['region'] = region
if theme:
opts['theme'] = theme
if location:
opts['location'] = location
if cost_per_mile:
opts['pricepermile'] = cost_per_mile
resp = self.request('GET',
sabre_endpoints['destination_finder'],
opts)
return resp
# destination_finder_opts
# Dictionary -> ResponseData
# Executes a request to Sabre's "Destination Finder" endpoint with the options specified
# as query parameters
def destination_finder_opts(self, opts):
resp = self.request('GET',
sabre_endpoints['destination_finder'],
opts)
return resp
# top_destinations
# String -> String? -> String? -> Int? ->
# String? -> String? -> Int? -> ResponseData
# Executes a request to Sabre's "Top Destinations" endpoint with the
# options specified. Returns most popular destinations based on the params.
# origin is 2 characters => interpreted as country
# origin is 3 characters => interpreted as city
# destinationtype = ['DOMESTIC', 'INTERNATIONAL', 'OVERALL']
# weeks is the number of weeks to look back for data
def top_destinations(self, origin, destination_type=None,
theme=None, num_results=20, destination_country=None,
region=None, weeks=2):
opts = {}
if len(origin) == 2:
opts['origincountry'] = origin
else:
opts['origin'] = origin
if destination_type:
opts['destinationtype'] = destination_type
if theme:
opts['theme'] = theme
if num_results:
opts['topdestinations'] = num_results
if destination_country:
opts['destinationcountry'] = destination_country
if region:
opts['region'] = region
if weeks:
opts['lookbackweeks'] = weeks
resp = self.request('GET',
sabre_endpoints['top_destinations'],
opts)
return resp
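# Example (illustrative sketch): the origin is treated as a country when it is
# two characters long and as a city when it is three; the codes below are
# illustrative.
#
#   by_city = sds.top_destinations('NYC', destination_type='DOMESTIC')
#   by_country = sds.top_destinations('DE', destination_type='INTERNATIONAL',
#                                     weeks=4)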
# top_destinations_opts
# Dictionary -> ResponseData
# Executes a request to Sabre's "Top Destinations" endpoint with the
# options specified as query parameters.
def top_destinations_opts(self, opts):
resp = self.request('GET',
sabre_endpoints['top_destinations'],
opts)
return resp
# country_code_lookup
# String -> String?
# Finds a country code given an airport/city code
def country_code_lookup(self, code):
opts = [{
"GeoCodeRQ": {
"PlaceById": {
"Id": code,
"BrowseCategory": {
"name": "AIR"
}
}
}
}]
try:
resp = self.request('POST',
sabre_endpoints['geo_code'],
json.dumps(opts, sort_keys=True),
additional_headers={'Content-Type': 'application/json'})
code = resp.results[0].geo_code_rs.place[0].country
return code
except Exception:
return None
# alliance_lookup
# String -> ResponseData
# Gets a list of airlines for a given alliance
def alliance_lookup(self, alliance_code):
if alliance_code not in ['*A', '*O', '*S']:
return None
else:
resp = self.request('GET',
sabre_endpoints['alliance_lookup'],
{ 'alliancecode': alliance_code })
return resp
# equipment_lookup
# String -> String
# Returns the aircraft name associated with a specified IATA aircraft equipment code
def equipment_lookup(self, aircraft_code):
resp = self.request('GET',
sabre_endpoints['equipment_lookup'],
{ 'aircraftcode': aircraft_code })
try:
return resp.aircraft_info[0].aircraft_name
except (AttributeError, IndexError, TypeError):
return None
# multi_city_airport_lookup
# String -> ResponseData
# Returns the cities in a given country (supplied as a two-letter country code)
def multi_city_airport_lookup(self, country_code):
resp = self.request('GET',
sabre_endpoints['multi_city_airport_lookup'],
{ 'country': country_code })
return resp.cities if resp else None
# countries_lookup
# String -> ResponseData
# Returns the valid origin/destination countries for a given point of sale
# Origin countries: resp.origin_countries
# Destination countries: resp.destination_countries
def countries_lookup(self, point_of_sale='US'):
resp = self.request('GET',
sabre_endpoints['countries_lookup'],
{ 'pointofsalecountry': point_of_sale })
return resp
# city_pairs_lookup
# String -> String? -> String? -> String? -> String? -> String? -> ResponseData
# Returns the valid origin/destination city pairs for
# a given point of sale & country
def city_pairs_lookup(self, endpoint, point_of_sale=None, origin_country=None,
destination_country=None, origin_region=None,
destination_region=None):
if endpoint not in ['shop', 'historical', 'forecast']:
error_string = "Invalid endpoint %s specified for city pairs lookup" % endpoint
raise sabre_exceptions.InvalidInputError(error_string)
else:
endpoint = 'city_pairs_' + endpoint + '_lookup'
opts = {
'pointofsalecountry': point_of_sale,
}
if origin_country:
opts['origincountry'] = origin_country
if destination_country:
opts['destinationcountry'] = destination_country
if origin_region:
opts['originregion'] = origin_region
if destination_region:
opts['destinationregion'] = destination_region
resp = self.request('GET',
sabre_endpoints[endpoint],
opts)
return resp
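# Example (illustrative sketch): valid city pairs from the historical data set
# for a hypothetical point of sale and origin country.
#
#   pairs = sds.city_pairs_lookup('historical', point_of_sale='US',
#                                 origin_country='US')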
# city_pairs_lookup_opts
# String -> Dictionary -> ResponseData
# Returns the valid origin/destination city pairs for
# a given point of sale & country
def city_pairs_lookup_opts(self, endpoint, opts):
if endpoint not in ['shop', 'historical', 'forecast']:
error_string = "Invalid endpoint %s specified for city pairs lookup" % endpoint
raise sabre_exceptions.InvalidInputError(error_string)
else:
endpoint = 'city_pairs_' + endpoint + '_lookup'
resp = self.request('GET',
sabre_endpoints[endpoint],
opts)
return resp
|
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure openvswitch flows on XenServer hosts.
"""
import os
import sys
# This is written to Python 2.4, since that is what is available on XenServer
import netaddr
import simplejson as json
from novalib import execute, execute_get_output
OVS_OFCTL = '/usr/bin/ovs-ofctl'
class OvsFlow(object):
def __init__(self, bridge, params):
self.bridge = bridge
self.params = params
def add(self, rule):
execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params)
def clear_flows(self, ofport):
execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport)
def main(command, vif_raw, net_type):
if command not in ('online', 'offline'):
return
vif_name, dom_id, vif_index = vif_raw.split('-')
vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
bridge = execute_get_output('/usr/bin/ovs-vsctl', 'iface-to-br', vif)
xsls = execute_get_output('/usr/bin/xenstore-ls',
'/local/domain/%s/vm-data/networking' % dom_id)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsread = execute_get_output('/usr/bin/xenstore-read',
'/local/domain/%s/vm-data/networking/%s' %
(dom_id, mac))
data = json.loads(xsread)
if data["label"] == "public":
this_vif = "vif%s.0" % dom_id
phys_dev = "eth0"
else:
this_vif = "vif%s.1" % dom_id
phys_dev = "eth1"
if vif == this_vif:
vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
'Interface', vif, 'ofport')
phys_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
'Interface', phys_dev, 'ofport')
params = dict(VIF_NAME=vif,
MAC=data['mac'],
OF_PORT=vif_ofport,
PHYS_PORT=phys_ofport)
ovs = OvsFlow(bridge, params)
if command == 'offline':
# I haven't found a way to clear only IPv4 or IPv6 rules.
ovs.clear_flows(vif_ofport)
if command == 'online':
if net_type in ('ipv4', 'all') and 'ips' in data:
for ip4 in data['ips']:
ovs.params.update({'IPV4_ADDR': ip4['ip']})
apply_ovs_ipv4_flows(ovs, bridge, params)
if net_type in ('ipv6', 'all') and 'ip6s' in data:
for ip6 in data['ip6s']:
link_local = str(netaddr.EUI(data['mac']).eui64()\
.ipv6_link_local())
ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local})
ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']})
apply_ovs_ipv6_flows(ovs, bridge, params)
def apply_ovs_ipv4_flows(ovs, bridge, params):
# When ARP traffic arrives from a vif, push it to virtual port
# 9999 for further processing
ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=%(IPV4_ADDR)s,arp_sha=%(MAC)s,actions=resubmit:9999")
ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=0.0.0.0,arp_sha=%(MAC)s,actions=resubmit:9999")
# When IP traffic arrives from a vif, push it to virtual port 9999
# for further processing
ovs.add("priority=4,ip,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=%(IPV4_ADDR)s,actions=resubmit:9999")
# Drop IP bcast/mcast
ovs.add("priority=6,ip,in_port=%(OF_PORT)s,dl_dst=ff:ff:ff:ff:ff:ff,"
"actions=drop")
ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=224.0.0.0/4,"
"actions=drop")
ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=240.0.0.0/4,"
"actions=drop")
# Pass ARP requests coming from any VMs on the local HV (port
# 9999) or coming from external sources (PHYS_PORT) to the VM and
# physical NIC. We output this to the physical NIC as well, since
# with instances of shared ip groups, the active host for the
# destination IP might be elsewhere...
ovs.add("priority=3,arp,in_port=9999,nw_dst=%(IPV4_ADDR)s,"
"actions=output:%(OF_PORT)s,output:%(PHYS_PORT)s")
    # Pass ARP traffic originating from external sources to the VM with
    # the matching IP address
ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,nw_dst=%(IPV4_ADDR)s,"
"actions=output:%(OF_PORT)s")
# Pass ARP traffic from one VM (src mac already validated) to
# another VM on the same HV
ovs.add("priority=3,arp,in_port=9999,dl_dst=%(MAC)s,"
"actions=output:%(OF_PORT)s")
# Pass ARP replies coming from the external environment to the
# target VM
ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s,"
"actions=output:%(OF_PORT)s")
# ALL IP traffic: Pass IP data coming from any VMs on the local HV
# (port 9999) or coming from external sources (PHYS_PORT) to the
# VM and physical NIC. We output this to the physical NIC as
# well, since with instances of shared ip groups, the active host
# for the destination IP might be elsewhere...
ovs.add("priority=3,ip,in_port=9999,dl_dst=%(MAC)s,"
"nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s,"
"output:%(PHYS_PORT)s")
# Pass IP traffic from the external environment to the VM
ovs.add("priority=3,ip,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s,"
"nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s")
# Send any local traffic to the physical NIC's OVS port for
# physical network learning
ovs.add("priority=2,in_port=9999,actions=output:%(PHYS_PORT)s")
def apply_ovs_ipv6_flows(ovs, bridge, params):
# allow valid IPv6 ND outbound (are both global and local IPs needed?)
# Neighbor Solicitation
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal")
# Neighbor Advertisement
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal")
    # drop all other neighbor discovery (required because we permit all icmp6 below)
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
    # do not allow sending specific ICMPv6 types
# Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
# Redirect Gateway
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop")
# Mobile Prefix Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop")
# Mobile Prefix Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop")
# Multicast Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop")
# Multicast Router Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop")
# Multicast Router Termination
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop")
# allow valid IPv6 outbound, by type
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal")
# all else will be dropped ...
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \
os.path.basename(sys.argv[0])
sys.exit(1)
else:
command, vif_raw, net_type = sys.argv[1:4]
main(command, vif_raw, net_type)
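# Example invocation matching the usage string above (hypothetical dom id and
# vif index): running this script as
#   <this-script> online vif-12-0 all
# splits 'vif-12-0' into ('vif', '12', '0') and configures flows for the OVS
# interface vif12.0.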
|
|
#Boa:Frame:frmQueryBuilder
import wx
import wx.richtext
def create(parent):
return frmQueryBuilder(parent)
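# Typical usage sketch (assumes 'parent' is an existing wx window; the optional
# filter string pre-populates the query text box):
#
#   dlg = frmQueryBuilder(parent, filter="ValueCount > 100")
#   dlg.ShowModal()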
[wxID_FRMQUERYBUILDER, wxID_FRMQUERYBUILDERBTNALL, wxID_FRMQUERYBUILDERBTNAND,
wxID_FRMQUERYBUILDERBTNAPPLY, wxID_FRMQUERYBUILDERBTNCANCEL,
wxID_FRMQUERYBUILDERBTNCLEAR, wxID_FRMQUERYBUILDERBTNEQUAL,
wxID_FRMQUERYBUILDERBTNGETUNIQUE, wxID_FRMQUERYBUILDERBTNGREATTHAN,
wxID_FRMQUERYBUILDERBTNGTEQUAL, wxID_FRMQUERYBUILDERBTNISNOTNULL,
wxID_FRMQUERYBUILDERBTNISNULL, wxID_FRMQUERYBUILDERBTNLESSTHAN,
wxID_FRMQUERYBUILDERBTNLIKE, wxID_FRMQUERYBUILDERBTNLTEQUAL,
wxID_FRMQUERYBUILDERBTNNOT, wxID_FRMQUERYBUILDERBTNNOTEQUAL,
wxID_FRMQUERYBUILDERBTNOR, wxID_FRMQUERYBUILDERBTNPAREN,
wxID_FRMQUERYBUILDERLBLCOLUMNS, wxID_FRMQUERYBUILDERLBLMAXIMUM,
wxID_FRMQUERYBUILDERLBLMINIMUM, wxID_FRMQUERYBUILDERLBLQUERY,
wxID_FRMQUERYBUILDERLBLVALUES, wxID_FRMQUERYBUILDERLISTCOLUMNS,
wxID_FRMQUERYBUILDERLISTUNIQEVALUES, wxID_FRMQUERYBUILDERPANEL1,
wxID_FRMQUERYBUILDERTBQUERY, wxID_FRMQUERYBUILDERTXTMAX,
wxID_FRMQUERYBUILDERTXTMIN,
] = [wx.NewId() for _init_ctrls in range(30)]
class frmQueryBuilder(wx.Dialog):
def _init_ctrls(self, prnt, filter= None):
# generated method, don't edit
wx.Dialog.__init__(self, id=wxID_FRMQUERYBUILDER,
name=u'frmQueryBuilder', parent=prnt, pos=wx.Point(547, 281),
size=wx.Size(379, 469), style=wx.DEFAULT_DIALOG_STYLE,
title=u'Advanced Query')
self.SetClientSize(wx.Size(363, 431))
self.SetMaxSize(wx.Size(379, 469))
self.SetMinSize(wx.Size(379, 469))
self.panel1 = wx.Panel(id=wxID_FRMQUERYBUILDERPANEL1, name='panel1',
parent=self, pos=wx.Point(0, 0), size=wx.Size(363, 431),
style=wx.TAB_TRAVERSAL)
self.listColumns = wx.ListBox(choices=["SeriesID", "SiteID",
"VariableID", "MethodID", "SourceID", "QualityControlLevelID",
"SiteName", "SiteCode", "Latitude", "Longitude", "VariableName",
"VariableCode", "DataType", "Value Type", "Speciation",
"SampleMedium", "TimeSupport", "GeneralCategory", "NoDataValue",
"VarialbeUnitsName", "TimeUnitsName", "MethodDescription",
"SourceDescription", "Organization", "Citation",
"QualityControlLevelCode", "QualityControlLevelDefinition",
"BeginDateTime", "EndDateTime", "BeginDateTimeUTC",
"EndDateTimeUTC", "ValueCount", "Checked"],
id=wxID_FRMQUERYBUILDERLISTCOLUMNS, name=u'listColumns',
parent=self.panel1, pos=wx.Point(16, 24), size=wx.Size(152, 152),
style=0)
self.listColumns.Bind(wx.EVT_LISTBOX_DCLICK,
self.OnListColumnsListboxDclick,
id=wxID_FRMQUERYBUILDERLISTCOLUMNS)
self.listColumns.Bind(wx.EVT_LISTBOX, self.OnListColumnsListbox,
id=wxID_FRMQUERYBUILDERLISTCOLUMNS)
self.tbQuery = wx.richtext.RichTextCtrl(id=wxID_FRMQUERYBUILDERTBQUERY,
parent=self.panel1, pos=wx.Point(16, 264), size=wx.Size(328, 128),
style=wx.richtext.RE_MULTILINE, value=u'')
self.tbQuery.SetLabel(u'')
self.tbQuery.SetName(u'tbQuery')
self.tbQuery.SetValue(str(filter))
self.listUniqeValues = wx.ListBox(choices=[],
id=wxID_FRMQUERYBUILDERLISTUNIQEVALUES, name=u'listUniqeValues',
parent=self.panel1, pos=wx.Point(216, 24), size=wx.Size(132, 152),
style=0)
self.txtMin = wx.TextCtrl(id=wxID_FRMQUERYBUILDERTXTMIN, name=u'txtMin',
parent=self.panel1, pos=wx.Point(256, 216), size=wx.Size(80, 21),
style=0, value='')
self.txtMax = wx.TextCtrl(id=wxID_FRMQUERYBUILDERTXTMAX, name=u'txtMax',
parent=self.panel1, pos=wx.Point(256, 240), size=wx.Size(84, 21),
style=0, value='')
self.lblColumns = wx.StaticText(id=wxID_FRMQUERYBUILDERLBLCOLUMNS,
label=u'Column Names:', name=u'lblColumns', parent=self.panel1,
pos=wx.Point(16, 8), size=wx.Size(75, 13), style=0)
self.lblValues = wx.StaticText(id=wxID_FRMQUERYBUILDERLBLVALUES,
label=u'Unique Values', name=u'lblValues', parent=self.panel1,
pos=wx.Point(216, 8), size=wx.Size(68, 13), style=0)
self.lblQuery = wx.StaticText(id=wxID_FRMQUERYBUILDERLBLQUERY,
label=u'SELECT * FROM [Attributes] WHERE', name=u'lblQuery',
parent=self.panel1, pos=wx.Point(16, 248), size=wx.Size(176, 13),
style=0)
self.lblMinimum = wx.StaticText(id=wxID_FRMQUERYBUILDERLBLMINIMUM,
label=u'Minimum', name=u'lblMinimum', parent=self.panel1,
pos=wx.Point(208, 213), size=wx.Size(41, 11), style=0)
self.lblMaximum = wx.StaticText(id=wxID_FRMQUERYBUILDERLBLMAXIMUM,
label=u'Maximum', name=u'lblMaximum', parent=self.panel1,
pos=wx.Point(208, 240), size=wx.Size(45, 13), style=0)
self.btnIsNull = wx.Button(id=wxID_FRMQUERYBUILDERBTNISNULL,
label=u'Is Null', name=u'btnIsNull', parent=self.panel1,
pos=wx.Point(16, 184), size=wx.Size(40, 23), style=0)
self.btnIsNull.Bind(wx.EVT_BUTTON, self.OnBtnIsNullButton,
id=wxID_FRMQUERYBUILDERBTNISNULL)
self.btnApply = wx.Button(id=wxID_FRMQUERYBUILDERBTNAPPLY,
label=u'Apply', name=u'btnApply', parent=self.panel1,
pos=wx.Point(200, 400), size=wx.Size(75, 23), style=0)
self.btnApply.Bind(wx.EVT_BUTTON, self.OnBtnApplyButton,
id=wxID_FRMQUERYBUILDERBTNAPPLY)
self.btnNot = wx.Button(id=wxID_FRMQUERYBUILDERBTNNOT, label=u'Not',
name=u'btnNot', parent=self.panel1, pos=wx.Point(160, 216),
size=wx.Size(40, 23), style=0)
self.btnNot.Bind(wx.EVT_BUTTON, self.OnBtnNotButton,
id=wxID_FRMQUERYBUILDERBTNNOT)
self.btnOr = wx.Button(id=wxID_FRMQUERYBUILDERBTNOR, label=u'OR',
name=u'btnOr', parent=self.panel1, pos=wx.Point(120, 216),
size=wx.Size(32, 23), style=0)
self.btnOr.Bind(wx.EVT_BUTTON, self.OnBtnOrButton,
id=wxID_FRMQUERYBUILDERBTNOR)
self.btnAnd = wx.Button(id=wxID_FRMQUERYBUILDERBTNAND, label=u'AND',
name=u'btnAnd', parent=self.panel1, pos=wx.Point(72, 216),
size=wx.Size(40, 23), style=0)
self.btnAnd.Bind(wx.EVT_BUTTON, self.OnBtnAndButton,
id=wxID_FRMQUERYBUILDERBTNAND)
self.btnAll = wx.Button(id=wxID_FRMQUERYBUILDERBTNALL, label=u'*',
name=u'btnAll', parent=self.panel1, pos=wx.Point(216, 184),
size=wx.Size(32, 23), style=0)
self.btnAll.Bind(wx.EVT_BUTTON, self.OnBtnAllButton,
id=wxID_FRMQUERYBUILDERBTNALL)
self.btnEqual = wx.Button(id=wxID_FRMQUERYBUILDERBTNEQUAL, label=u'=',
name=u'btnEqual', parent=self.panel1, pos=wx.Point(176, 24),
size=wx.Size(32, 23), style=0)
self.btnEqual.Bind(wx.EVT_BUTTON, self.OnBtnEqualButton,
id=wxID_FRMQUERYBUILDERBTNEQUAL)
self.btnParen = wx.Button(id=wxID_FRMQUERYBUILDERBTNPAREN, label=u'( )',
name=u'btnParen', parent=self.panel1, pos=wx.Point(136, 184),
size=wx.Size(32, 24), style=0)
self.btnParen.Bind(wx.EVT_BUTTON, self.OnBtnParenButton,
id=wxID_FRMQUERYBUILDERBTNPAREN)
self.btnLike = wx.Button(id=wxID_FRMQUERYBUILDERBTNLIKE, label=u'Like',
name=u'btnLike', parent=self.panel1, pos=wx.Point(16, 216),
size=wx.Size(48, 23), style=0)
self.btnLike.Bind(wx.EVT_BUTTON, self.OnBtnLikeButton,
id=wxID_FRMQUERYBUILDERBTNLIKE)
self.btnGTEqual = wx.Button(id=wxID_FRMQUERYBUILDERBTNGTEQUAL,
label=u'>=', name=u'btnGTEqual', parent=self.panel1,
pos=wx.Point(176, 152), size=wx.Size(32, 23), style=0)
self.btnGTEqual.Bind(wx.EVT_BUTTON, self.OnBtnGTEqualButton,
id=wxID_FRMQUERYBUILDERBTNGTEQUAL)
self.btnLTEqual = wx.Button(id=wxID_FRMQUERYBUILDERBTNLTEQUAL,
label=u'<=', name=u'btnLTEqual', parent=self.panel1,
pos=wx.Point(176, 184), size=wx.Size(32, 23), style=0)
self.btnLTEqual.Bind(wx.EVT_BUTTON, self.OnBtnLTEqualButton,
id=wxID_FRMQUERYBUILDERBTNLTEQUAL)
self.btnNotEqual = wx.Button(id=wxID_FRMQUERYBUILDERBTNNOTEQUAL,
label=u'<>', name=u'btnNotEqual', parent=self.panel1,
pos=wx.Point(176, 56), size=wx.Size(32, 23), style=0)
self.btnNotEqual.Bind(wx.EVT_BUTTON, self.OnBtnNotEqualButton,
id=wxID_FRMQUERYBUILDERBTNNOTEQUAL)
self.btnLessThan = wx.Button(id=wxID_FRMQUERYBUILDERBTNLESSTHAN,
label=u'<', name=u'btnLessThan', parent=self.panel1,
pos=wx.Point(176, 120), size=wx.Size(32, 23), style=0)
self.btnLessThan.Bind(wx.EVT_BUTTON, self.OnBtnLessThanButton,
id=wxID_FRMQUERYBUILDERBTNLESSTHAN)
self.btnGetUnique = wx.Button(id=wxID_FRMQUERYBUILDERBTNGETUNIQUE,
label=u'Get Unique Values', name=u'btnGetUnique',
parent=self.panel1, pos=wx.Point(256, 184), size=wx.Size(99, 23),
style=0)
self.btnGetUnique.Bind(wx.EVT_BUTTON, self.OnBtnGetUniqueButton,
id=wxID_FRMQUERYBUILDERBTNGETUNIQUE)
self.btnGreatThan = wx.Button(id=wxID_FRMQUERYBUILDERBTNGREATTHAN,
label=u'>', name=u'btnGreatThan', parent=self.panel1,
pos=wx.Point(176, 88), size=wx.Size(32, 23), style=0)
self.btnGreatThan.Bind(wx.EVT_BUTTON, self.OnBtnGreatThanButton,
id=wxID_FRMQUERYBUILDERBTNGREATTHAN)
self.btnIsNotNull = wx.Button(id=wxID_FRMQUERYBUILDERBTNISNOTNULL,
label=u'Is Not Null', name=u'btnIsNotNull', parent=self.panel1,
pos=wx.Point(64, 184), size=wx.Size(64, 24), style=0)
self.btnIsNotNull.Bind(wx.EVT_BUTTON, self.OnBtnIsNotNullButton,
id=wxID_FRMQUERYBUILDERBTNISNOTNULL)
self.btnCancel = wx.Button(id=wxID_FRMQUERYBUILDERBTNCANCEL,
label=u'Cancel', name=u'btnCancel', parent=self.panel1,
pos=wx.Point(280, 400), size=wx.Size(75, 23), style=0)
self.btnCancel.Bind(wx.EVT_BUTTON, self.OnBtnCancelButton,
id=wxID_FRMQUERYBUILDERBTNCANCEL)
self.btnClear = wx.Button(id=wxID_FRMQUERYBUILDERBTNCLEAR,
label=u'Clear Query', name=u'btnClear', parent=self.panel1,
pos=wx.Point(120, 400), size=wx.Size(75, 23), style=0)
self.btnClear.Bind(wx.EVT_BUTTON, self.OnBtnClearButton,
id=wxID_FRMQUERYBUILDERBTNCLEAR)
def __init__(self, parent, filter = None):
self._init_ctrls(parent, filter)
def OnBtnCancelButton(self, event):
## return ""
self.Close()
self.Destroy()
def OnBtnApplyButton(self, event):
## return self.tbQuery.GetValue()
self.Close()
self.Destroy()
def OnBtnClearButton(self, event):
self.tbQuery.SetValue("")
event.Skip()
def OnBtnEqualButton(self, event):
event.Skip()
def OnBtnNotEqualButton(self, event):
event.Skip()
def OnListColumnsListboxDclick(self, event):
event.Skip()
def OnBtnIsNullButton(self, event):
event.Skip()
def OnBtnNotButton(self, event):
event.Skip()
def OnBtnOrButton(self, event):
event.Skip()
def OnBtnAndButton(self, event):
event.Skip()
def OnBtnAllButton(self, event):
event.Skip()
def OnBtnParenButton(self, event):
event.Skip()
def OnBtnLikeButton(self, event):
event.Skip()
def OnBtnGTEqualButton(self, event):
event.Skip()
def OnBtnLTEqualButton(self, event):
event.Skip()
def OnBtnLessThanButton(self, event):
event.Skip()
def OnBtnGetUniqueButton(self, event):
event.Skip()
def OnBtnGreatThanButton(self, event):
event.Skip()
def OnBtnIsNotNullButton(self, event):
event.Skip()
def OnListColumnsListbox(self, event):
event.Skip()
|
|
import fnmatch
import glob
import os
import re
import tempfile
from datetime import datetime
from gppylib import gplog
from gppylib.commands.base import WorkerPool, Command, REMOTE
from gppylib.commands.unix import Scp
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL
from gppylib.gparray import GpArray
from gppylib.mainUtils import gp
from gppylib import pgconf
from optparse import Values
from pygresql import pg
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
import gzip
logger = gplog.get_default_logger()
class Context(Values, object):
filename_dict = {
"ao": ("dump", "_ao_state_file"), "cdatabase": ("cdatabase_1_1", ""), "co": ("dump", "_co_state_file"), "dirty_table": ("dump", "_dirty_list"),
"dump": ("dump_%d_%d", ""), "files": ("dump", "_regular_files"), "filter": ("dump", "_filter"), "global": ("global_1_1", ""),
"increments": ("dump", "_increments"), "last_operation": ("dump", "_last_operation"), "master_config": ("master_config_files", ".tar"),
"metadata": ("dump_1_1", ""), "partition_list": ("dump", "_table_list"), "pipes": ("dump", "_pipes"), "plan": ("restore", "_plan"),
"postdata": ("dump_1_1", "_post_data"), "report": ("dump", ".rpt"), "schema": ("dump", "_schema"), "segment_config": ("segment_config_files_%d_%d", ".tar"),
"stats": ("statistics_1_1", ""), "status": ("dump_status_%d_%d", ""),
}
defaults = {
"backup_dir": None, "batch_default": 64, "change_schema": None, "cleanup_date": None, "cleanup_total": None, "clear_catalog_dumps": False,
"clear_dumps": False, "clear_dumps_only": False, "compress": True, "db_host_path": None, "ddboost": False, "ddboost_backupdir": None, "ddboost_config_remove": False,
"ddboost_hosts": None, "ddboost_ping": True, "ddboost_remote": False, "ddboost_show_config": False, "ddboost_storage_unit": None, "ddboost_user": None,
"ddboost_verify": False, "drop_db": False, "dump_config": False, "dump_databases": [], "dump_dir": "db_dumps", "dump_global": False, "dump_prefix": "",
"dump_schema": "", "dump_stats": False, "encoding": None, "exclude_dump_schema": "", "exclude_dump_tables": "", "exclude_dump_tables_file": "",
"exclude_schema_file": "", "free_space_percent": None, "history": False, "include_dump_tables": "", "include_dump_tables_file": "", "include_email_file": "",
"include_schema_file": "", "incremental": False, "list_backup_files": False, "list_filter_tables": False, "local_dump_prefix": None, "masterDataDirectory": None,
"master_port": 0, "max_streams": None, "netbackup_block_size": None, "netbackup_keyword": None, "netbackup_policy": None, "netbackup_schedule": None,
"netbackup_service_host": None, "metadata_only": False, "no_analyze": False, "no_ao_stats": False, "no_plan": False, "no_validate_table_name": False,
"output_options": [], "post_script": "", "redirected_restore_db": None, "replicate": False, "report_dir": "", "report_status_dir": "", "restore_db": None,
"restore_global": False, "restore_schemas": None, "restore_stats": None, "restore_tables": [], "timestamp": None, "timestamp_key": None,
}
def __init__(self, values=None):
if values:
self.defaults.update(values.__dict__) # Ensure that context has default values for all unset variables
super(self.__class__, self).__init__(vars(Values(self.defaults)))
if self.masterDataDirectory:
self.master_datadir = self.masterDataDirectory
else:
self.master_datadir = gp.get_masterdatadir()
self.master_port = self.get_master_port()
if self.local_dump_prefix:
self.dump_prefix = self.local_dump_prefix + "_"
else:
self.dump_prefix = ""
if not self.include_dump_tables: self.include_dump_tables = []
if not self.exclude_dump_tables: self.exclude_dump_tables = []
if not self.output_options: self.output_options = []
if not self.dump_schema: self.dump_schema = []
if not self.exclude_dump_schema: self.exclude_dump_schema = []
if self.netbackup_keyword and (len(self.netbackup_keyword) > 100):
raise Exception('NetBackup Keyword provided has more than max limit (100) characters. Cannot proceed with backup.')
def get_master_port(self):
pgconf_dict = pgconf.readfile(self.master_datadir + "/postgresql.conf")
return pgconf_dict.int('port')
def generate_filename(self, filetype, dbid=1, timestamp=None, directory=None):
if timestamp is None:
timestamp = self.timestamp
if directory:
use_dir = directory
else:
use_dir = self.get_backup_dir(timestamp)
format_str = "%s/%sgp_%s_%s%s" % (use_dir, self.dump_prefix, "%s", timestamp, "%s")
filename = format_str % (self.filename_dict[filetype][0], self.filename_dict[filetype][1])
if "%d" in filename:
if dbid == 1:
filename = filename % (1, 1)
else:
filename = filename % (0, dbid)
if self.compress and filetype in ["metadata", "dump", "postdata"]:
filename += ".gz"
return filename
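    # Sketch of the resulting paths (hypothetical timestamp, default dump_dir,
    # no dump_prefix, compression enabled):
    #   generate_filename('report', timestamp='20160101010101')
    #     -> <backup root>/db_dumps/20160101/gp_dump_20160101010101.rpt
    #   generate_filename('dump', dbid=3, timestamp='20160101010101')
    #     -> <backup root>/db_dumps/20160101/gp_dump_0_3_20160101010101.gz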
def generate_prefix(self, filetype, dbid=1, timestamp=None):
if timestamp is None:
timestamp = self.timestamp
format_str = "%sgp_%s_" % (self.dump_prefix, "%s")
filename = format_str % (self.filename_dict[filetype][0])
if "%d" in filename:
if dbid == 1:
filename = filename % (1, 1)
else:
filename = filename % (0, dbid)
return filename
def get_backup_dir(self, timestamp=None):
if self.backup_dir and not self.ddboost:
use_dir = self.backup_dir
elif self.master_datadir:
use_dir = self.master_datadir
else:
raise Exception("Cannot locate backup directory with existing parameters")
if timestamp:
use_timestamp = timestamp
else:
use_timestamp = self.timestamp
if not use_timestamp:
raise Exception("Cannot locate backup directory without timestamp")
if not validate_timestamp(use_timestamp):
raise Exception('Invalid timestamp: "%s"' % use_timestamp)
return "%s/%s/%s" % (use_dir, self.dump_dir, use_timestamp[0:8])
def get_backup_root(self):
if self.backup_dir and not self.ddboost:
return self.backup_dir
else:
return self.master_datadir
def get_gpd_path(self):
gpd_path = os.path.join(self.dump_dir, self.timestamp[0:8])
if self.backup_dir:
gpd_path = os.path.join(self.backup_dir, gpd_path)
return gpd_path
def get_date_dir(self):
return os.path.join(self.get_backup_root(), self.dump_dir, self.db_date_dir)
def backup_dir_is_writable(self):
if self.backup_dir and not self.report_status_dir:
try:
check_dir_writable(self.get_backup_dir())
except Exception as e:
logger.warning('Backup directory %s is not writable. Error %s' % (self.get_backup_dir(), str(e)))
logger.warning('Since --report-status-dir option is not specified, report and status file will be written in segment data directory.')
return False
return True
def generate_dump_timestamp(self):
if self.timestamp_key:
timestamp_key = self.timestamp_key
else:
timestamp_key = datetime.now().strftime("%Y%m%d%H%M%S")
if not validate_timestamp(timestamp_key):
raise Exception('Invalid timestamp key')
year = int(timestamp_key[:4])
month = int(timestamp_key[4:6])
day = int(timestamp_key[6:8])
hours = int(timestamp_key[8:10])
minutes = int(timestamp_key[10:12])
seconds = int(timestamp_key[12:14])
self.timestamp = timestamp_key
self.db_date_dir = "%4d%02d%02d" % (year, month, day)
self.timestamp_object = datetime(year, month, day, hours, minutes, seconds)
def expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix):
expanded_partitions = expand_partition_tables(dbname, partition_list)
dump_partition_list = list(set(expanded_partitions + partition_list))
return create_temp_file_from_list(dump_partition_list, file_prefix)
def populate_filter_tables(table, rows, non_partition_tables, partition_leaves):
if not rows:
non_partition_tables.append(table)
else:
for (schema_name, partition_leaf_name) in rows:
partition_leaf = schema_name.strip() + '.' + partition_leaf_name.strip()
partition_leaves.append(partition_leaf)
return (non_partition_tables, partition_leaves)
def get_all_parent_tables(dbname):
SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions"
data = []
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, SQL)
data = curs.fetchall()
return set([d[0] for d in data])
def list_to_quoted_string(filter_tables):
filter_string = "'" + "', '".join([pg.escape_string(t) for t in filter_tables]) + "'"
return filter_string
def convert_parents_to_leafs(dbname, parents):
partition_leaves_sql = """
SELECT x.partitionschemaname || '.' || x.partitiontablename
FROM (
SELECT distinct schemaname, tablename, partitionschemaname, partitiontablename, partitionlevel
FROM pg_partitions
WHERE schemaname || '.' || tablename in (%s)
) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel
FROM pg_partitions
group by (tablename, schemaname)
) as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel = Y.maxlevel;
"""
if not parents:
return []
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
partition_sql = partition_leaves_sql % list_to_quoted_string(parents)
curs = dbconn.execSQL(conn, partition_sql)
rows = curs.fetchall()
return [r[0] for r in rows]
#input: list of tables to be filtered
#output: same list but parent tables converted to leafs
def expand_partition_tables(dbname, filter_tables):
if not filter_tables or len(filter_tables) == 0:
return filter_tables
parent_tables = list()
non_parent_tables = list()
expanded_list = list()
all_parent_tables = get_all_parent_tables(dbname)
for table in filter_tables:
if table in all_parent_tables:
parent_tables.append(table)
else:
non_parent_tables.append(table)
expanded_list += non_parent_tables
local_batch_size = 1000
for (s, e) in get_batch_from_list(len(parent_tables), local_batch_size):
tmp = convert_parents_to_leafs(dbname, parent_tables[s:e])
expanded_list += tmp
return expanded_list
def get_batch_from_list(length, batch_size):
indices = []
for i in range(0, length, batch_size):
indices.append((i, i+batch_size))
return indices
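# Small worked example (pure arithmetic, no assumptions about the caller):
#   get_batch_from_list(2500, 1000) -> [(0, 1000), (1000, 2000), (2000, 3000)]
# The final slice simply runs past the end of the list, which Python tolerates.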
def create_temp_file_from_list(entries, prefix):
"""
When writing the entries into temp file, don't do any strip as there might be
white space in schema name and table name.
"""
if len(entries) == 0:
return None
fd = tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=False)
for entry in entries:
fd.write(entry + '\n')
tmp_file_name = fd.name
fd.close()
return tmp_file_name
def create_temp_file_with_tables(table_list):
return create_temp_file_from_list(table_list, 'table_list_')
def create_temp_file_with_schemas(schema_list):
return create_temp_file_from_list(schema_list, 'schema_file_')
def validate_timestamp(timestamp):
if not timestamp:
return False
if len(timestamp) != 14:
return False
if timestamp.isdigit():
return True
else:
return False
def check_successful_dump(report_file_contents):
for line in report_file_contents:
if line.strip() == 'gp_dump utility finished successfully.':
return True
return False
# raise exception for bad data
def convert_report_filename_to_cdatabase_filename(context, report_file):
(dirname, fname) = os.path.split(report_file)
timestamp = fname[-18:-4]
return context.generate_filename("cdatabase", timestamp=timestamp)
def get_lines_from_dd_file(filename, ddboost_storage_unit):
cmdStr = 'gpddboost --readFile --from-file=%s' % filename
if ddboost_storage_unit:
cmdStr += ' --ddboost-storage-unit=%s' % ddboost_storage_unit
cmd = Command('DDBoost copy of master dump file', cmdStr)
cmd.run(validateAfter=True)
contents = cmd.get_results().stdout.splitlines()
return contents
def check_cdatabase_exists(context, report_file):
try:
filename = convert_report_filename_to_cdatabase_filename(context, report_file)
    except Exception as err:
return False
if context.ddboost:
cdatabase_contents = get_lines_from_dd_file(filename, context.ddboost_storage_unit)
elif context.netbackup_service_host:
restore_file_with_nbu(context, path=filename)
cdatabase_contents = get_lines_from_file(filename)
else:
cdatabase_contents = get_lines_from_file(filename, context)
dbname = escapeDoubleQuoteInSQLString(context.dump_database, forceDoubleQuote=False)
for line in cdatabase_contents:
if 'CREATE DATABASE' in line:
dump_dbname = get_dbname_from_cdatabaseline(line)
if dump_dbname is None:
continue
else:
if dbname == checkAndRemoveEnclosingDoubleQuote(dump_dbname):
return True
return False
def get_dbname_from_cdatabaseline(line):
"""
Line format: CREATE DATABASE "DBNAME" WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = gpadmin;
To get the dbname:
substring between the ending index of the first statement: CREATE DATABASE and the starting index
of WITH TEMPLATE whichever is not inside any double quotes, based on the fact that double quote
inside any name will be escaped by extra double quote, so there's always only one WITH TEMPLATE not
inside any doubles, means its previous and post string should have only even number of double
quotes.
Note: OWER name can also have special characters with double quote.
"""
cdatabase = "CREATE DATABASE "
try:
start = line.index(cdatabase)
except Exception as e:
logger.error('Failed to find substring %s in line %s, error: %s' % (cdatabase, line, str(e)))
return None
keyword = " WITH TEMPLATE = "
pos = get_nonquoted_keyword_index(line, keyword, '"', len(keyword))
if pos != -1:
dbname = line[start+len(cdatabase) : pos]
return dbname
return None
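# Worked example (hypothetical dump line):
#   get_dbname_from_cdatabaseline(
#       'CREATE DATABASE "test""db" WITH TEMPLATE = template0 ENCODING = \'UTF8\' OWNER = gpadmin;')
#   -> '"test""db"'   (enclosing quotes are kept; callers strip them with
#                      checkAndRemoveEnclosingDoubleQuote)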
def get_nonquoted_keyword_index(line, keyword, quote, keyword_len):
# quote can be single quote or double quote
all_positions = get_all_occurrences(keyword, line)
if all_positions != None and len(all_positions) > 0:
for pos in all_positions:
pre_string = line[:pos]
post_string = line[pos + keyword_len:]
quotes_before = get_all_occurrences('%s' % quote, pre_string)
quotes_after = get_all_occurrences('%s' % quote, post_string)
num_quotes_before = 0 if (quotes_before is None or len(quotes_before) == 0) else len(quotes_before)
num_quotes_after = 0 if (quotes_after is None or len(quotes_after) == 0) else len(quotes_after)
if num_quotes_before % 2 == 0 and num_quotes_after % 2 == 0:
return pos
return -1
def get_all_occurrences(substr, line):
    # substr is interpolated directly into the regex pattern; callers must escape
    # any regex metacharacters in it before calling this helper
if substr is None or line is None or len(substr) > len(line):
return None
return [m.start() for m in re.finditer('(?=%s)' % substr, line)]
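# Example: get_all_occurrences('aa', 'aaaa') -> [0, 1, 2]; the lookahead
# pattern '(?=...)' yields overlapping matches.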
def get_type_ts_from_report_file(context, report_file, backup_type):
report_file_contents = get_lines_from_file(report_file)
if not check_successful_dump(report_file_contents):
return None
if not check_cdatabase_exists(context, report_file):
return None
if check_backup_type(report_file_contents, backup_type):
return get_timestamp_val(report_file_contents)
return None
def get_full_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Full')
def get_incremental_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Incremental')
def get_timestamp_val(report_file_contents):
for line in report_file_contents:
if line.startswith('Timestamp Key'):
timestamp = line.split(':')[-1].strip()
if not validate_timestamp(timestamp):
raise Exception('Invalid timestamp value found in report_file')
return timestamp
return None
def check_backup_type(report_file_contents, backup_type):
for line in report_file_contents:
if line.startswith('Backup Type'):
if line.split(':')[-1].strip() == backup_type:
return True
return False
def get_lines_from_zipped_file(fname):
"""
Don't strip white space here as it may be part of schema name and table name
"""
content = []
fd = gzip.open(fname, 'r')
try:
for line in fd:
content.append(line.strip('\n'))
    except Exception as err:
raise Exception("Error reading from file %s: %s" % (fname, err))
finally:
fd.close()
return content
def get_lines_from_file(fname, context=None):
"""
Don't strip white space here as it may be part of schema name and table name
"""
content = []
if context and context.ddboost:
contents = get_lines_from_dd_file(fname, context.ddboost_storage_unit)
return contents
else:
with open(fname) as fd:
for line in fd:
content.append(line.strip('\n'))
return content
def write_lines_to_file(filename, lines):
"""
Don't do strip in line for white space in case it is part of schema name or table name
"""
with open(filename, 'w') as fp:
for line in lines:
fp.write("%s\n" % line.strip('\n'))
def verify_lines_in_file(fname, expected):
lines = get_lines_from_file(fname)
if lines != expected:
raise Exception("After writing file '%s' contents not as expected.\nLines read from file: %s\nLines expected from file: %s\n" % (fname, lines, expected))
def check_dir_writable(directory):
fp = None
try:
tmp_file = os.path.join(directory, 'tmp_file')
fp = open(tmp_file, 'w')
except IOError as e:
raise Exception('No write access permission on %s' % directory)
except Exception as e:
raise Exception(str(e))
finally:
if fp is not None:
fp.close()
if os.path.isfile(tmp_file):
os.remove(tmp_file)
def execute_sql(query, master_port, dbname):
dburl = dbconn.DbURL(port=master_port, dbname=dbname)
conn = dbconn.connect(dburl)
cursor = execSQL(conn, query)
return cursor.fetchall()
def generate_master_status_prefix(dump_prefix):
return '%sgp_dump_status_1_1_' % (dump_prefix)
def generate_seg_dbdump_prefix(dump_prefix):
return '%sgp_dump_0_' % (dump_prefix)
def generate_seg_status_prefix(dump_prefix):
return '%sgp_dump_status_0_' % (dump_prefix)
def get_dump_dirs(context):
use_dir = context.get_backup_root()
dump_path = os.path.join(use_dir, context.dump_dir)
if not os.path.isdir(dump_path):
return []
initial_list = os.listdir(dump_path)
initial_list = fnmatch.filter(initial_list, '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]')
dirnames = []
for d in initial_list:
pth = os.path.join(dump_path, d)
if os.path.isdir(pth):
dirnames.append(pth)
if len(dirnames) == 0:
return []
dirnames = sorted(dirnames, key=lambda x: int(os.path.basename(x)), reverse=True)
return dirnames
def get_latest_report_timestamp(context):
dump_dirs = get_dump_dirs(context)
for d in dump_dirs:
latest = get_latest_report_in_dir(d, context.dump_prefix)
if latest:
return latest
return None
def get_latest_report_in_dir(report_dir, dump_prefix):
files = os.listdir(report_dir)
if len(files) == 0:
return None
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % dump_prefix)
if len(dump_report_files) == 0:
return None
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
return dump_report_files[0][-18:-4]
def get_timestamp_from_increments_filename(filename, dump_prefix):
fname = os.path.basename(filename)
parts = fname.split('_')
    # Without a prefix the basename splits into exactly 4 underscore-separated parts;
    # with a prefix it splits into more than 4
if not ((not dump_prefix and len(parts) == 4) or (dump_prefix and len(parts) > 4)):
raise Exception("Invalid increments file '%s' passed to get_timestamp_from_increments_filename" % filename)
return parts[-2].strip()
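# Example (hypothetical filename, no prefix):
#   get_timestamp_from_increments_filename(
#       '/data/db_dumps/20160101/gp_dump_20160101010101_increments', '')
#   -> '20160101010101'   (the basename splits into 4 parts; parts[-2] is the timestamp)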
def get_full_timestamp_for_incremental(context):
full_timestamp = None
if context.netbackup_service_host:
full_timestamp = get_full_timestamp_for_incremental_with_nbu(context)
else:
pattern = '%s/%s/[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]/%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]_increments' % \
(context.get_backup_root(), context.dump_dir, context.dump_prefix)
increments_files = glob.glob(pattern)
for increments_file in increments_files:
increment_ts = get_lines_from_file(increments_file)
if context.timestamp in increment_ts:
full_timestamp = get_timestamp_from_increments_filename(increments_file, context.dump_prefix)
break
if not full_timestamp:
raise Exception("Could not locate full backup associated with timestamp '%s'. Either increments file or full backup is missing." % context.timestamp)
return full_timestamp
# backup_dir will be either MDD or some other directory depending on call
def get_latest_full_dump_timestamp(context):
backup_dir = context.get_backup_root()
dump_dirs = get_dump_dirs(context)
for dump_dir in dump_dirs:
files = sorted(os.listdir(dump_dir))
if len(files) == 0:
logger.warn('Dump directory %s is empty' % dump_dir)
continue
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % context.dump_prefix)
if len(dump_report_files) == 0:
logger.warn('No dump report files found in dump directory %s' % dump_dir)
continue
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
for dump_report_file in dump_report_files:
report_path = os.path.join(dump_dir, dump_report_file)
logger.debug('Checking for latest timestamp in report file %s' % report_path)
timestamp = get_full_ts_from_report_file(context, report_path)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for incremental')
def get_all_segment_addresses(master_port):
gparray = GpArray.initFromCatalog(dbconn.DbURL(port=master_port), utility=True)
addresses = [seg.getSegmentAddress() for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
return list(set(addresses))
def scp_file_to_hosts(host_list, filename, batch_default):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for hname in host_list:
pool.addCommand(Scp('Copying table_filter_file to %s' % hname,
srcFile=filename,
dstFile=filename,
dstHost=hname))
pool.join()
pool.haltWork()
pool.check_results()
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for host in host_list:
cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
if check_results:
pool.check_results()
def check_funny_chars_in_names(names, is_full_qualified_name=True):
"""
    '\n' inside a table name makes it hard to specify the object name on the shell
    command line; this could be worked around with a table file, but input is currently
    read line by line.
    '!' inside a table name interferes with shell history expansion.
    ',' is used to separate tables in the plan file during an incremental restore.
    '.' is used to build fully qualified table names in the format schema.table.
"""
if names and len(names) > 0:
for name in names:
if ('\t' in name or '\n' in name or '!' in name or ',' in name or
(is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):
raise Exception('Name has an invalid character "\\t" "\\n" "!" "," ".": "%s"' % name)
def backup_file_with_ddboost(context, filetype=None, dbid=1, timestamp=None):
if filetype is None:
raise Exception("Cannot call backup_file_with_ddboost without a filetype argument")
if timestamp is None:
timestamp = context.timestamp
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
copy_file_to_dd(context, path, timestamp)
def copy_file_to_dd(context, filename, timestamp=None):
if timestamp is None:
timestamp = context.timestamp
basefilename = os.path.basename(filename)
cmdStr = "gpddboost --copyToDDBoost --from-file=%s --to-file=%s/%s/%s" % (filename, context.dump_dir, context.timestamp[0:8], basefilename)
if context.ddboost_storage_unit:
cmdStr += " --ddboost-storage-unit=%s" % context.ddboost_storage_unit
cmd = Command('copy file %s to DD machine' % basefilename, cmdStr)
cmd.run(validateAfter=True)
#Form and run command line to backup individual file with NBU
def backup_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to backup_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call backup_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "cat %s | gp_bsa_dump_agent --netbackup-service-host %s --netbackup-policy %s --netbackup-schedule %s --netbackup-filename %s" % \
(path, context.netbackup_service_host, context.netbackup_policy, context.netbackup_schedule, path)
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
if context.netbackup_keyword is not None:
command_string += " --netbackup-keyword %s" % context.netbackup_keyword
logger.debug("Command string inside backup_%s_file_with_nbu: %s\n", filetype, command_string)
if hostname is None:
Command("dumping metadata files from master", command_string).run(validateAfter=True)
else:
Command("dumping metadata files from segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
logger.debug("Command ran successfully\n")
#Form and run command line to restore individual file with NBU
def restore_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to restore_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call restore_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "gp_bsa_restore_agent --netbackup-service-host %s" % context.netbackup_service_host
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
command_string += " --netbackup-filename %s > %s" % (path, path)
logger.debug("Command string inside restore_%s_file_with_nbu: %s\n", filetype, command_string)
if hostname is None:
Command("restoring metadata files to master", command_string).run(validateAfter=True)
else:
Command("restoring metadata files to segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
def check_file_dumped_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path toeck_file_dumped_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call check_file_dumped_with_nbu with no type or path argument")
if filetype:
        path = context.generate_filename(filetype, dbid=dbid, timestamp=context.timestamp)
command_string = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (context.netbackup_service_host, path)
logger.debug("Command string inside 'check_file_dumped_with_nbu': %s\n", command_string)
if hostname is None:
cmd = Command("Querying NetBackup server to check for dumped file", command_string)
else:
cmd = Command("Querying NetBackup server to check for dumped file", command_string, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() == path:
return True
else:
return False
def get_full_timestamp_for_incremental_with_nbu(context):
if context.dump_prefix:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments" % (context.netbackup_service_host, context.dump_prefix)
else:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of increments files backed up", get_inc_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.split('\n')
for line in files_list:
fname = line.strip()
restore_file_with_nbu(context, path=fname)
contents = get_lines_from_file(fname)
if context.timestamp in contents:
full_timestamp = get_timestamp_from_increments_filename(fname, context.dump_prefix)
return full_timestamp
return None
def get_latest_full_ts_with_nbu(context):
if context.dump_prefix:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % \
(context.netbackup_service_host, context.dump_prefix)
else:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.split('\n')
for line in files_list:
fname = line.strip()
if fname == '':
continue
if context.backup_dir is not None and context.backup_dir not in fname:
continue
if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
return None
        restore_file_with_nbu(context, path=fname)
timestamp = get_full_ts_from_report_file(context, fname)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for given incremental on the specified NetBackup server')
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def check_schema_exists(schema_name, dbname):
schemaname = pg.escape_string(schema_name)
schema_check_sql = "select * from pg_catalog.pg_namespace where nspname='%s';" % schemaname
if len(getRows(dbname, schema_check_sql)) < 1:
return False
return True
def unescape_string(string):
if string:
string = string.replace('\\\\', '\\').replace("''", "'")
return string
def isDoubleQuoted(string):
if len(string) > 2 and string[0] == '"' and string[-1] == '"':
return True
return False
def checkAndRemoveEnclosingDoubleQuote(string):
if isDoubleQuoted(string):
string = string[1 : len(string) - 1]
return string
def checkAndAddEnclosingDoubleQuote(string):
if not isDoubleQuoted(string):
string = '"' + string + '"'
return string
def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
Accept true database name, schema name, table name, escape the double quote
inside the name, add enclosing double quote by default.
"""
string = string.replace('"', '""')
if forceDoubleQuote:
string = '"' + string + '"'
return string
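# Examples:
#   escapeDoubleQuoteInSQLString('my"table')      -> '"my""table"'
#   escapeDoubleQuoteInSQLString('plain', False)  -> 'plain'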
def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
Remove the escaping double quote in database/schema/table name.
"""
if string is None:
return string
string = string.replace('""', '"')
if forceDoubleQuote:
string = '"' + string + '"'
return string
def formatSQLString(rel_file, isTableName=False):
"""
Read the full qualified schema or table name, do a split
if each item is a table name into schema and table,
escape the double quote inside the name properly.
"""
relnames = []
if rel_file and os.path.exists(rel_file):
with open(rel_file, 'r') as fr:
lines = fr.read().strip('\n').split('\n')
for line in lines:
if isTableName:
schema, table = split_fqn(line)
schema = escapeDoubleQuoteInSQLString(schema)
table = escapeDoubleQuoteInSQLString(table)
relnames.append(schema + '.' + table)
else:
schema = escapeDoubleQuoteInSQLString(line)
relnames.append(schema)
if len(relnames) > 0:
write_lines_to_file(rel_file, relnames)
return rel_file
def split_fqn(fqn_name):
"""
Split full qualified table name into schema and table by separator '.',
"""
try:
schema, table = fqn_name.split('.')
except Exception as e:
logger.error("Failed to split name %s into schema and table, please check the format is schema.table" % fqn_name)
raise Exception('%s' % str(e))
return schema, table
def remove_file_on_segments(context, filename):
addresses = get_all_segment_addresses(context.master_port)
try:
cmd = 'rm -f %s' % filename
run_pool_command(addresses, cmd, context.batch_default, check_results=False)
except Exception as e:
logger.error("cleaning up file failed: %s" % e.__str__())
def get_table_info(line):
"""
    Splitting is ambiguous when a table/schema/user/tablespace name happens to
    contain one of the other fields' marker strings; that is very unlikely, but
    if it happens, (None, None, None, None) is returned.
    Since we only care about the table name, type, and schema name, stripping
    the input is safe here.
    line: contains the true (un-escaped) schema name, table name, and owner name.
"""
COMMENT_EXPR = '-- Name: '
TYPE_EXPR = '; Type: '
SCHEMA_EXPR = '; Schema: '
OWNER_EXPR = '; Owner: '
TABLESPACE_EXPR = '; Tablespace: '
temp = line.strip('\n')
type_start = get_all_occurrences(TYPE_EXPR, temp)
schema_start = get_all_occurrences(SCHEMA_EXPR, temp)
owner_start = get_all_occurrences(OWNER_EXPR, temp)
tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)
if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:
return (None, None, None, None)
name = temp[len(COMMENT_EXPR) : type_start[0]]
type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]
schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]
if not tblspace_start:
tblspace_start.append(None)
owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]
return (name, type, schema, owner)
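# Worked example (hypothetical dump comment line):
#   get_table_info('-- Name: sales; Type: TABLE; Schema: public; Owner: gpadmin')
#   -> ('sales', 'TABLE', 'public', 'gpadmin')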
|
|
from __future__ import absolute_import, unicode_literals
from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import (ModelAdmin, TabularInline,
HORIZONTAL, VERTICAL)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import validate
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.admin import (SimpleListFilter,
BooleanFieldListFilter)
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.test.utils import str_prefix
from django.utils import unittest, six
from .models import Band, Concert, ValidationTestModel, ValidationTestInlineModel
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a fieldsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, the Form class should be restricted to
        # the fields specified. This may cause errors to be raised in the db layer if
        # required model fields aren't in fields/fieldsets, but that's preferable to
        # ghost errors where a field in your Form class isn't being displayed because
        # you forgot to add it to fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
        # If we specify a form, it should be used, allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom form,
        # make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Beatles</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
concert = Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
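# Without an obj, the inline uses its default form; once an obj is passed, CustomConcertForm limits the fields to ['day'].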
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey fields
# and for fields with choices ought to be basic Select widgets.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper, so
# they need to be unwrapped (.widget.widget) when type checking. For Select
# fields, all of the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# AdminRadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' class, and 'inline' as well if the field is specified as HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
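# Per the assertions below, main_band and day offer no 'None' choice (blank=False), while opening_band and transport do (blank=True).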
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
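# With no admin-level exclude, the custom form's Meta.exclude ('transport') is honored.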
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class ValidationTests(unittest.TestCase):
def test_validation_only_runs_in_debug(self):
# Ensure validation only runs when DEBUG = True
try:
settings.DEBUG = True
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
site = AdminSite()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
site.register,
ValidationTestModel,
ValidationTestModelAdmin,
)
finally:
settings.DEBUG = False
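# With DEBUG off, registering the same invalid ModelAdmin should not raise.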
site = AdminSite()
site.register(ValidationTestModel, ValidationTestModelAdmin)
def test_raw_id_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fieldsets_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("non_existent_field",)}),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
fields = ["name",]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"Both fieldsets and fields are specified in ValidationTestModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fields = ["name", "name"]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_form_validation(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
six.assertRaisesRegex(self,
ImproperlyConfigured,
"ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
validate(BandAdmin, Band)
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
validate(BandAdmin, Band)
def test_filter_vertical_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_filter_horizontal_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_radio_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"name": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_prepopulated_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"non_existent_field": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("non_existent_field",)}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ("name",)}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
str_prefix("ValidationTestModelAdmin.list_display\[0\], %(_)s'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'."),
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_links_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'RandomClass' which is not a descendant of ListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'RandomClass' which is not of type FieldListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_query_set(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'AwesomeFilter' which is not of type FieldListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'BooleanFieldListFilter' which is of type FieldListFilter but is not associated with a field name.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
# Valid declarations below -----------
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_per_page_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_per_page' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_show_all_allowed_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_max_show_all' should be an integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_search_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_date_hierarchy_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_ordering_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_select_related_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_select_related' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_as_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_as' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_on_top_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_on_top' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_inlines_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fields_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = ("non_existent_field",)
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fk_name_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "non_existent_field"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestInlineModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_extra_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.extra' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_num_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.max_num' should be an integer or None \(default\).",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_formset_validation(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import builtins
import fixtures
import mock
import testtools
import pypowervm.adapter as adp
import pypowervm.exceptions as exc
import pypowervm.helpers.vios_busy as vb
import pypowervm.tasks.storage as ts
import pypowervm.tests.tasks.util as tju
import pypowervm.tests.test_fixtures as fx
import pypowervm.tests.test_utils.test_wrapper_abc as twrap
import pypowervm.utils.transaction as tx
import pypowervm.wrappers.entry_wrapper as ewrap
import pypowervm.wrappers.logical_partition as lpar
import pypowervm.wrappers.storage as stor
import pypowervm.wrappers.vios_file as vf
import pypowervm.wrappers.virtual_io_server as vios
CLUSTER = "cluster.txt"
LU_LINKED_CLONE_JOB = 'cluster_LULinkedClone_job_template.txt'
UPLOAD_VOL_GRP_ORIG = 'upload_volgrp.txt'
UPLOAD_VOL_GRP_NEW_VDISK = 'upload_volgrp2.txt'
VG_FEED = 'fake_volume_group2.txt'
UPLOADED_FILE = 'upload_file.txt'
VIOS_FEED = 'fake_vios_feed.txt'
VIOS_FEED2 = 'fake_vios_hosting_vios_feed.txt'
VIOS_ENTRY = 'fake_vios_ssp_npiv.txt'
VIOS_ENTRY2 = 'fake_vios_mappings.txt'
LPAR_FEED = 'lpar.txt'
LU_FEED = 'lufeed.txt'
def _mock_update_by_path(ssp, etag, path, timeout=-1):
# Spoof adding UDID and defaulting thinness
for lu in ssp.logical_units:
if not lu.udid:
lu._udid('udid_' + lu.name)
if lu.is_thin is None:
lu._is_thin(True)
if lu.lu_type is None:
lu._lu_type(stor.LUType.DISK)
resp = adp.Response('meth', 'path', 200, 'reason', {'etag': 'after'})
resp.entry = ssp.entry
return resp
class TestUploadLV(testtools.TestCase):
"""Unit Tests for Instance uploads."""
def setUp(self):
super(TestUploadLV, self).setUp()
self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits))
self.adpt = self.adptfx.adpt
self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701'
self.vg_uuid = 'b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'
@mock.patch('tempfile.mkdtemp')
@mock.patch('pypowervm.tasks.storage.os')
@mock.patch('pypowervm.util.retry_io_command')
@mock.patch('pypowervm.tasks.storage.open')
def test_rest_api_pipe(self, mock_open, mock_retry, mock_os, mock_mkdtemp):
mock_writer = mock.Mock()
with ts._rest_api_pipe(mock_writer) as read_stream:
self.assertEqual(mock_retry.return_value, read_stream)
mock_mkdtemp.assert_called_once_with()
mock_os.path.join.assert_called_once_with(mock_mkdtemp.return_value,
'REST_API_Pipe')
mock_os.mkfifo.assert_called_once_with(mock_os.path.join.return_value)
mock_writer.assert_called_once_with(mock_os.path.join.return_value)
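# On exit, the FIFO and its temporary directory should have been cleaned up.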
mock_os.remove.assert_called_once_with(mock_os.path.join.return_value)
mock_os.rmdir.assert_called_once_with(mock_mkdtemp.return_value)
# retry_io_command was invoked once with open and once with close
mock_retry.assert_has_calls([
mock.call(mock_open, mock_os.path.join.return_value, 'r'),
mock.call(mock_retry.return_value.close)])
@mock.patch('pypowervm.tasks.storage._rest_api_pipe')
def test_upload_stream_api_func(self, mock_rap):
"""With FUNC, _upload_stream_api uses _rest_api_pipe properly."""
vio_file = mock.Mock()
vio_file.adapter.helpers = [vb.vios_busy_retry_helper]
ts._upload_stream_api(vio_file, 'io_handle', ts.UploadType.FUNC)
mock_rap.assert_called_once_with('io_handle')
vio_file.adapter.upload_file.assert_called_once_with(
vio_file.element, mock_rap.return_value.__enter__.return_value)
self.assertEqual(vio_file.adapter.helpers, [vb.vios_busy_retry_helper])
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_new_vopt(self, mock_create_file):
"""Tests the uploads of the virtual disks."""
fake_file = self._fake_meta()
fake_file.adapter.helpers = [vb.vios_busy_retry_helper]
mock_create_file.return_value = fake_file
v_opt, f_wrap = ts.upload_vopt(self.adpt, self.v_uuid, None, 'test2',
f_size=50)
mock_create_file.assert_called_once_with(
self.adpt, 'test2', vf.FileType.MEDIA_ISO, self.v_uuid, None, 50)
# Test that vopt was 'uploaded'
self.adpt.upload_file.assert_called_with(mock.ANY, None, helpers=[])
self.assertIsNone(f_wrap)
self.assertIsNotNone(v_opt)
self.assertIsInstance(v_opt, stor.VOptMedia)
self.assertEqual('test2', v_opt.media_name)
# Ensure cleanup was called
self.adpt.delete.assert_called_once_with(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')
# Test cleanup failure
self.adpt.reset_mock()
self.adpt.delete.side_effect = exc.Error('Something bad')
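# When the post-upload cleanup delete fails, upload_vopt should swallow the error and hand back the File wrapper instead of None.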
vopt, f_wrap = ts.upload_vopt(self.adpt, self.v_uuid, None, 'test2',
f_size=50)
self.adpt.delete.assert_called_once_with(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')
self.assertIsNotNone(f_wrap)
self.assertIsNotNone(vopt)
self.assertIsInstance(vopt, stor.VOptMedia)
self.assertEqual('test2', vopt.media_name)
@mock.patch.object(ts.LOG, 'warning')
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_vopt_by_filepath(self, mock_create_file, mock_log_warn):
"""Tests the uploads of the virtual disks with an upload retry."""
fake_file = self._fake_meta()
fake_file.adapter.helpers = [vb.vios_busy_retry_helper]
mock_create_file.return_value = fake_file
self.adpt.upload_file.side_effect = [exc.Error("error"),
object()]
m = mock.mock_open()
with mock.patch.object(builtins, 'open', m):
v_opt, f_wrap = ts.upload_vopt(
self.adpt, self.v_uuid, 'fake-path', 'test2', f_size=50)
# Test that vopt was 'uploaded'
self.adpt.upload_file.assert_called_with(mock.ANY, m(), helpers=[])
self.assertIsNone(f_wrap)
self.assertIsNotNone(v_opt)
self.assertIsInstance(v_opt, stor.VOptMedia)
self.assertEqual('test2', v_opt.media_name)
# Validate that there was a warning log call and multiple executions
# of the upload
mock_log_warn.assert_called_once()
self.assertEqual(2, self.adpt.upload_file.call_count)
# Ensure cleanup was called twice since the first uploads fails.
self.adpt.delete.assert_has_calls([mock.call(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')] * 2)
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_new_vopt_w_fail(self, mock_create_file):
"""Tests the uploads of the virtual disks with an upload fail."""
mock_create_file.return_value = self._fake_meta()
self.adpt.upload_file.side_effect = exc.Error("error")
self.assertRaises(exc.Error, ts.upload_vopt, self.adpt, self.v_uuid,
None, 'test2', f_size=50)
@mock.patch('pypowervm.tasks.storage.rm_vg_storage')
@mock.patch('pypowervm.wrappers.storage.VG.get')
@mock.patch('pypowervm.tasks.storage._upload_stream')
@mock.patch('pypowervm.tasks.storage._create_file')
@mock.patch('pypowervm.tasks.storage.crt_vdisk')
def test_upload_new_vdisk_failed(
self, mock_create_vdisk, mock_create_file, mock_upload_stream,
mock_vg_get, mock_rm):
"""Tests the uploads of the virtual disks."""
# First need to load in the various test responses.
mock_vdisk = mock.Mock()
mock_create_vdisk.return_value = mock_vdisk
mock_create_file.return_value = self._fake_meta()
fake_vg = mock.Mock()
mock_vg_get.return_value = fake_vg
mock_upload_stream.side_effect = exc.ConnectionError('fake error')
self.assertRaises(
exc.ConnectionError, ts.upload_new_vdisk, self.adpt, self.v_uuid,
self.vg_uuid, None, 'test2', 50, d_size=25, sha_chksum='abc123')
self.adpt.delete.assert_called_once()
mock_rm.assert_called_once_with(fake_vg, vdisks=[mock_vdisk])
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_new_vdisk(self, mock_create_file):
"""Tests the uploads of the virtual disks."""
# traits are already set to use the REST API upload
# First need to load in the various test responses.
vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)
vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)
self.adpt.read.return_value = vg_orig
self.adpt.update_by_path.return_value = vg_post_crt
mock_create_file.return_value = self._fake_meta()
n_vdisk, f_wrap = ts.upload_new_vdisk(
self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,
d_size=25, sha_chksum='abc123')
# Ensure the create file was called
mock_create_file.assert_called_once_with(
self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid,
f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3',
sha_chksum='abc123')
# Ensure cleanup was called after the upload
self.adpt.delete.assert_called_once_with(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')
self.assertIsNone(f_wrap)
self.assertIsNotNone(n_vdisk)
self.assertIsInstance(n_vdisk, stor.VDisk)
@mock.patch('pypowervm.tasks.storage.crt_vdisk')
def test_crt_copy_vdisk(self, mock_crt_vdisk):
"""Tests the uploads of the virtual disks."""
# traits are already set to use the REST API upload
# First need to load in the various test responses.
vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)
vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)
self.adpt.read.return_value = vg_orig
self.adpt.update_by_path.return_value = vg_post_crt
n_vdisk = ts.crt_copy_vdisk(
self.adpt, self.v_uuid, self.vg_uuid, 'src', 1073741824, 'test2',
d_size=2147483648, file_format=stor.FileFormatType.RAW)
self.assertIsNotNone(n_vdisk)
mock_crt_vdisk.assert_called_once_with(
self.adpt, self.v_uuid, self.vg_uuid, 'test2', 2,
base_image='src', file_format=stor.FileFormatType.RAW)
@mock.patch('pypowervm.tasks.storage.crt_vdisk')
@mock.patch('pypowervm.tasks.storage._create_file')
@mock.patch('pypowervm.tasks.storage._upload_stream_api')
def test_upload_new_vdisk_func_remote(self, mock_usa, mock_crt_file,
mock_crt_vdisk):
"""With FUNC and non-local, upload_new_vdisk uses REST API upload."""
mock_crt_file.return_value = mock.Mock(schema_type='File')
n_vdisk, maybe_file = ts.upload_new_vdisk(
self.adpt, 'v_uuid', 'vg_uuid', 'io_handle', 'd_name', 10,
upload_type=ts.UploadType.FUNC,
file_format=stor.FileFormatType.RAW)
mock_crt_vdisk.assert_called_once_with(
self.adpt, 'v_uuid', 'vg_uuid', 'd_name', 1.0,
file_format=stor.FileFormatType.RAW)
mock_crt_file.assert_called_once_with(
self.adpt, 'd_name', vf.FileType.DISK_IMAGE, 'v_uuid', f_size=10,
tdev_udid=mock_crt_vdisk.return_value.udid, sha_chksum=None)
mock_usa.assert_called_once_with(
mock_crt_file.return_value, 'io_handle', ts.UploadType.FUNC)
mock_crt_file.return_value.adapter.delete.assert_called_once_with(
vf.File.schema_type, root_id=mock_crt_file.return_value.uuid,
service='web')
self.assertEqual(mock_crt_vdisk.return_value, n_vdisk)
self.assertIsNone(maybe_file)
@mock.patch('pypowervm.tasks.storage._upload_stream_api')
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_stream_via_stream_bld(self, mock_create_file,
mock_upload_st):
"""Tests the uploads of a vDisk - via UploadType.IO_STREAM_BUILDER."""
mock_file = self._fake_meta()
# Prove that COORDINATED is gone (uses API upload now)
mock_file._enum_type(vf.FileType.DISK_IMAGE_COORDINATED)
mock_create_file.return_value = mock_file
mock_io_stream = mock.MagicMock()
mock_io_handle = mock.MagicMock()
mock_io_handle.return_value = mock_io_stream
# Run the code
ts._upload_stream(mock_file, mock_io_handle,
ts.UploadType.IO_STREAM_BUILDER)
# Make sure the function was called.
mock_io_handle.assert_called_once_with()
mock_upload_st.assert_called_once_with(
mock_file, mock_io_stream, ts.UploadType.IO_STREAM)
@mock.patch('pypowervm.tasks.storage._create_file')
def test_upload_new_vdisk_failure(self, mock_create_file):
"""Tests the failure path for uploading of the virtual disks."""
# First need to load in the various test responses.
vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)
vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)
self.adpt.read.return_value = vg_orig
self.adpt.update_by_path.return_value = vg_post_crt
mock_create_file.return_value = self._fake_meta()
self.assertRaises(exc.Error, ts.upload_new_vdisk, self.adpt,
self.v_uuid, self.vg_uuid, None, 'test3', 50)
# Test cleanup failure
self.adpt.delete.side_effect = exc.Error('Something bad')
f_wrap = ts.upload_new_vdisk(self.adpt, self.v_uuid, self.vg_uuid,
None, 'test2', 50, sha_chksum='abc123')
self.adpt.delete.assert_called_once_with(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')
self.assertIsNotNone(f_wrap)
@mock.patch('pypowervm.tasks.storage._create_file')
@mock.patch('pypowervm.tasks.storage.crt_lu')
def test_upload_new_lu(self, mock_crt_lu, mock_create_file):
"""Tests create/upload of SSP LU."""
# traits are already set to use the REST API upload
ssp = mock.Mock(adapter=mock.Mock(traits=mock.Mock(local_api=True)))
interim_lu = mock.Mock(adapter=self.adpt)
mock_create_file.return_value = self._fake_meta()
mock_crt_lu.return_value = ssp, interim_lu
size_b = 1224067890
new_lu, f_wrap = ts.upload_new_lu(
self.v_uuid, ssp, None, 'lu1', size_b, d_size=25,
sha_chksum='abc123')
# The LU created by crt_lu was returned
self.assertEqual(interim_lu, new_lu)
# crt_lu was called properly
# 1224067890 / 1GB = 1.140002059; round up to 2dp
mock_crt_lu.assert_called_with(ssp, 'lu1', 1.15, typ=stor.LUType.IMAGE)
# Ensure the create file was called
mock_create_file.assert_called_once_with(
self.adpt, interim_lu.name, vf.FileType.DISK_IMAGE, self.v_uuid,
f_size=size_b, tdev_udid=interim_lu.udid, sha_chksum='abc123')
# Ensure cleanup was called after the upload
self.adpt.delete.assert_called_once_with(
'File', service='web',
root_id='6233b070-31cc-4b57-99bd-37f80e845de9')
self.assertIsNone(f_wrap)
@mock.patch('pypowervm.util.convert_bytes_to_gb')
@mock.patch('pypowervm.tasks.storage.crt_lu')
@mock.patch('pypowervm.tasks.storage.upload_lu')
def test_upload_new_lu_calls(self, mock_upl, mock_crt, mock_b2g):
"""Various permutations of how to call upload_new_lu."""
mock_crt.return_value = 'ssp_out', 'new_lu'
f_size = 10
# No optionals
self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu(
'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size))
mock_b2g.assert_called_with(f_size, dp=2)
mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value,
typ=stor.LUType.IMAGE)
mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size,
sha_chksum=None,
upload_type=ts.UploadType.IO_STREAM)
mock_b2g.reset_mock()
mock_crt.reset_mock()
mock_upl.reset_mock()
# d_size < f_size; sha_chksum specified
self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu(
'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size, d_size=1,
sha_chksum='sha_chksum'))
mock_b2g.assert_called_with(10, dp=2)
mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value,
typ=stor.LUType.IMAGE)
mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size,
sha_chksum='sha_chksum',
upload_type=ts.UploadType.IO_STREAM)
mock_b2g.reset_mock()
mock_crt.reset_mock()
mock_upl.reset_mock()
# d_size > f_size; return_ssp specified
self.assertEqual(('ssp_out', 'new_lu', mock_upl.return_value),
ts.upload_new_lu(
'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size,
d_size=100, return_ssp=True))
mock_b2g.assert_called_with(100, dp=2)
mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value,
typ=stor.LUType.IMAGE)
mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size,
sha_chksum=None,
upload_type=ts.UploadType.IO_STREAM)
@mock.patch('pypowervm.tasks.storage._create_file')
@mock.patch('pypowervm.tasks.storage._upload_stream_api')
def test_upload_lu_func_remote(self, mock_usa, mock_crt_file):
"""With FUNC and non-local, upload_lu uses REST API upload."""
lu = mock.Mock(adapter=self.adpt)
self.assertIsNone(ts.upload_lu('v_uuid', lu, 'io_handle', 'f_size',
upload_type=ts.UploadType.FUNC))
mock_crt_file.assert_called_once_with(
lu.adapter, lu.name, vf.FileType.DISK_IMAGE, 'v_uuid',
f_size='f_size', tdev_udid=lu.udid, sha_chksum=None)
mock_usa.assert_called_once_with(mock_crt_file.return_value,
'io_handle', ts.UploadType.FUNC)
@mock.patch('pypowervm.util.convert_bytes_to_gb')
@mock.patch('pypowervm.tasks.storage.crt_lu')
@mock.patch('pypowervm.tasks.storage.upload_lu')
def test_upload_new_lu_calls_via_func(self, mock_upl, mock_crt, mock_b2g):
"""Various permutations of how to call upload_new_lu."""
mock_crt.return_value = 'ssp_out', 'new_lu'
f_size = 10
# Successful call
ssp_in = mock.Mock(adapter=mock.Mock(traits=mock.Mock(local_api=True)))
self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu(
'v_uuid', ssp_in, 'd_stream', 'lu_name', f_size,
upload_type=ts.UploadType.FUNC))
mock_b2g.assert_called_with(f_size, dp=2)
mock_crt.assert_called_with(ssp_in, 'lu_name', mock_b2g.return_value,
typ=stor.LUType.IMAGE)
mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size,
sha_chksum=None,
upload_type=ts.UploadType.FUNC)
def test_create_file(self):
"""Validates that the _create_file builds the Element properly."""
def validate_in(*args, **kwargs):
# Validate that the element is built properly
wrap = args[0]
self.assertEqual('chk', wrap._get_val_str(vf._FILE_CHKSUM))
self.assertEqual(50, wrap.expected_file_size)
self.assertEqual('f_name', wrap.file_name)
self.assertEqual('application/octet-stream',
wrap.internet_media_type)
self.assertEqual('f_type', wrap.enum_type)
self.assertEqual('v_uuid', wrap.vios_uuid)
self.assertEqual('tdev_uuid', wrap.tdev_udid)
ret = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {})
ret.entry = ewrap.EntryWrapper._bld(self.adpt, tag='File').entry
return ret
self.adpt.create.side_effect = validate_in
ts._create_file(self.adpt, 'f_name', 'f_type', 'v_uuid', 'chk', 50,
'tdev_uuid')
self.assertTrue(self.adpt.create.called)
def _fake_meta(self):
"""Returns a fake meta class for the _create_file mock."""
resp = tju.load_file(UPLOADED_FILE, self.adpt)
return vf.File.wrap(resp)
class TestVG(twrap.TestWrapper):
file = VG_FEED
wrapper_class_to_test = stor.VG
def setUp(self):
super(TestVG, self).setUp()
# TestWrapper sets up the VG feed.
self.mock_vg_get = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.storage.VG.get')).mock
self.mock_vg_get.return_value = self.entries
# Need a VIOS feed too.
self.vios_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED))
self.mock_vio_get = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.virtual_io_server.VIOS.get')).mock
self.mock_vio_get.return_value = self.vios_feed
self.mock_vio_search = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.virtual_io_server.VIOS.search')).mock
def test_find_vg_all_vioses(self):
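# No vios_name given, so the full VIOS feed is fetched and searched for the VG.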
ret_vio, ret_vg = ts.find_vg('adap', 'image_pool')
self.assertEqual(self.vios_feed[0], ret_vio)
self.assertEqual(self.entries[1], ret_vg)
self.mock_vio_get.assert_called_once_with('adap')
self.mock_vio_search.assert_not_called()
self.mock_vg_get.assert_called_once_with(
'adap', parent=self.vios_feed[0])
def test_find_vg_specified_vios(self):
self.mock_vio_search.return_value = self.vios_feed[1:]
ret_vio, ret_vg = ts.find_vg(
'adap', 'image_pool', vios_name='nimbus-ch03-p2-vios1')
self.assertEqual(self.vios_feed[1], ret_vio)
self.assertEqual(self.entries[1], ret_vg)
self.mock_vio_get.assert_not_called()
self.mock_vio_search.assert_called_once_with(
'adap', name='nimbus-ch03-p2-vios1')
self.mock_vg_get.assert_called_once_with(
'adap', parent=self.vios_feed[1])
def test_find_vg_no_vios(self):
self.mock_vio_search.return_value = []
self.assertRaises(exc.VIOSNotFound,
ts.find_vg, 'adap', 'n/a', vios_name='no_such_vios')
self.mock_vio_get.assert_not_called()
self.mock_vio_search.assert_called_once_with(
'adap', name='no_such_vios')
self.mock_vg_get.assert_not_called()
def test_find_vg_not_found(self):
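# The VG doesn't exist on any VIOS, so every VIOS's VG feed is checked before VGNotFound is raised.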
self.assertRaises(exc.VGNotFound, ts.find_vg, 'adap', 'n/a')
self.mock_vio_get.assert_called_once_with('adap')
self.mock_vio_search.assert_not_called()
self.mock_vg_get.assert_has_calls([
mock.call('adap', parent=self.vios_feed[0]),
mock.call('adap', parent=self.vios_feed[1])])
class TestVDisk(testtools.TestCase):
def setUp(self):
super(TestVDisk, self).setUp()
self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits))
self.adpt = self.adptfx.adpt
self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701'
self.vg_uuid = 'b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'
self.vg_resp = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)
@mock.patch('pypowervm.adapter.Adapter.update_by_path')
@mock.patch('pypowervm.adapter.Adapter.read')
def test_crt_vdisk(self, mock_read, mock_update):
mock_read.return_value = self.vg_resp
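# The update side effect below checks that crt_vdisk appends the new VDisk to the volume group before sending the update.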
def _mock_update(*a, **kwa):
vg_wrap = a[0]
new_vdisk = vg_wrap.virtual_disks[-1]
self.assertEqual('vdisk_name', new_vdisk.name)
self.assertEqual(10, new_vdisk.capacity)
return vg_wrap.entry
mock_update.side_effect = _mock_update
ret = ts.crt_vdisk(
self.adpt, self.v_uuid, self.vg_uuid, 'vdisk_name', 10,
file_format=stor.FileFormatType.RAW)
self.assertEqual('vdisk_name', ret.name)
self.assertEqual(10, ret.capacity)
self.assertEqual(stor.FileFormatType.RAW, ret.file_format)
def _mock_update_path(*a, **kwa):
vg_wrap = a[0]
vg_wrap.virtual_disks[-1].name = ('/path/to/' +
vg_wrap.virtual_disks[-1].name)
new_vdisk = vg_wrap.virtual_disks[-1]
self.assertEqual('/path/to/vdisk_name2', new_vdisk.name)
self.assertEqual(10, new_vdisk.capacity)
return vg_wrap.entry
mock_update.side_effect = _mock_update_path
ret = ts.crt_vdisk(
self.adpt, self.v_uuid, self.vg_uuid, 'vdisk_name2', 10,
file_format=stor.FileFormatType.RAW)
self.assertEqual('/path/to/vdisk_name2', ret.name)
self.assertEqual(10, ret.capacity)
self.assertEqual(stor.FileFormatType.RAW, ret.file_format)
@mock.patch('pypowervm.wrappers.job.Job.run_job')
@mock.patch('pypowervm.adapter.Adapter.read')
def test_rescan_vstor(self, mock_adpt_read, mock_run_job):
mock_vio = mock.Mock(adapter=None, uuid='vios_uuid')
mock_vopt = mock.Mock(adapter=None, udid='stor_udid')
mock_adpt_read.return_value = self.vg_resp
def verify_run_job(vios_uuid, job_parms=None):
self.assertEqual('vios_uuid', vios_uuid)
self.assertEqual(1, len(job_parms))
job_parm = (b'<web:JobParameter xmlns:web="http://www.ibm.com/'
b'xmlns/systems/power/firmware/web/mc/2012_10/" '
b'schemaVersion="V1_0"><web:ParameterName>'
b'VirtualDiskUDID</web:ParameterName>'
b'<web:ParameterValue>stor_udid</web:ParameterValue>'
b'</web:JobParameter>')
self.assertEqual(job_parm, job_parms[0].toxmlstring())
mock_run_job.side_effect = verify_run_job
# Ensure that AdapterNotFound exception is raised correctly
self.assertRaises(
exc.AdapterNotFound, ts.rescan_vstor, mock_vio, mock_vopt)
self.assertEqual(0, self.adpt.read.call_count)
self.assertEqual(0, mock_run_job.call_count)
# Add valid adapter
mock_vio.adapter = self.adpt
ts.rescan_vstor(mock_vio, mock_vopt)
# Validate method invocations
self.assertEqual(1, self.adpt.read.call_count)
self.assertEqual(1, mock_run_job.call_count)
mock_vio = "vios_uuid"
mock_vopt = "stor_udid"
ts.rescan_vstor(mock_vio, mock_vopt, adapter=self.adpt)
self.assertEqual(2, mock_run_job.call_count)
class TestRMStorage(testtools.TestCase):
def setUp(self):
super(TestRMStorage, self).setUp()
self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits))
self.adpt = self.adptfx.adpt
self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701'
self.vg_uuid = 'b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'
self.vg_resp = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)
def test_rm_dev_by_udid(self):
dev1 = mock.Mock(udid=None)
# dev doesn't have a UDID
with self.assertLogs(ts.__name__, 'WARNING'):
self.assertIsNone(ts._rm_dev_by_udid(dev1, None))
dev1.toxmlstring.assert_called_with(pretty=True)
# Remove from empty list returns None, and warns (like not-found)
dev1.udid = 123
with self.assertLogs(ts.__name__, 'WARNING'):
self.assertIsNone(ts._rm_dev_by_udid(dev1, []))
# Works when exact same dev is in the list,
devlist = [dev1]
self.assertEqual(dev1, ts._rm_dev_by_udid(dev1, devlist))
self.assertEqual([], devlist)
# Works when matching-but-not-same dev is in the list. Return is the
# one that was in the list, not the one that was passed in.
devlist = [dev1]
dev2 = mock.Mock(udid=123)
# Two different mocks are not equal
self.assertNotEqual(dev1, dev2)
self.assertEqual(dev1, ts._rm_dev_by_udid(dev2, devlist))
self.assertEqual([], devlist)
# Error when multiples found
devlist = [dev1, dev2, dev1]
self.assertRaises(exc.FoundDevMultipleTimes, ts._rm_dev_by_udid, dev1,
devlist)
# One more good path with a longer list
dev3 = mock.Mock()
dev4 = mock.Mock(udid=456)
devlist = [dev3, dev2, dev4]
self.assertEqual(dev2, ts._rm_dev_by_udid(dev1, devlist))
self.assertEqual([dev3, dev4], devlist)
@mock.patch('pypowervm.adapter.Adapter.update_by_path')
def test_rm_vdisks(self, mock_update):
mock_update.return_value = self.vg_resp
vg_wrap = stor.VG.wrap(self.vg_resp)
# Remove a valid VDisk
valid_vd = vg_wrap.virtual_disks[0]
# Removal should hit.
vg_wrap = ts.rm_vg_storage(vg_wrap, vdisks=[valid_vd])
# Update happens, by default
self.assertEqual(1, mock_update.call_count)
self.assertEqual(1, len(vg_wrap.virtual_disks))
self.assertNotEqual(valid_vd.udid, vg_wrap.virtual_disks[0].udid)
# Bogus removal doesn't affect vg_wrap, and doesn't update.
mock_update.reset_mock()
invalid_vd = mock.Mock()
invalid_vd.name = 'vdisk_name'
invalid_vd.udid = 'vdisk_udid'
vg_wrap = ts.rm_vg_storage(vg_wrap, vdisks=[invalid_vd])
# Update doesn't happen, because no changes
self.assertEqual(0, mock_update.call_count)
self.assertEqual(1, len(vg_wrap.virtual_disks))
# Valid (but sparse) removal; invalid is ignored.
mock_update.reset_mock()
valid_vd = mock.Mock()
valid_vd.name = 'vdisk_name'
valid_vd.udid = '0300f8d6de00004b000000014a54555cd9.3'
vg_wrap = ts.rm_vg_storage(vg_wrap, vdisks=[valid_vd, invalid_vd])
self.assertEqual(1, mock_update.call_count)
self.assertEqual(0, len(vg_wrap.virtual_disks))
@mock.patch('pypowervm.adapter.Adapter.update_by_path')
def test_rm_vopts(self, mock_update):
mock_update.return_value = self.vg_resp
vg_wrap = stor.VG.wrap(self.vg_resp)
repo = vg_wrap.vmedia_repos[0]
# Remove a valid VOptMedia
valid_vopt = repo.optical_media[0]
# Removal should hit.
vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=[valid_vopt])
# Update happens, by default
self.assertEqual(1, mock_update.call_count)
repo = vg_wrap.vmedia_repos[0]
self.assertEqual(2, len(repo.optical_media))
self.assertNotEqual(valid_vopt.udid, repo.optical_media[0].udid)
self.assertNotEqual(valid_vopt.udid, repo.optical_media[1].udid)
# Bogus removal doesn't affect vg_wrap, and doesn't update.
mock_update.reset_mock()
invalid_vopt = stor.VOptMedia.bld(self.adpt, 'bogus')
mock_update.reset_mock()
vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=[invalid_vopt])
self.assertEqual(0, mock_update.call_count)
self.assertEqual(2, len(vg_wrap.vmedia_repos[0].optical_media))
# Valid multiple removal
mock_update.reset_mock()
vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=repo.optical_media[:])
self.assertEqual(1, mock_update.call_count)
self.assertEqual(0, len(vg_wrap.vmedia_repos[0].optical_media))
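# --- Illustrative sketch (not part of pypowervm) ---------------------------
# The test_rm_dev_by_udid assertions above rely on "remove by UDID"
# semantics: a device is matched by its udid attribute rather than by
# identity, the element that was actually in the list is returned, and
# multiple matches are an error.  The helper below is a hypothetical
# stand-in written only to make those semantics explicit; it is not the
# library implementation (which raises exc.FoundDevMultipleTimes).
def _example_rm_by_udid(dev, devlist):
    """Remove and return the element of devlist whose udid matches dev's."""
    if dev.udid is None or not devlist:
        return None
    matches = [d for d in devlist if d.udid == dev.udid]
    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError('device with udid %s found %d times' %
                         (dev.udid, len(matches)))
    devlist.remove(matches[0])
    return matches[0]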
class TestTier(testtools.TestCase):
@mock.patch('pypowervm.wrappers.storage.Tier.search')
def test_default_tier_for_ssp(self, mock_srch):
ssp = mock.Mock()
self.assertEqual(mock_srch.return_value, ts.default_tier_for_ssp(ssp))
mock_srch.assert_called_with(ssp.adapter, parent=ssp, is_default=True,
one_result=True)
mock_srch.return_value = None
self.assertRaises(exc.NoDefaultTierFoundOnSSP,
ts.default_tier_for_ssp, ssp)
class TestLUEnt(twrap.TestWrapper):
file = LU_FEED
wrapper_class_to_test = stor.LUEnt
def setUp(self):
super(TestLUEnt, self).setUp()
self.mock_feed_get = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.storage.LUEnt.get')).mock
self.mock_feed_get.return_value = self.entries
self.tier = mock.Mock(spec=stor.Tier, get=mock.Mock(
return_value=self.entries))
# Mock out each LUEnt's .delete so I can know I called the right ones.
for luent in self.entries:
luent.delete = mock.Mock()
# This image LU...
self.img_lu = self.entries[4]
# ...backs these three linked clones
self.clone1 = self.entries[9]
self.clone2 = self.entries[11]
self.clone3 = self.entries[21]
self.orig_len = len(self.entries)
def test_rm_tier_storage_errors(self):
"""Test rm_tier_storage ValueErrors."""
# Neither tier nor lufeed provided
self.assertRaises(ValueError, ts.rm_tier_storage, self.entries)
# Invalid lufeed provided
self.assertRaises(ValueError, ts.rm_tier_storage,
self.entries, lufeed=[1, 2])
# Same, even if tier provided
self.assertRaises(ValueError, ts.rm_tier_storage,
self.entries, tier=self.tier, lufeed=[1, 2])
@mock.patch('pypowervm.tasks.storage._rm_lus')
def test_rm_tier_storage_feed_get(self, mock_rm_lus):
"""Verify rm_tier_storage does a feed GET if lufeed not provided."""
# Empty return from _rm_lus so the loop doesn't run
mock_rm_lus.return_value = []
lus_to_rm = [mock.Mock()]
ts.rm_tier_storage(lus_to_rm, tier=self.tier)
self.mock_feed_get.assert_called_once_with(self.tier.adapter,
parent=self.tier)
mock_rm_lus.assert_called_once_with(self.entries, lus_to_rm,
del_unused_images=True)
self.mock_feed_get.reset_mock()
mock_rm_lus.reset_mock()
# Now ensure we don't do the feed get if a valid lufeed is provided.
lufeed = [mock.Mock(spec=stor.LUEnt)]
# Also test del_unused_images=False
ts.rm_tier_storage(lus_to_rm, lufeed=lufeed, del_unused_images=False)
self.mock_feed_get.assert_not_called()
mock_rm_lus.assert_called_once_with(lufeed, lus_to_rm,
del_unused_images=False)
def test_rm_tier_storage1(self):
"""Verify rm_tier_storage removes what it oughtta."""
# Should be able to use either LUEnt or LU
clone1 = stor.LU.bld(None, self.clone1.name, 1)
clone1._udid(self.clone1.udid)
# HttpError doesn't prevent everyone from deleting.
self.clone1.delete.side_effect = exc.HttpError(mock.Mock())
ts.rm_tier_storage([clone1, self.clone2], lufeed=self.entries)
self.clone1.delete.assert_called_once_with()
self.clone2.delete.assert_called_once_with()
# Backing image should not be removed because clone3 still linked. So
# final result should be just the two removed.
self.assertEqual(self.orig_len - 2, len(self.entries))
# Now if we remove the last clone, the image LU should go too.
ts.rm_tier_storage([self.clone3], lufeed=self.entries)
self.clone3.delete.assert_called_once_with()
self.img_lu.delete.assert_called_once_with()
self.assertEqual(self.orig_len - 4, len(self.entries))
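# --- Illustrative sketch (not part of pypowervm) ---------------------------
# The linked-clone tests above assert that an image LU is only deleted once
# no remaining disk LU points back at it (del_unused_images).  A minimal,
# hypothetical version of that bookkeeping, assuming plain .udid and
# .cloned_from_udid attributes and ignoring the udid-prefix handling the
# real library performs:
def _example_unused_image_lus(lus, is_image):
    """Return the image LUs (per the is_image predicate) no clone references."""
    referenced = set(lu.cloned_from_udid for lu in lus
                     if getattr(lu, 'cloned_from_udid', None))
    return [lu for lu in lus if is_image(lu) and lu.udid not in referenced]
# e.g.: _example_unused_image_lus(feed, lambda l: l.lu_type == stor.LUType.IMAGE)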
class TestLU(testtools.TestCase):
def setUp(self):
super(TestLU, self).setUp()
self.adpt = self.useFixture(fx.AdapterFx()).adpt
self.adpt.update_by_path = _mock_update_by_path
self.adpt.extend_path = lambda x, xag: x
self.ssp = stor.SSP.bld(self.adpt, 'ssp1', [])
for i in range(5):
lu = stor.LU.bld(self.adpt, 'lu%d' % i, i + 1)
lu._udid('udid_' + lu.name)
self.ssp.logical_units.append(lu)
self.ssp.entry.properties = {
'links': {'SELF': ['/rest/api/uom/SharedStoragePool/123']}}
self.ssp._etag = 'before'
@mock.patch('pypowervm.wrappers.storage.LUEnt.bld')
@mock.patch('pypowervm.wrappers.storage.Tier.search')
def test_crt_lu(self, mock_tier_srch, mock_lu_bld):
ssp = mock.Mock(spec=stor.SSP)
tier = mock.Mock(spec=stor.Tier)
def validate(ret, use_ssp, thin, typ, clone):
self.assertEqual(ssp.refresh.return_value if use_ssp else tier,
ret[0])
self.assertEqual(mock_lu_bld.return_value.create.return_value,
ret[1])
if use_ssp:
mock_tier_srch.assert_called_with(
ssp.adapter, parent=ssp, is_default=True, one_result=True)
mock_lu_bld.assert_called_with(
ssp.adapter if use_ssp else tier.adapter, 'lu5', 10, thin=thin,
typ=typ, clone=clone)
mock_lu_bld.return_value.create.assert_called_with(
parent=mock_tier_srch.return_value if use_ssp else tier)
mock_lu_bld.reset_mock()
# No optionals
validate(ts.crt_lu(tier, 'lu5', 10), False, None, None, None)
validate(ts.crt_lu(ssp, 'lu5', 10), True, None, None, None)
# Thin
validate(ts.crt_lu(tier, 'lu5', 10, thin=True), False, True, None,
None)
validate(ts.crt_lu(ssp, 'lu5', 10, thin=True), True, True, None, None)
# Type
validate(ts.crt_lu(tier, 'lu5', 10, typ=stor.LUType.IMAGE), False,
None, stor.LUType.IMAGE, None)
validate(ts.crt_lu(ssp, 'lu5', 10, typ=stor.LUType.IMAGE), True, None,
stor.LUType.IMAGE, None)
# Clone
clone = mock.Mock(udid='cloned_from_udid')
validate(ts.crt_lu(tier, 'lu5', 10, clone=clone), False, None, None,
clone)
validate(ts.crt_lu(ssp, 'lu5', 10, clone=clone), True, None, None,
clone)
# Exception path
mock_tier_srch.return_value = None
self.assertRaises(exc.NoDefaultTierFoundOnSSP, ts.crt_lu, ssp, '5', 10)
# But that doesn't happen if specifying tier
validate(ts.crt_lu(tier, 'lu5', 10), False, None, None, None)
def test_rm_lu_by_lu(self):
lu = self.ssp.logical_units[2]
ssp = ts.rm_ssp_storage(self.ssp, [lu])
self.assertNotIn(lu, ssp.logical_units)
self.assertEqual(ssp.etag, 'after')
self.assertEqual(len(ssp.logical_units), 4)
class TestLULinkedClone(testtools.TestCase):
def setUp(self):
super(TestLULinkedClone, self).setUp()
self.adpt = self.useFixture(fx.AdapterFx()).adpt
self.adpt.update_by_path = _mock_update_by_path
self.adpt.extend_path = lambda x, xag: x
self.ssp = stor.SSP.bld(self.adpt, 'ssp1', [])
# img_lu1 not cloned
self.img_lu1 = self._mk_img_lu(1)
self.ssp.logical_units.append(self.img_lu1)
# img_lu2 has two clones
self.img_lu2 = self._mk_img_lu(2)
self.ssp.logical_units.append(self.img_lu2)
self.dsk_lu3 = self._mk_dsk_lu(3, 2)
self.ssp.logical_units.append(self.dsk_lu3)
self.dsk_lu4 = self._mk_dsk_lu(4, 2)
self.ssp.logical_units.append(self.dsk_lu4)
# img_lu5 has one clone
self.img_lu5 = self._mk_img_lu(5)
self.ssp.logical_units.append(self.img_lu5)
self.dsk_lu6 = self._mk_dsk_lu(6, 5)
self.ssp.logical_units.append(self.dsk_lu6)
self.dsk_lu_orphan = self._mk_dsk_lu(7, None)
self.ssp.logical_units.append(self.dsk_lu_orphan)
self.ssp.entry.properties = {
'links': {'SELF': ['/rest/api/uom/SharedStoragePool/123']}}
self.ssp._etag = 'before'
def _mk_img_lu(self, idx):
lu = stor.LU.bld(self.adpt, 'img_lu%d' % idx, 123,
typ=stor.LUType.IMAGE)
lu._udid('xxabc123%d' % idx)
return lu
def _mk_dsk_lu(self, idx, cloned_from_idx):
lu = stor.LU.bld(self.adpt, 'dsk_lu%d' % idx, 123,
typ=stor.LUType.DISK)
lu._udid('xxDisk-LU-UDID-%d' % idx)
# Allow for "orphan" clones
if cloned_from_idx is not None:
lu._cloned_from_udid('yyabc123%d' % cloned_from_idx)
return lu
@mock.patch('warnings.warn')
@mock.patch('pypowervm.tasks.storage.crt_lu')
def test_crt_lu_linked_clone(self, mock_crt_lu, mock_warn):
src_lu = self.ssp.logical_units[0]
mock_crt_lu.return_value = ('ssp', 'dst_lu')
self.assertEqual(('ssp', 'dst_lu'), ts.crt_lu_linked_clone(
self.ssp, 'clust1', src_lu, 'linked_lu'))
mock_crt_lu.assert_called_once_with(
self.ssp, 'linked_lu', 0, thin=True, typ=stor.LUType.DISK,
clone=src_lu)
mock_warn.assert_called_once_with(mock.ANY, DeprecationWarning)
def test_image_lu_in_use(self):
# The orphan will trigger a warning as we cycle through all the LUs
# without finding any backed by this image.
with self.assertLogs(ts.__name__, 'WARNING'):
self.assertFalse(ts._image_lu_in_use(self.ssp.logical_units,
self.img_lu1))
self.assertTrue(ts._image_lu_in_use(self.ssp.logical_units,
self.img_lu2))
def test_image_lu_for_clone(self):
self.assertEqual(self.img_lu2,
ts._image_lu_for_clone(self.ssp.logical_units,
self.dsk_lu3))
self.dsk_lu3._cloned_from_udid(None)
self.assertIsNone(ts._image_lu_for_clone(self.ssp.logical_units,
self.dsk_lu3))
def test_rm_ssp_storage(self):
lu_names = set(lu.name for lu in self.ssp.logical_units)
# This one should remove the disk LU but *not* the image LU
ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu3],
del_unused_images=False)
lu_names.remove(self.dsk_lu3.name)
self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
# This one should remove *both* the disk LU and the image LU
ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu4])
lu_names.remove(self.dsk_lu4.name)
lu_names.remove(self.img_lu2.name)
self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
# This one should remove the disk LU but *not* the image LU, even
# though it's now unused.
self.assertTrue(ts._image_lu_in_use(self.ssp.logical_units,
self.img_lu5))
ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu6],
del_unused_images=False)
lu_names.remove(self.dsk_lu6.name)
self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
self.assertFalse(ts._image_lu_in_use(self.ssp.logical_units,
self.img_lu5))
# No update if no change
self.adpt.update_by_path = lambda *a, **k: self.fail()
ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu4])
class TestScrub(testtools.TestCase):
"""Two VIOSes in feed; no VFC mappings; no storage in VSCSI mappings."""
def setUp(self):
super(TestScrub, self).setUp()
adpt = self.useFixture(fx.AdapterFx()).adpt
self.vio_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED, adpt))
self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
self.logfx = self.useFixture(fx.LoggingFx())
self.ftsk = tx.FeedTask('scrub', self.vio_feed)
@mock.patch('pypowervm.tasks.storage._RemoveStorage.execute')
def test_no_matches(self, mock_rm_stg):
"""When removals have no hits, log debug messages, but no warnings."""
# Our data set has no VFC mappings and no VSCSI mappings with LPAR ID 1
ts.add_lpar_storage_scrub_tasks([1], self.ftsk, lpars_exist=True)
self.ftsk.execute()
self.assertEqual(0, self.logfx.patchers['warning'].mock.call_count)
for vname in (vwrap.name for vwrap in self.vio_feed):
self.logfx.patchers['debug'].mock.assert_any_call(
mock.ANY, dict(stg_type='VSCSI', lpar_id=1, vios_name=vname))
self.logfx.patchers['debug'].mock.assert_any_call(
mock.ANY, dict(stg_type='VFC', lpar_id=1, vios_name=vname))
self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
self.assertEqual(1, mock_rm_stg.call_count)
@mock.patch('pypowervm.tasks.vfc_mapper.remove_maps')
def test_matches_warn(self, mock_rm_vfc_maps):
"""When removals hit, log warnings including the removal count."""
# Mock vfc remove_maps with a multi-element list to verify num_maps
mock_rm_vfc_maps.return_value = [1, 2, 3]
ts.add_lpar_storage_scrub_tasks([32], self.ftsk, lpars_exist=True)
self.ftsk.execute()
mock_rm_vfc_maps.assert_has_calls(
[mock.call(wrp, 32) for wrp in self.vio_feed], any_order=True)
for vname in (vwrap.name for vwrap in self.vio_feed):
self.logfx.patchers['warning'].mock.assert_any_call(
mock.ANY, dict(stg_type='VFC', num_maps=3, lpar_id=32,
vios_name=vname))
self.logfx.patchers['warning'].mock.assert_any_call(
mock.ANY, dict(stg_type='VSCSI', num_maps=1, lpar_id=32,
vios_name='nimbus-ch03-p2-vios1'))
self.logfx.patchers['debug'].mock.assert_any_call(
mock.ANY, dict(stg_type='VSCSI', lpar_id=32,
vios_name='nimbus-ch03-p2-vios2'))
self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
# By not mocking _RemoveStorage, prove it shorts out (the mapping for
# LPAR ID 32 has no backing storage).
@mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap')
def test_multiple_removals(self, mock_wrap):
# Pretend LPAR feed is "empty" so we don't skip any removals.
mock_wrap.return_value = []
v1 = self.vio_feed[0]
v2 = self.vio_feed[1]
v1_map_count = len(v1.scsi_mappings)
v2_map_count = len(v2.scsi_mappings)
# Zero removals works
ts.add_lpar_storage_scrub_tasks([], self.ftsk)
self.ftsk.execute()
self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
# Removals for which no mappings exist
ts.add_lpar_storage_scrub_tasks([71, 72, 76, 77], self.ftsk)
self.ftsk.execute()
self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
# Remove some from each VIOS
self.assertEqual(v1_map_count, len(v1.scsi_mappings))
self.assertEqual(v2_map_count, len(v2.scsi_mappings))
ts.add_lpar_storage_scrub_tasks([3, 37, 80, 7, 27, 85], self.ftsk)
self.ftsk.execute()
self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
self.assertEqual(v1_map_count - 3, len(v1.scsi_mappings))
self.assertEqual(v2_map_count - 3, len(v2.scsi_mappings))
# Now make the LPAR feed hit some of the removals. They should be
# skipped.
self.txfx.patchers['update'].mock.reset_mock()
v1_map_count = len(v1.scsi_mappings)
v2_map_count = len(v2.scsi_mappings)
mock_wrap.return_value = [mock.Mock(id=i) for i in (4, 5, 8, 11)]
ts.add_lpar_storage_scrub_tasks([4, 5, 6, 8, 11, 12], self.ftsk)
self.ftsk.execute()
self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
self.assertEqual(v1_map_count - 1, len(v1.scsi_mappings))
self.assertEqual(v2_map_count - 1, len(v2.scsi_mappings))
# Make sure the right ones were ignored
v1_map_lids = [sm.server_adapter.lpar_id for sm in v1.scsi_mappings]
v2_map_lids = [sm.server_adapter.lpar_id for sm in v2.scsi_mappings]
self.assertIn(4, v1_map_lids)
self.assertIn(5, v1_map_lids)
self.assertIn(8, v2_map_lids)
self.assertIn(11, v2_map_lids)
# ...and the right ones were removed
self.assertNotIn(6, v1_map_lids)
self.assertNotIn(12, v2_map_lids)
class TestScrub2(testtools.TestCase):
"""One VIOS in feed; VFC mappings; interesting VSCSI mappings."""
def setUp(self):
super(TestScrub2, self).setUp()
self.adpt = self.useFixture(
fx.AdapterFx(traits=fx.RemotePVMTraits)).adpt
self.vio_feed = [vios.VIOS.wrap(tju.load_file(VIOS_ENTRY, self.adpt))]
self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
self.logfx = self.useFixture(fx.LoggingFx())
self.ftsk = tx.FeedTask('scrub', self.vio_feed)
@mock.patch('pypowervm.tasks.storage._rm_vdisks')
@mock.patch('pypowervm.tasks.storage._rm_vopts')
@mock.patch('pypowervm.tasks.storage._rm_lus')
def test_lu_vopt_vdisk(self, mock_rm_lu, mock_rm_vopt, mock_rm_vd):
def verify_rm_stg_call(exp_list):
def _rm_stg(wrapper, stglist, *a, **k):
self.assertEqual(len(exp_list), len(stglist))
for exp, act in zip(exp_list, stglist):
self.assertEqual(exp.udid, act.udid)
return _rm_stg
warns = [mock.call(
mock.ANY, {'stg_type': 'VSCSI', 'lpar_id': 3, 'num_maps': 3,
'vios_name': self.vio_feed[0].name})]
# We should ignore the LUs...
mock_rm_lu.side_effect = self.fail
# ...but should emit a warning about ignoring them
warns.append(mock.call(
mock.ANY,
{'stg_name': 'volume-boot-8246L1C_0604CAA-salsman66-00000004',
'stg_type': 'LogicalUnit'}))
vorm = self.vio_feed[0].scsi_mappings[5].backing_storage
mock_rm_vopt.side_effect = verify_rm_stg_call([vorm])
warns.append(mock.call(
mock.ANY, {'vocount': 1, 'vios': self.vio_feed[0].name,
'volist': ["%s (%s)" % (vorm.name, vorm.udid)]}))
vdrm = self.vio_feed[0].scsi_mappings[8].backing_storage
mock_rm_vd.side_effect = verify_rm_stg_call([vdrm])
warns.append(mock.call(
mock.ANY, {'vdcount': 1, 'vios': self.vio_feed[0].name,
'vdlist': ["%s (%s)" % (vdrm.name, vdrm.udid)]}))
ts.add_lpar_storage_scrub_tasks([3], self.ftsk, lpars_exist=True)
# LPAR ID 45 is not represented in the mappings. Test a) that it is
# ignored, b) that we can have two separate LPAR storage scrub tasks
# in the same FeedTask (no duplicate 'provides' names).
ts.add_lpar_storage_scrub_tasks([45], self.ftsk, lpars_exist=True)
self.ftsk.execute()
self.assertEqual(2, mock_rm_vopt.call_count)
self.assertEqual(2, mock_rm_vd.call_count)
self.logfx.patchers['warning'].mock.assert_has_calls(
warns, any_order=True)
@mock.patch('pypowervm.tasks.storage._rm_vdisks')
@mock.patch('pypowervm.tasks.storage._rm_vopts')
@mock.patch('pypowervm.tasks.storage._rm_lus')
def test_no_remove_storage(self, mock_rm_lu, mock_rm_vopt, mock_rm_vd):
ts.add_lpar_storage_scrub_tasks([3], self.ftsk, lpars_exist=True,
remove_storage=False)
self.ftsk.execute()
mock_rm_lu.assert_not_called()
mock_rm_vopt.assert_not_called()
mock_rm_vd.assert_not_called()
@mock.patch('pypowervm.wrappers.logical_partition.LPAR.get')
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
def test_find_stale_lpars(self, mock_vios, mock_lpar):
mock_vios.return_value = self.vio_feed
mock_lpar.return_value = lpar.LPAR.wrap(
tju.load_file(LPAR_FEED, adapter=self.adpt))
self.assertEqual({55, 21}, set(ts.find_stale_lpars(self.vio_feed[0])))
class TestScrub3(testtools.TestCase):
"""One VIOS; lots of orphan VSCSI and VFC mappings."""
def setUp(self):
super(TestScrub3, self).setUp()
self.adpt = self.useFixture(fx.AdapterFx()).adpt
self.vio_feed = [vios.VIOS.wrap(tju.load_file(VIOS_ENTRY2, self.adpt))]
self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
self.logfx = self.useFixture(fx.LoggingFx())
self.ftsk = tx.FeedTask('scrub', self.vio_feed)
@mock.patch('pypowervm.tasks.storage._rm_vopts')
def test_orphan(self, mock_rm_vopts):
"""Scrub orphan VSCSI and VFC mappings."""
def validate_rm_vopts(vgwrap, vopts, **kwargs):
# Two of the VSCSI mappings have storage; both are vopts
self.assertEqual(2, len(vopts))
mock_rm_vopts.side_effect = validate_rm_vopts
vwrap = self.vio_feed[0]
# Save the "before" sizes of the mapping lists
vscsi_len = len(vwrap.scsi_mappings)
vfc_len = len(vwrap.vfc_mappings)
ts.add_orphan_storage_scrub_tasks(self.ftsk)
ret = self.ftsk.execute()
# One for vscsi maps, one for vfc maps, one for vopt storage
self.assertEqual(3, self.logfx.patchers['warning'].mock.call_count)
# Pull out the WrapperTask returns from the (one) VIOS
wtr = ret['wrapper_task_rets'].popitem()[1]
vscsi_removals = wtr['vscsi_removals_orphans']
self.assertEqual(18, len(vscsi_removals))
# Removals are really orphans
for srm in vscsi_removals:
self.assertIsNone(srm.client_adapter)
# The right number of maps remain.
self.assertEqual(vscsi_len - 18, len(vwrap.scsi_mappings))
# Assert the "any" adapter still exists in the mappings.
self.assertIn(stor.ANY_SLOT, [smp.server_adapter.lpar_slot_num for
smp in vwrap.scsi_mappings])
# Remaining maps are not orphans.
for smp in vwrap.scsi_mappings:
if smp.server_adapter.lpar_slot_num != stor.ANY_SLOT:
self.assertIsNotNone(smp.client_adapter)
# _RemoveOrphanVfcMaps doesn't "provide", so the following are limited.
# The right number of maps remain.
self.assertEqual(vfc_len - 19, len(vwrap.vfc_mappings))
# Remaining maps are not orphans.
for fmp in vwrap.vfc_mappings:
self.assertIsNotNone(fmp.client_adapter)
# POST was warranted.
self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
# _RemoveStorage invoked _rm_vopts
self.assertEqual(1, mock_rm_vopts.call_count)
@mock.patch('pypowervm.tasks.storage._rm_vdisks')
@mock.patch('pypowervm.tasks.storage._rm_vopts')
@mock.patch('pypowervm.tasks.storage.find_stale_lpars')
@mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap')
def test_comprehensive_scrub(self, mock_wrap, mock_stale_lids,
mock_rm_vopts, mock_rm_vdisks):
# Don't confuse the 'update' call count with the VG POST
mock_rm_vopts.return_value = None
mock_rm_vdisks.return_value = None
# Three "stale" LPARs in addition to the orphans. These LPAR IDs are
# represented in both VSCSI and VFC mappings.
mock_stale_lids.return_value = [15, 18, 22]
# Make sure all our "stale" lpars hit.
mock_wrap.return_value = []
vwrap = self.vio_feed[0]
# Save the "before" sizes of the mapping lists
vscsi_len = len(vwrap.scsi_mappings)
vfc_len = len(vwrap.vfc_mappings)
ts.ComprehensiveScrub(self.adpt).execute()
# The right number of maps remain.
self.assertEqual(vscsi_len - 21, len(vwrap.scsi_mappings))
self.assertEqual(vfc_len - 22, len(vwrap.vfc_mappings))
self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
self.assertEqual(1, mock_rm_vopts.call_count)
self.assertEqual(1, mock_rm_vdisks.call_count)
@staticmethod
def count_maps_for_lpar(mappings, lpar_id):
"""Count the mappings whose client side is the specified LPAR ID.
:param mappings: List of VFC or VSCSI mappings to search.
:param lpar_id: The client LPAR ID to search for.
:return: Integer - the number of mappings whose server_adapter.lpar_id
matches the specified lpar_id.
"""
return len([1 for amap in mappings
if amap.server_adapter.lpar_id == lpar_id])
def test_remove_portless_vfc_maps1(self):
"""Test _remove_portless_vfc_maps with no LPAR ID."""
vwrap = self.vio_feed[0]
# Save the "before" size of the VFC mapping list
vfc_len = len(vwrap.vfc_mappings)
# Count our target LPARs' mappings before
lpar24maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 24)
lpar124maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 124)
ts.ScrubPortlessVFCMaps(self.adpt).execute()
# Overall two fewer maps
self.assertEqual(vfc_len - 2, len(vwrap.vfc_mappings))
# ...and they were the right ones
self.assertEqual(lpar24maps - 1,
self.count_maps_for_lpar(vwrap.vfc_mappings, 24))
self.assertEqual(lpar124maps - 1,
self.count_maps_for_lpar(vwrap.vfc_mappings, 124))
self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
def test_remove_portless_vfc_maps2(self):
"""Test _remove_portless_vfc_maps specifying an LPAR ID."""
vwrap = self.vio_feed[0]
# Save the "before" size of the VFC mapping list
vfc_len = len(vwrap.vfc_mappings)
# Count our target LPAR's mappings before
lpar24maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 24)
ts.ScrubPortlessVFCMaps(self.adpt, lpar_id=24).execute()
# Overall one map was scrubbed
self.assertEqual(vfc_len - 1, len(vwrap.vfc_mappings))
# ...and it was the right one
self.assertEqual(lpar24maps - 1,
self.count_maps_for_lpar(vwrap.vfc_mappings, 24))
self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
@mock.patch('pypowervm.tasks.storage._rm_vopts')
@mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap')
def test_orphans_by_lpar_id(self, mock_wrap, mock_rm_vopts):
# Don't confuse the 'update' call count with the VG POST
mock_rm_vopts.return_value = None
mock_wrap.return_value = []
vwrap = self.vio_feed[0]
# Save the "before" sizes of the mapping lists
vscsi_len = len(vwrap.scsi_mappings)
vfc_len = len(vwrap.vfc_mappings)
# LPAR 24 has one orphan FC mapping, one portless FC mapping, one legit
# FC mapping, and one orphan SCSI mapping (for a vopt).
ts.ScrubOrphanStorageForLpar(self.adpt, 24).execute()
# The right number of maps remain.
self.assertEqual(vscsi_len - 1, len(vwrap.scsi_mappings))
self.assertEqual(vfc_len - 1, len(vwrap.vfc_mappings))
self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
self.assertEqual(1, mock_rm_vopts.call_count)
class TestScrub4(testtools.TestCase):
"""Novalink partition hosting storage for another VIOS partition"""
def setUp(self):
super(TestScrub4, self).setUp()
self.adpt = self.useFixture(fx.AdapterFx()).adpt
self.vio_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED2, self.adpt))
self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
self.logfx = self.useFixture(fx.LoggingFx())
self.ftsk = tx.FeedTask('scrub', [self.vio_feed[0]])
self.mock_lpar = self.useFixture(
fixtures.MockPatch('pypowervm.tasks.storage.lpar.LPAR.get')).mock
self.mock_vios = self.useFixture(
fixtures.MockPatch('pypowervm.tasks.storage.vios.VIOS.get')).mock
# Set default mock return values, these may be overridden per test
self.mock_lpar.return_value = lpar.LPAR.wrap(
tju.load_file(LPAR_FEED), self.adpt)
self.mock_vios.return_value = self.vio_feed
def test_find_stale_lpars_vios_only(self):
self.mock_lpar.return_value = []
self.assertEqual({16, 102}, set(ts.find_stale_lpars(self.vio_feed[0])))
def test_find_stale_lpars_combined(self):
self.assertEqual([102], ts.find_stale_lpars(self.vio_feed[0]))
@mock.patch('pypowervm.tasks.storage._remove_lpar_maps')
def test_orphan_scrub(self, mock_rm_lpar):
def client_adapter_data(mappings):
return {(smap.server_adapter.lpar_id,
smap.server_adapter.lpar_slot_num) for smap in mappings}
scsi_maps = client_adapter_data(self.vio_feed[0].scsi_mappings)
vfc_maps = client_adapter_data(self.vio_feed[0].vfc_mappings)
ts.ComprehensiveScrub(self.adpt).execute()
# Assert that stale lpar detection works correctly
# (LPAR 102 does not exist)
mock_rm_lpar.assert_has_calls([
mock.call(self.vio_feed[0], [102], mock.ANY),
mock.call(self.vio_feed[1], [], mock.ANY),
mock.call(self.vio_feed[2], [], mock.ANY)
], any_order=True)
# Assert that orphan detection removed the correct SCSI mapping
# (VSCSI Mapping for VIOS 101, slot 17 has no client adapter)
scsi_maps -= client_adapter_data(self.vio_feed[0].scsi_mappings)
self.assertEqual({(101, 17)}, scsi_maps)
# Assert that orphan detection removed the correct VFC mapping
# (VFC Mapping for LP 100 slot 50 has no client adapter)
vfc_maps -= client_adapter_data(self.vio_feed[0].vfc_mappings)
self.assertEqual({(100, 50)}, vfc_maps)
@mock.patch('pypowervm.tasks.storage._remove_lpar_maps')
def test_add_lpar_storage_scrub_tasks(self, mock_rm_lpar):
# Some of the IDs in "lpar_list" appear in the LPAR feed,
# and others appear in the VIOS feed.
# IDs in "stale_lpars" do not exist in either the LPAR or VIOS feed.
lpar_list = [100, 101, 102, 55, 21, 4, 2, 16]
stale_lpars = {102, 55, 21}
ts.add_lpar_storage_scrub_tasks(lpar_list, self.ftsk,
remove_storage=False)
self.ftsk.execute()
self.assertEqual(2, mock_rm_lpar.call_count)
mock_rm_lpar.assert_has_calls([
mock.call(self.vio_feed[0], stale_lpars, 'VSCSI'),
mock.call(self.vio_feed[0], stale_lpars, 'VFC')
], any_order=True)
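# --- Illustrative sketch (not part of pypowervm) ---------------------------
# TestScrub4 above expects "stale" LPAR IDs to be the ones referenced by a
# VIOS's mappings but present in neither the LPAR feed nor the VIOS feed.
# Conceptually that is just a set difference; a hypothetical reduction:
def _example_stale_lpar_ids(mapped_ids, lpar_feed_ids, vios_feed_ids):
    """IDs that appear in mappings but in neither partition feed."""
    return sorted(set(mapped_ids) - set(lpar_feed_ids) - set(vios_feed_ids))
# e.g. _example_stale_lpar_ids([100, 101, 102, 16], [16], [100, 101]) == [102]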
|
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
from webob import exc
from nova.api.openstack.compute.contrib import aggregates
from nova import context
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
AGGREGATE_LIST = [
{"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
{"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
{"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
{"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
AGGREGATE = {"name": "aggregate1",
"id": "1",
"availability_zone": "nova1",
"metadata": {"foo": "bar"},
"hosts": ["host1, host2"]}
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
def setUp(self):
super(AggregateTestCase, self).setUp()
self.controller = aggregates.AggregateController()
self.req = FakeRequest()
self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
self.context = self.req.environ['nova.context']
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_index_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index,
self.user_req)
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.user_req,
{"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exception.InvalidAggregateAction,
self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertIsNone(availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req,
{"aggregate": {"name": "test"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_null_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "",
"availability_zone": "nova1"}})
def test_create_with_name_too_long(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "x" * 256,
"availability_zone": "nova1"}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
availability_zone="nova1",
foo='bar'))
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show,
self.user_req, "1")
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.update,
self.user_req, "1", body={})
def test_update_with_only_name(self):
body = {"aggregate": {"name": "new_name"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_availability_zone(self):
body = {"aggregate": {"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
test_metadata = {"aggregate": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_no_update_key(self):
test_metadata = {"asdf": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_wrong_updates(self):
test_metadata = {"aggregate": {"status": "disable",
"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_null_name(self):
test_metadata = {"aggregate": {"name": ""}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_name_too_long(self):
test_metadata = {"aggregate": {"name": "x" * 256}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_bad_aggregate(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_duplicated_name(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNameExists(aggregate_name="test_name")
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.update,
self.req, "2", body=test_metadata)
def test_invalid_action(self):
body = {"append_host": {"host": "host1"}}
self.assertRaises(exc.HTTPBadRequest,
self.controller.action, self.req, "1", body=body)
def test_add_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
return AGGREGATE
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
aggregate = self.controller.action(self.req, "1",
body={"add_host": {"host":
"host1"}})
self.assertEqual(aggregate["aggregate"], AGGREGATE)
def test_add_host_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.action,
self.user_req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_already_added_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateHostExists(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.action,
self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_aggregate(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1",
body={"add_host": {"host": "bogus_host"}})
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"add_host": {"asdf": "asdf"}})
def test_add_host_raises_key_error(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise KeyError
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
#NOTE(mtreinish) The check for a KeyError here is to ensure that
# if add_host_to_aggregate() raises a KeyError it propagates. At
# one point the api code would mask the error as an HTTPBadRequest.
# This test is to ensure that this doesn't occur again.
self.assertRaises(KeyError, self.controller.action, self.req, "1",
body={"add_host": {"host": "host1"}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
stub_remove_host_from_aggregate.called = True
return {}
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.controller.action(self.req, "1",
body={"remove_host": {"host": "host1"}})
self.assertTrue(stub_remove_host_from_aggregate.called)
def test_remove_host_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.action,
self.user_req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1", body={"remove_host": {"host": "bogushost"}})
def test_remove_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"remove_host": {"asdf": "asdf",
"host": "asdf"}})
def test_set_metadata(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertThat(body["set_metadata"]['metadata'],
matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
result = self.controller.action(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_set_metadata_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._set_metadata,
self.user_req, "1",
body={"set_metadata": {"metadata":
{"foo": "bar"}}})
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bad_aggregate", body=body)
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
stub_delete_aggregate.called = True
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.controller.delete(self.req, "1")
self.assertTrue(stub_delete_aggregate.called)
def test_delete_aggregate_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete,
self.user_req, "1")
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, "bogus_aggregate")
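# --- Illustrative sketch (not part of nova) --------------------------------
# Every action test above posts a body keyed by the action name, e.g.
# {"add_host": {"host": "host1"}} or {"set_metadata": {"metadata": {...}}},
# and bodies with unknown, missing or extra keys are rejected.  A
# hypothetical validator showing just that shape (the accepted action names
# below mirror the tests, nothing more):
_EXAMPLE_ACTIONS = {'add_host': 'host',
                    'remove_host': 'host',
                    'set_metadata': 'metadata'}
def _example_validate_action_body(body):
    """Return (action, payload) or raise ValueError for a malformed body."""
    if len(body) != 1:
        raise ValueError('exactly one action expected')
    action, params = next(iter(body.items()))
    required = _EXAMPLE_ACTIONS.get(action)
    if required is None:
        raise ValueError('unknown action %r' % action)
    if not isinstance(params, dict) or set(params) != set([required]):
        raise ValueError('%s requires exactly the %r key' % (action, required))
    return action, params[required]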
|
|
# -*- coding: utf-8 -*-
'''
Created on 2014-3-5
@author: CL.lam
'''
from cgi import FieldStorage
from tg import flash, redirect, expose
from tg.decorators import paginate
from repoze.what import authorize
from sqlalchemy.sql.expression import and_, desc
from rpac.lib.base import BaseController
from rpac.util.common import tabFocus, sysUpload, generate_thumbnail
from rpac.model import qry, Care, COO, Fibers, Division, Brand, Category, DBSession, GarmentPart, Product, Size
from rpac.widgets.master import master_search_form1, master_search_form2, \
master_search_form3, master_search_form4, master_search_form5
from rpac.constant import INACTIVE, ACTIVE
from rpac.model import FileObject
__all__ = ['MasterController', ]
class MasterController( BaseController ):
allow_only = authorize.not_anonymous()
@expose( 'rpac.templates.master.index' )
@paginate( "result", items_per_page = 20 )
@tabFocus( tab_type = "master" )
def index( self , **kw ):
t = kw.get( 't', None )
vs = self._check( t )
dbclz = vs['dbclz']
search_form = vs['search_form']
label = vs['label']
ws = []
if search_form == master_search_form1:
if kw.get( "name", False ) : ws.append( dbclz.name.op( "ilike" )( "%%%s%%" % kw["name"] ) )
elif search_form == master_search_form2 or search_form == master_search_form3:
if kw.get( "english", False ) : ws.append( dbclz.english.op( "ilike" )( "%%%s%%" % kw["english"] ) )
if search_form == master_search_form3:
if kw.get( "category", False ) : ws.append( dbclz.category == kw['category'] )
elif search_form == master_search_form4:
if kw.get( "brand", False ) : ws.append( dbclz.brandId == kw["brand"] )
if kw.get( "division", False ) : ws.append( dbclz.divisionId == kw["division"] )
if kw.get( "category", False ) : ws.append( dbclz.categoryId == kw["category"] )
if kw.get( "itemCode", False ) : ws.append( dbclz.itemCode.op( "ilike" )( "%%%s%%" % kw["itemCode"] ) )
elif search_form == master_search_form5:
if kw.get( "brand", False ) : ws.append( dbclz.brandId == kw["brand"] )
if kw.get( "category", False ) : ws.append( dbclz.categoryId == kw["category"] )
if kw.get( "us_size", False ) : ws.append( dbclz.us_size.op( "ilike" )( "%%%s%%" % kw["us_size"] ) )
if kw.get( "create_time_from", False ) : ws.append( dbclz.createTime >= kw["create_time_from"] )
if kw.get( "create_time_to", False ) : ws.append( dbclz.createTime <= kw["create_time_to"] )
ws.append( dbclz.active == ACTIVE )
result = qry( dbclz ).filter( and_( *ws ) ).order_by( desc( dbclz.createTime ) ).all()
return { "result" : result , "values" : kw, "widget" : search_form, 'label' : label}
@expose( 'rpac.templates.master.add' )
@tabFocus( tab_type = "master" )
def add( self, **kw ):
t = kw.get( 't', None )
vs = self._check( t )
data = {'t' : t , 'label' : vs['label']}
if t in [ 'Product', 'Size' ]:
cats = qry(Category.id, Category.name).order_by(Category.name)
data['cats'] = cats
brands = qry(Brand.id, Brand.name).order_by(Brand.name)
data['brands'] = brands
divisions = qry(Division.id, Division.name).order_by(Division.name)
data['divisions'] = divisions
return data
@expose()
def save_new( self, **kw ):
t = kw.get( 't', None )
vs = self._check( t )
dbclz = vs['dbclz']
if t in ['Division', 'Category', 'Brand', ]:
name = kw.get( 'name', None ) or None
if not name:
flash( "The value cannot be blank!", "warn" )
return redirect( '/master/add?t=%s' % t )
DBSession.add( dbclz( name = name ) )
flash( 'Saved the new record successfully!', "ok" )
elif t in ['Fibers', 'COO', 'GarmentPart', ]:
params = {}
for f in ['english', 'french_canadian', 'spanish_mx', 'spanish_latin', 'russian',
'french', 'arabic', 'japanese', 'hebrew', 'turkish', 'polish', 'chinese_simple',
'bahasa', 'german', 'dutch', 'hindi'] : params[f] = kw.get( f, None ) or None
if not params['english'] :
flash( 'The English value cannot be blank!', "warn" )
return redirect( '/master/add?t=%s' % t )
DBSession.add( dbclz( **params ) )
flash( 'Saved the new record successfully!', "ok" )
elif t in ['Care', ]:
params = {}
for f in ['english', 'french_canadian', 'spanish_mx', 'spanish_latin', 'russian',
'french', 'arabic', 'japanese', 'hebrew', 'turkish', 'polish', 'chinese_simple',
'bahasa', 'german', 'dutch', 'hindi', 'category'] : params[f] = kw.get( f, None ) or None
if not params['english'] or not params['category'] :
flash( 'The English and Category values cannot be blank!', "warn" )
return redirect( '/master/add?t=%s' % t )
DBSession.add( dbclz( **params ) )
flash( 'Saved the new record successfully!', "ok" )
elif t in ['Product']:
del kw['t']
itemCode = kw.get('itemCode')
width = kw.get('width')
substrate = kw.get('substrate')
if not itemCode or not width or not substrate:
flash("Can't be empty!", 'warn')
return redirect( '/master/add?t=%s' % t )
c = qry(Product).filter_by(itemCode=itemCode).count()
if c:
flash("ItemCode already exists!", 'warn')
return redirect( '/master/add?t=%s' % t )
image = kw['image']
# if image not in ['', None]:
if isinstance(image, FieldStorage):
img_info = sysUpload(image, folder='files')
img = qry(FileObject).get(img_info[1][0])
kw['image'] = img.url
thumb = generate_thumbnail(img)
kw['thumb'] = thumb.url
obj = dbclz(**kw)
DBSession.add(obj)
flash('Success!', 'ok')
elif t in ['Size']:
del kw['t']
obj = dbclz(**kw)
DBSession.add(obj)
flash('Success!', 'ok')
return redirect( '/master/add?t=%s' % t )
return redirect( '/master/index?t=%s' % t )
@expose( 'rpac.templates.master.edit' )
@tabFocus( tab_type = "master" )
def edit( self, **kw ):
t = kw.get( 't', None )
vs = self._check( t )
_id = kw.get( 'id', None )
dbclz = vs['dbclz']
obj = qry( dbclz ).get( _id )
result = {'t' : t, 'obj' : obj , 'label' : vs['label']}
if t in [ 'Product', 'Size' ]:
data = {}
categorys = qry(Category.id, Category.name).order_by(Category.name)
data['categorys'] = categorys
brands = qry(Brand.id, Brand.name).order_by(Brand.name)
data['brands'] = brands
divisions = qry(Division.id, Division.name).order_by(Division.name)
data['divisions'] = divisions
result['data'] = data
return result
@expose()
def save_edit( self, **kw ):
t = kw.get( 't', None )
vs = self._check( t )
_id = kw.get( 'id', None )
dbclz = vs['dbclz']
obj = qry( dbclz ).get( _id )
if t in ['Division', 'Category', 'Brand', ]:
name = kw.get( 'name', None ) or None
if not name:
flash( "The value cannot be blank!", "warn" )
return redirect( '/master/edit?id=%s&t=%s' % ( obj.id, t ) )
obj.name = name
elif t in ['Fibers', 'COO', ]:
params = {}
for f in ['english', 'french_canadian', 'spanish_mx', 'spanish_latin', 'russian',
'french', 'arabic', 'japanese', 'hebrew', 'turkish', 'polish', 'chinese_simple',
'bahasa', 'german', 'dutch', 'hindi'] : params[f] = kw.get( f, None ) or None
if not params['english'] :
flash( 'The English value cannot be blank!', "warn" )
return redirect( '/master/edit?t=%s&id=%s' % ( t, obj.id ) )
for k, v in params.items() : setattr( obj, k, v )
elif t in ['Care', ]:
params = {}
for f in ['english', 'french_canadian', 'spanish_mx', 'spanish_latin', 'russian',
'french', 'arabic', 'japanese', 'hebrew', 'turkish', 'polish', 'chinese_simple',
'bahasa', 'german', 'dutch', 'hindi', 'category'] : params[f] = kw.get( f, None ) or None
if not params['english'] or not params['category']:
flash( 'The English and Category values cannot be blank!', "warn" )
return redirect( '/master/edit?t=%s&id=%s' % ( t, obj.id ) )
for k, v in params.items() : setattr( obj, k, v )
elif t in ['Product', ]:
itemCode = kw.get('itemCode')
width = kw.get('width')
substrate = kw.get('substrate')
if not itemCode or not width or not substrate:
flash("Can't be empty!", 'warn')
return redirect( '/master/edit?t=%s&id=%s' % (t, obj.id) )
c = qry(Product).filter_by(itemCode=itemCode).count() and itemCode != obj.itemCode
if c:
flash("ItemCode already exists!", 'warn')
return redirect( '/master/edit?t=%s&id=%s' % (t, obj.id) )
del kw['t']
image = kw['image']
if isinstance(image, FieldStorage):
img_info = sysUpload(image, folder='files')
img = qry(FileObject).get(img_info[1][0])
kw['image'] = img.url
thumb = generate_thumbnail(img)
kw['thumb'] = thumb.url
for k, v in kw.items(): setattr(obj, k, v)
elif t in ['Size', ]:
for k, v in kw.items(): setattr(obj, k, v)
flash( 'Updated the record successfully!', "ok" )
return redirect( '/master/index?t=%s' % t )
@expose()
def delete( self, **kw ):
t = kw.get( 't', None )
vs = self._check( t )
_id = kw.get( 'id', None )
dbclz = vs['dbclz']
obj = qry( dbclz ).get( _id )
obj.active = INACTIVE
flash( 'Deleted the record successfully!', 'ok' )
return redirect( '/master/index?t=%s' % t )
def _check( self, t ):
if t not in ['Division', 'Brand', 'Category', 'Product', 'Fibers', 'GarmentPart', 'COO', 'Care', 'Size']:
flash( "No such action!", "warn" )
return redirect( '/index' )
if t == 'Care': dbclz, search_form, label = Care, master_search_form3, 'Care Instruction'
if t == 'GarmentPart': dbclz, search_form, label = GarmentPart, master_search_form2, 'Garment Parts'
elif t == 'COO': dbclz, search_form, label = COO, master_search_form2, 'Country Of Origin'
elif t == 'Fibers': dbclz, search_form, label = Fibers, master_search_form1, 'Fabrics'
elif t == 'Division': dbclz, search_form, label = Division, master_search_form1, 'Division'
elif t == 'Brand': dbclz, search_form, label = Brand, master_search_form1, 'Brand'
elif t == 'Category': dbclz, search_form, label = Category, master_search_form1, 'Category'
elif t == 'Product': dbclz, search_form, label = Product, master_search_form4, 'Product'
elif t == 'Size': dbclz, search_form, label = Size, master_search_form5, 'Size'
return {'dbclz' : dbclz, 'search_form' : search_form, 'label' : label }
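# --- Illustrative sketch (hypothetical helper, not used by this controller) -
# index() above builds its WHERE clause by appending an ilike/equality
# condition only for the search fields the caller actually supplied, then
# ANDs them together.  The same pattern in isolation, for any declarative
# model exposing .name and .createTime columns:
from sqlalchemy.sql.expression import and_ as _example_and_
def _example_build_filters( dbclz, kw ):
    """Collect the optional search conditions and AND them together."""
    ws = []
    if kw.get( 'name', False ) : ws.append( dbclz.name.op( "ilike" )( "%%%s%%" % kw['name'] ) )
    if kw.get( 'create_time_from', False ) : ws.append( dbclz.createTime >= kw['create_time_from'] )
    if kw.get( 'create_time_to', False ) : ws.append( dbclz.createTime <= kw['create_time_to'] )
    return _example_and_( *ws )
# usage: qry( dbclz ).filter( _example_build_filters( dbclz, kw ) )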
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
class HeatAutoscalingTest(functional_base.FunctionalTestsBase):
template = '''
heat_template_version: 2014-10-16
resources:
random_group:
type: OS::Heat::AutoScalingGroup
properties:
cooldown: 0
desired_capacity: 3
max_size: 5
min_size: 2
resource:
type: OS::Heat::RandomString
scale_up_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: random_group }
scaling_adjustment: 1
scale_down_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: random_group }
scaling_adjustment: -1
outputs:
all_values:
value: {get_attr: [random_group, outputs_list, value]}
value_0:
value: {get_attr: [random_group, resource.0.value]}
value_1:
value: {get_attr: [random_group, resource.1.value]}
value_2:
value: {get_attr: [random_group, resource.2.value]}
asg_size:
value: {get_attr: [random_group, current_size]}
'''
template_nested = '''
heat_template_version: 2014-10-16
resources:
random_group:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 3
max_size: 5
min_size: 2
resource:
type: randomstr.yaml
outputs:
all_values:
value: {get_attr: [random_group, outputs_list, random_str]}
value_0:
value: {get_attr: [random_group, resource.0.random_str]}
value_1:
value: {get_attr: [random_group, resource.1.random_str]}
value_2:
value: {get_attr: [random_group, resource.2.random_str]}
'''
template_randomstr = '''
heat_template_version: 2013-05-23
resources:
random_str:
type: OS::Heat::RandomString
outputs:
random_str:
value: {get_attr: [random_str, value]}
'''
def _assert_output_values(self, stack_id):
stack = self.client.stacks.get(stack_id)
all_values = self._stack_output(stack, 'all_values')
self.assertEqual(3, len(all_values))
self.assertEqual(all_values[0], self._stack_output(stack, 'value_0'))
self.assertEqual(all_values[1], self._stack_output(stack, 'value_1'))
self.assertEqual(all_values[2], self._stack_output(stack, 'value_2'))
def test_asg_scale_up_max_size(self):
stack_id = self.stack_create(template=self.template,
expected_status='CREATE_COMPLETE')
stack = self.client.stacks.get(stack_id)
asg_size = self._stack_output(stack, 'asg_size')
# Ensure that initial desired capacity is met
self.assertEqual(3, asg_size)
# send scale up signals and ensure that asg honors max_size
asg = self.client.resources.get(stack_id, 'random_group')
max_size = 5
for num in range(asg_size + 1, max_size + 2):
expected_resources = num if num <= max_size else max_size
self.client.resources.signal(stack_id, 'scale_up_policy')
self.assertTrue(
test.call_until_true(self.conf.build_timeout,
self.conf.build_interval,
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
'random_group'))
def test_asg_scale_down_min_size(self):
stack_id = self.stack_create(template=self.template,
expected_status='CREATE_COMPLETE')
stack = self.client.stacks.get(stack_id)
asg_size = self._stack_output(stack, 'asg_size')
# Ensure that initial desired capacity is met
self.assertEqual(3, asg_size)
# send scale down signals and ensure that asg honors min_size
asg = self.client.resources.get(stack_id, 'random_group')
min_size = 2
for num in range(asg_size - 1, 0, -1):
expected_resources = num if num >= min_size else min_size
self.client.resources.signal(stack_id, 'scale_down_policy')
self.assertTrue(
test.call_until_true(self.conf.build_timeout,
self.conf.build_interval,
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
'random_group'))
def test_asg_cooldown(self):
cooldown_tmpl = self.template.replace('cooldown: 0',
'cooldown: 60')
stack_id = self.stack_create(template=cooldown_tmpl,
expected_status='CREATE_COMPLETE')
stack = self.client.stacks.get(stack_id)
asg_size = self._stack_output(stack, 'asg_size')
# Ensure that initial desired capacity is met
self.assertEqual(3, asg_size)
# send scale up signal.
# Since cooldown is in effect, number of resources should not change
asg = self.client.resources.get(stack_id, 'random_group')
expected_resources = 3
self.client.resources.signal(stack_id, 'scale_up_policy')
self.assertTrue(
test.call_until_true(self.conf.build_timeout,
self.conf.build_interval,
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
'random_group'))
def test_path_attrs(self):
stack_id = self.stack_create(template=self.template)
expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup',
'scale_up_policy': 'OS::Heat::ScalingPolicy',
'scale_down_policy': 'OS::Heat::ScalingPolicy'}
self.assertEqual(expected_resources, self.list_resources(stack_id))
self._assert_output_values(stack_id)
def test_path_attrs_nested(self):
files = {'randomstr.yaml': self.template_randomstr}
stack_id = self.stack_create(template=self.template_nested,
files=files)
expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup'}
self.assertEqual(expected_resources, self.list_resources(stack_id))
self._assert_output_values(stack_id)
class AutoScalingGroupUpdateWithNoChanges(functional_base.FunctionalTestsBase):
template = '''
heat_template_version: 2013-05-23
resources:
test_group:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 0
max_size: 0
min_size: 0
resource:
type: OS::Heat::RandomString
test_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: test_group }
scaling_adjustment: 1
'''
def test_as_group_update_without_resource_changes(self):
stack_identifier = self.stack_create(template=self.template)
new_template = self.template.replace(
'scaling_adjustment: 1',
'scaling_adjustment: 2')
self.update_stack(stack_identifier, template=new_template)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
KiwiMarkup Test Unit
Run using 'python test.py'
"""
# Standard library imports
import imp
import re
# Application specific imports
# Because Kiwimark is not installed as a Python library, we need to load it
# manually.
scriptfile, pathname, description = imp.find_module("kiwimark", ["../kiwimark"])
try:
kiwimark = imp.load_module("kiwimark", scriptfile, pathname, description)
finally:
scriptfile.close()
if (__name__ == "__main__"):
# Basic unit tests
import unittest
class KiwiMarkupCase(unittest.TestCase):
def setUp(self):
self.api = kiwimark.KiwiMarkup()
def tearDown(self):
self.api = None
def testBasic(self):
""" Verify that the instance was created """
self.assertNotEqual(self.api, None)
def testExecute(self):
""" Verify the main execution method """
self.assertNotEqual(self.api.execute("# Test Number 1"), 0)
def testHeaderRegex(self):
"""
            Regex for ATX-style headers, starting (after up to three
            whitespace characters) with a run of one to six '#' characters. The
            header text can also be followed by additional '#' characters up
            to the end of the line -- these will be omitted from the output.
"""
regex = kiwimark.HEADER_REGEX = r"^[\s]{0,3}([#]{1,6})[\s]*([^#]*)"
# No match
m = re.search(regex, "There is no header markup here")
self.assertEqual(m, None)
# Simple match for Header 1
m = re.search(regex, "# Header 1")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "#")
self.assertEqual(m.group(2), "Header 1")
# Match for Header 3
m = re.search(regex, "### Header 3")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "###")
self.assertEqual(m.group(2), "Header 3")
def testListRegex(self):
"""
Regex for list items, optionally indented by whitespace, and
indicated by a single asterisk followed by whitespace and then
the actual text of the item.
"""
regex = kiwimark.LIST_REGEX = r"^([\s]*)[\*][\s]+(.*)"
# No match
m = re.search(regex, "There is no list markup here")
self.assertEqual(m, None)
# Simple match for list entry
m = re.search(regex, "* List entry")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "")
self.assertEqual(m.group(2), "List entry")
# Match including white-space
m = re.search(regex, " * List entry")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), " ")
self.assertEqual(m.group(2), "List entry")
def testTableHeaderRegex(self):
"""
Regex for table headers, which consist of a row of '-' characters
split up by one or more '|' characters or '+' characters.
"""
regex = kiwimark.TABLE_HEADER_REGEX = r"^[\s]{0,3}(\||\+)*((-{3,})(\||\+))+"
# No match
m = re.search(regex, "There is no table header markup here")
self.assertEqual(m, None)
# Match with "|" separators
m = re.search(regex, "---|---|---")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), None)
self.assertEqual(m.group(2), "---|")
# Match with "+" separators
m = re.search(regex, "---+---+---")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), None)
self.assertEqual(m.group(2), "---+")
def testBoldStartRegex(self):
"""
Regex for start of bold text
"""
regex = kiwimark.BOLD_START_REGEX = r"(^|\s)(\*\*)([^\s])"
# No match
m = re.search(regex, "There is no bold markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "**Some bold** text.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(2), ("", "**", "S"))
def testBoldEndRegex(self):
"""
Regex for end of bold text
"""
regex = kiwimark.BOLD_END_REGEX = r"([^\s])(\*\*)([\):;.,?\s]+|$)"
# No match
m = re.search(regex, "There is no bold markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "**Some bold** text.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(2), ("d", "**", " "))
def testEmphStartRegex(self):
"""
Regex for start of emphasized text
"""
regex = kiwimark.EMPH_START_REGEX = r"(^|\s)(_)([^\s])"
# No match
m = re.search(regex, "There is no emphasized markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "Some _emphasized_ text.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(2), (" ", "_", "e"))
def testEmphEndRegex(self):
"""
Regex for end of emphasized text
"""
regex = kiwimark.EMPH_END_REGEX = r"([^\s])(_)([\):;.,?\s]+|$)"
# No match
m = re.search(regex, "There is no emphasized markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "Some text which has been _emphasized_.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(2), ("d", "_", "."))
# Must ignore in-word underscores
m = re.search(regex, "There is no emphasized mark_up here")
self.assertEqual(m, None)
def testURLRegex(self):
"""
Regex for Markdown-style URL mark-up: [title-text](path/to/url).
            This doesn't check for a valid URL -- there are too many options.
"""
regex = kiwimark.URL_REGEX = r"\[([^]]*)\]\(([^\)]*)\)"
# No match
m = re.search(regex, "There is no URL markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "Here is a [link](www.link.com) to a website.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(1), ("link", "www.link.com"))
def testMarkdownImgRegex(self):
"""
Regex for Markdown-style image mark-up: 
"""
regex = kiwimark.MD_IMG_REGEX = r"!\[([^]]*)\]\(([^\)]*)\)"
# No match
m = re.search(regex, "There is no image markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "Here is a  of something.")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.groups(1), ("picture", "/path/image.png"))
def testImgRegex(self):
""" Verify the img markup regular expression """
regex = kiwimark.IMG_REGEX
# No match
m = re.search(regex, "There is no img markup here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, '[img](graphics/test.png)')
self.assertNotEqual(len(m.groups()), 0)
# Match with CSS class
m = re.search(regex, '[img.left](graphics/test.png)')
self.assertEqual(m.group(3), 'left')
# Match with alt text
m = re.search(regex, '[img:alt](graphics/test.png)')
self.assertEqual(m.group(6), 'alt')
# Match with CSS class and alt text
m = re.search(regex, '[img.left:alt](graphics/test.png)')
self.assertEqual(m.group(3), 'left')
self.assertEqual(m.group(6), 'alt')
# Return the expected line
line = "[img.left:alt](graphics/test.png)"
expected_result = "<img src='graphics/test.png' class='left' alt='alt' title='alt'/>"
line = self.api.re_sub(self.api.imgPattern, r"<img src='\7' class='\3' alt='\6' title='\6'/>", line)
self.assertEqual(line, expected_result)
def testFootnoteRegex(self):
"""
FOOTNOTE_REGEX for footnotes (links to footnote_nn)
"""
regex = kiwimark.FOOTNOTE_REGEX = r"\[\^([0-9]+)\]"
# No match
m = re.search(regex, "There is no footnote here")
self.assertEqual(m, None)
m = re.search(regex, "See the footnote[^1] below")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "1")
def testFootnoteTargetRegex(self):
"""
FOOTNOTE_TARGET_REGEX for footnote targets (links to footnote_ref_nn)
"""
regex = kiwimark.FOOTNOTE_TARGET_REGEX = r"\[\^([0-9]+)\]:"
# No match
m = re.search(regex, "There is no footnote here")
self.assertEqual(m, None)
m = re.search(regex, "[^1]: Some additional info")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "1")
def testCodeBlockStartRegex(self):
regex = kiwimark.CODEBLOCK_START_REGEX
# No match
m = re.search(regex, "There is no code: marker here")
self.assertEqual(m, None)
# Simple match
m = re.search(regex, "code:\n")
self.assertNotEqual(len(m.groups()), 0)
# Match with language specified
m = re.search(regex, "code:javascript\n")
self.assertNotEqual(len(m.groups()), 0)
self.assertEqual(m.group(1), "javascript")
def testCodeBlockEndRegex(self):
regex = kiwimark.CODEBLOCK_END_REGEX
# No match
m = re.search(regex, "There is no :code marker here")
# Simple match
m = re.search(regex, ":code\n")
self.assertNotEqual(m, None)
unittest.main()
|
|
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import datetime
import json
import traceback
import unittest
from unittest import TestCase
from decimal import Decimal
import jsonschema
from pycroft.helpers.i18n import (
ErroneousMessage, Message, NumericalMessage, SimpleMessage,
deserialize_param, serialize_param, schema, deferred_dgettext,
deferred_dngettext, deferred_gettext, deferred_ngettext, format_datetime,
Money)
from pycroft.helpers.interval import (
UnboundedInterval, closed, closedopen, openclosed, open)
class TestParameterSerialization(unittest.TestCase):
def assertValidSerialization(self, param):
s = serialize_param(param)
try:
json.dumps(s)
except (ValueError, TypeError):
self.fail("Param {} cannot be serialized to JSON.".format(param))
self.assertEqual(deserialize_param(s), param)
def test_serialize_string(self):
self.assertValidSerialization("test")
def test_serialize_unicode(self):
self.assertValidSerialization(u"test")
def test_serialize_bool(self):
self.assertValidSerialization(True)
def test_serialize_int(self):
self.assertValidSerialization(42)
def test_serialize_float(self):
self.assertValidSerialization(0.5)
def test_serialize_decimal(self):
self.assertValidSerialization(Decimal('3.8e2'))
def test_serialize_money(self):
self.assertValidSerialization(Money(Decimal(20.0), "EUR"))
def test_serialize_datetime(self):
self.assertValidSerialization(datetime.datetime.utcnow())
def test_serialize_date(self):
self.assertValidSerialization(datetime.date.today())
def test_serialize_timedelta(self):
self.assertValidSerialization(datetime.timedelta(1))
def test_serialize_time(self):
self.assertValidSerialization(datetime.datetime.utcnow().time())
def test_serialize_interval(self):
self.assertValidSerialization(UnboundedInterval)
now = datetime.datetime.utcnow()
then = now + datetime.timedelta(1)
self.assertValidSerialization(closed(now, then))
self.assertValidSerialization(closedopen(now, then))
self.assertValidSerialization(openclosed(now, then))
self.assertValidSerialization(open(now, then))
def test_serialize_unknown_type(self):
self.assertRaises(TypeError, serialize_param, object())
class DeferredMessageTestCase(TestCase):
validator = jsonschema.Draft4Validator(schema)
def assertValidJSON(self, json_string):
try:
obj = json.loads(json_string)
except (ValueError, TypeError):
self.fail()
try:
self.validator.validate(obj)
except jsonschema.ValidationError as e:
self.fail("Export failed schema validation: {}".format(e))
def assertMessageEquals(self, m, domain, args, kwargs, expected_result):
self.assertEqual(m.domain, domain)
self.assertEqual(m.args, args)
self.assertEqual(m.kwargs, kwargs)
self.assertEqual(m.localize(), expected_result)
def assertSimpleMessageCorrect(self, m, message, domain, args, kwargs,
expected_result):
self.assertIsInstance(m, SimpleMessage)
self.assertEqual(m.message, message)
self.assertMessageEquals(m, domain, args, kwargs, expected_result)
json_string = m.to_json()
self.assertValidJSON(json_string)
m2 = Message.from_json(json_string)
self.assertIsInstance(m2, SimpleMessage)
self.assertMessageEquals(m2, domain, args, kwargs, expected_result)
def assertNumericMessageCorrect(self, m, singular, plural, n, domain,
args, kwargs, expected_result):
self.assertIsInstance(m, NumericalMessage)
self.assertEqual(m.singular, singular)
self.assertEqual(m.plural, plural)
self.assertEqual(m.n, n)
self.assertMessageEquals(m, domain, args, kwargs, expected_result)
json_string = m.to_json()
self.assertValidJSON(json_string)
m2 = Message.from_json(json_string)
self.assertIsInstance(m2, NumericalMessage)
self.assertMessageEquals(m2, domain, args, kwargs, expected_result)
class TestJSONExport(DeferredMessageTestCase):
def test_invalid_json(self):
self.assertIsInstance(Message.from_json("not JSON"), ErroneousMessage)
def test_wrong_json(self):
json_string = json.dumps({"key": "value"})
self.assertIsInstance(Message.from_json(json_string), ErroneousMessage)
def test_simple(self):
message = "test"
m = deferred_gettext(message)
self.assertSimpleMessageCorrect(m, message, None, (), {}, message)
def test_simple_with_domain(self):
message = "test"
domain = "domain"
m = deferred_dgettext(domain, message)
self.assertSimpleMessageCorrect(m, message, domain, (), {}, message)
def test_simple_format_args(self):
message = "test {} at {}"
arg1 = "arg1"
arg2 = datetime.datetime.utcnow()
m = deferred_gettext(message).format(arg1, arg2)
expected_result = message.format(arg1, format_datetime(arg2))
self.assertSimpleMessageCorrect(m, message, None, (arg1, arg2), {},
expected_result)
def test_simple_format_kwargs(self):
message = "test {arg1} at {arg2}"
arg1 = "arg1"
arg2 = datetime.datetime.utcnow()
m = deferred_gettext(message).format(arg1=arg1, arg2=arg2)
expected_result = message.format(arg1=arg1, arg2=format_datetime(arg2))
self.assertSimpleMessageCorrect(m, message, None, (),
{"arg1": arg1, "arg2": arg2},
expected_result)
def test_singular(self):
singular = "singular"
plural = "plural"
n = 1
m = deferred_ngettext(singular, plural, n)
self.assertNumericMessageCorrect(m, singular, plural, n, None, (), {},
singular)
def test_singular_domain(self):
singular = "singular"
plural = "plural"
n = 1
domain = "domain"
m = deferred_dngettext(domain, singular, plural, n)
self.assertNumericMessageCorrect(m, singular, plural, n, domain, (), {},
singular)
def test_plural(self):
singular = "singular"
plural = "plural"
n = 1000
domain = "domain"
m = deferred_dngettext(domain, singular, plural, n)
self.assertNumericMessageCorrect(m, singular, plural, n, domain, (), {},
plural)
def get_format_error_message(self, message, args, kwargs):
try:
message.format(*args, **kwargs)
except (TypeError, ValueError, IndexError, KeyError) as e:
return u''.join(traceback.format_exception_only(type(e), e))
else:
raise AssertionError()
def test_missing_positional_argument(self):
message = u"{0} {1}"
args = (1,)
kwargs = {}
error = self.get_format_error_message(message, args, kwargs)
m = deferred_gettext(message).format(*args)
text = (u'Could not format message "{}" (args={}, kwargs={}): {}'
.format(message, args, kwargs, error))
self.assertSimpleMessageCorrect(m, message, None, args, kwargs, text)
def test_missing_keyword_argument(self):
message = u"{foo}"
args = (1,)
kwargs = {}
error = self.get_format_error_message(message, args, kwargs)
m = deferred_gettext(message).format(*args)
text = (u'Could not format message "{}" (args={}, kwargs={}): {}'
.format(message, args, kwargs, error))
self.assertSimpleMessageCorrect(m, message, None, args, kwargs, text)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2 driver
"""
from libcloud.providers import Provider
from libcloud.types import NodeState, InvalidCredsException
from libcloud.base import Node, Response, ConnectionUserAndKey
from libcloud.base import NodeDriver, NodeSize, NodeImage, NodeLocation
import base64
import hmac
from hashlib import sha256
import time
import urllib
from xml.etree import ElementTree as ET
EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com'
EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com'
EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com'
API_VERSION = '2009-04-04'
NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
EC2_INSTANCE_TYPES = {
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
}
EC2_US_EAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_US_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_EU_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_US_EAST_INSTANCE_TYPES['m1.small']['price'] = '.085'
EC2_US_EAST_INSTANCE_TYPES['m1.large']['price'] = '.34'
EC2_US_EAST_INSTANCE_TYPES['m1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['c1.medium']['price'] = '.17'
EC2_US_EAST_INSTANCE_TYPES['c1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.2'
EC2_US_EAST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.4'
EC2_US_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_US_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_US_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_US_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_US_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_US_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_US_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
EC2_EU_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_EU_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_EU_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_EU_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_EU_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_EU_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_EU_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
class EC2Response(Response):
def parse_body(self):
if not self.body:
return None
return ET.XML(self.body)
def parse_error(self):
err_list = []
for err in ET.XML(self.body).findall('Errors/Error'):
code, message = err.getchildren()
err_list.append("%s: %s" % (code.text, message.text))
if code.text == "InvalidClientTokenId":
raise InvalidCredsException(err_list[-1])
if code.text == "SignatureDoesNotMatch":
raise InvalidCredsException(err_list[-1])
return "\n".join(err_list)
class EC2Connection(ConnectionUserAndKey):
host = EC2_US_EAST_HOST
responseCls = EC2Response
def add_default_params(self, params):
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['AWSAccessKeyId'] = self.user_id
params['Version'] = API_VERSION
params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
params['Signature'] = self._get_aws_auth_param(params, self.key)
return params
def _get_aws_auth_param(self, params, secret_key, path='/'):
"""
Creates the signature required for AWS, per
http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
StringToSign = HTTPVerb + "\n" +
ValueOfHostHeaderInLowercase + "\n" +
HTTPRequestURI + "\n" +
CanonicalizedQueryString <from the preceding step>
"""
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
pairs.append(urllib.quote(key, safe='') + '=' +
urllib.quote(params[key], safe='-_~'))
qs = '&'.join(pairs)
string_to_sign = '\n'.join(('GET', self.host, path, qs))
b64_hmac = base64.b64encode(
hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()
)
return b64_hmac
class EC2NodeDriver(NodeDriver):
connectionCls = EC2Connection
type = Provider.EC2
name = 'Amazon EC2 (us-east-1)'
_instance_types = EC2_US_EAST_INSTANCE_TYPES
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.TERMINATED,
'terminated': NodeState.TERMINATED
}
def _findtext(self, element, xpath):
return element.findtext(self._fixxpath(xpath))
def _fixxpath(self, xpath):
# ElementTree wants namespaces in its xpaths, so here we add them.
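        # For example (illustrative only), 'instanceState/name' becomes
        # '{NAMESPACE}instanceState/{NAMESPACE}name', where NAMESPACE is the
        # EC2 doc namespace defined at the top of this module.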
return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")])
def _findattr(self, element, xpath):
return element.findtext(self._fixxpath(xpath))
def _findall(self, element, xpath):
return element.findall(self._fixxpath(xpath))
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params["%s.%s" % (key, i)] = value
return params
def _get_boolean(self, element):
tag = "{%s}%s" % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([ term_status == status
for term_status
in ('shutting-down', 'terminated') ])
def _to_nodes(self, object, xpath):
return [ self._to_node(el)
for el in object.findall(self._fixxpath(xpath)) ]
def _to_node(self, element):
try:
state = self.NODE_STATE_MAP[
self._findattr(element, "instanceState/name")
]
except KeyError:
state = NodeState.UNKNOWN
n = Node(
id=self._findtext(element, 'instanceId'),
name=self._findtext(element, 'instanceId'),
state=state,
public_ip=[self._findtext(element, 'dnsName')],
private_ip=[self._findtext(element, 'privateDnsName')],
driver=self.connection.driver,
extra={
'dns_name': self._findattr(element, "dnsName"),
'instanceId': self._findattr(element, "instanceId"),
'imageId': self._findattr(element, "imageId"),
'private_dns': self._findattr(element, "privateDnsName"),
'status': self._findattr(element, "instanceState/name"),
'keyname': self._findattr(element, "keyName"),
'launchindex': self._findattr(element, "amiLaunchIndex"),
'productcode':
[p.text for p in self._findall(
element, "productCodesSet/item/productCode"
)],
'instancetype': self._findattr(element, "instanceType"),
'launchdatetime': self._findattr(element, "launchTime"),
'availability': self._findattr(element,
"placement/availabilityZone"),
'kernelid': self._findattr(element, "kernelId"),
'ramdiskid': self._findattr(element, "ramdiskId")
}
)
return n
def _to_images(self, object):
return [ self._to_image(el)
for el in object.findall(
self._fixxpath('imagesSet/item')
) ]
def _to_image(self, element):
n = NodeImage(id=self._findtext(element, 'imageId'),
name=self._findtext(element, 'imageLocation'),
driver=self.connection.driver)
return n
def list_nodes(self):
params = {'Action': 'DescribeInstances' }
nodes = self._to_nodes(
self.connection.request('/', params=params).object,
'reservationSet/item/instancesSet/item')
return nodes
def list_sizes(self, location=None):
return [ NodeSize(driver=self.connection.driver, **i)
for i in self._instance_types.values() ]
def list_images(self, location=None):
params = {'Action': 'DescribeImages'}
images = self._to_images(
self.connection.request('/', params=params).object
)
return images
def create_security_group(self, name, description):
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
return self.connection.request('/', params=params).object
def authorize_security_group_permissive(self, name):
results = []
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': 'tcp',
'FromPort': '0',
'ToPort': '65535',
'CidrIp': '0.0.0.0/0'}
try:
results.append(
self.connection.request('/', params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params['IpProtocol'] = 'udp'
try:
results.append(
self.connection.request('/', params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
try:
results.append(
self.connection.request('/', params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
return results
def create_node(self, **kwargs):
"""Create a new EC2 node
See L{NodeDriver.create_node} for more keyword args.
Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
@keyword name: Name (unused by EC2)
@type name: C{str}
@keyword mincount: Minimum number of instances to launch
@type mincount: C{int}
@keyword maxcount: Maximum number of instances to launch
@type maxcount: C{int}
@keyword securitygroup: Name of security group
@type securitygroup: C{str}
@keyword keyname: The name of the key pair
@type keyname: C{str}
@keyword userdata: User data
@type userdata: C{str}
"""
name = kwargs["name"]
image = kwargs["image"]
size = kwargs["size"]
params = {
'Action': 'RunInstances',
'ImageId': image.id,
'MinCount': kwargs.get('mincount','1'),
'MaxCount': kwargs.get('maxcount','1'),
'InstanceType': size.id
}
if 'securitygroup' in kwargs:
params['SecurityGroup'] = kwargs['securitygroup']
if 'keyname' in kwargs:
params['KeyName'] = kwargs['keyname']
if 'userdata' in kwargs:
params['UserData'] = base64.b64encode(kwargs['userdata'])
object = self.connection.request('/', params=params).object
nodes = self._to_nodes(object, 'instancesSet/item')
if len(nodes) == 1:
return nodes[0]
else:
return nodes
def reboot_node(self, node):
"""
Reboot the node by passing in the node object
"""
params = {'Action': 'RebootInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request('/', params=params).object
return self._get_boolean(res)
def destroy_node(self, node):
"""
Destroy node by passing in the node object
"""
params = {'Action': 'TerminateInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request('/', params=params).object
return self._get_terminate_boolean(res)
def list_locations(self):
return [NodeLocation(0, 'Amazon US N. Virginia', 'US', self)]
class EC2EUConnection(EC2Connection):
host = EC2_EU_WEST_HOST
class EC2EUNodeDriver(EC2NodeDriver):
connectionCls = EC2EUConnection
_instance_types = EC2_EU_WEST_INSTANCE_TYPES
def list_locations(self):
return [NodeLocation(0, 'Amazon Europe Ireland', 'IE', self)]
class EC2USWestConnection(EC2Connection):
host = EC2_US_WEST_HOST
class EC2USWestNodeDriver(EC2NodeDriver):
connectionCls = EC2USWestConnection
_instance_types = EC2_US_WEST_INSTANCE_TYPES
def list_locations(self):
return [NodeLocation(0, 'Amazon US N. California', 'US', self)]
|
|
#!/usr/bin/python
# The following is so that insights-client continues to work normally in places where
# Docker is not installed.
#
# Note that this is actually testing whether the docker (or atomic) command is
# installed and whether the docker server on this machine is accessible, which
# isn't exactly the same thing as 'there is no docker on this machine'.
import os
import logging
import shlex
import subprocess
import sys
from constants import InsightsConstants as constants
from config import CONFIG as config
APP_NAME = constants.app_name
logger = logging.getLogger(__name__)
def run_command_very_quietly(cmdline): # shhhhhhhh
# this takes a string (not an array)
# need to redirect stdout and stderr to /dev/null
with open(os.devnull, 'w') as devnull:
cmd = shlex.split(cmdline.encode('utf8'))
proc = subprocess.Popen(cmd, stdout=devnull, stderr=subprocess.STDOUT)
returncode = proc.wait()
return returncode
# only run docker commands OR atomic commands
# defaults to atomic (if present)
UseAtomic = True
UseDocker = False
# Check to see if we have access to docker
HaveDocker = False
HaveDockerException = None
try:
if run_command_very_quietly("which docker") == 0:
# a returncode of 0 means cmd ran correctly
HaveDocker = True
except Exception as e:
HaveDockerException = e
# Check to see if we have access to Atomic through the 'atomic' command
HaveAtomic = False
HaveAtomicException = None
try:
if run_command_very_quietly("which atomic") == 0:
# a returncode of 0 means cmd ran correctly
HaveAtomic = True
else:
# anything else indicates problem
HaveAtomic = False
except Exception as e:
    # this happens when atomic isn't installed or is otherwise unrunnable
HaveAtomic = False
HaveAtomicException = e
HaveAtomicMount = HaveAtomic
# failsafe for whether atomic / docker is or isn't present
if not HaveAtomic and HaveDocker:
UseDocker = True
UseAtomic = False
if not HaveDocker and HaveAtomic:
UseAtomic = True
UseDocker = False
# force atomic or docker
if config['use_docker']:
UseAtomic = False
UseDocker = True
if config['use_atomic']:
UseAtomic = True
UseDocker = False
# Check if docker is running
DockerIsRunning = False
try:
if run_command_very_quietly("docker info") == 0:
# a returncode of 0 means cmd ran correctly
DockerIsRunning = True
except Exception as e:
HaveDockerException = e
# if HaveDocker:
if ((DockerIsRunning and UseDocker and HaveDocker) or
(DockerIsRunning and UseAtomic and HaveAtomic)):
import tempfile
import shutil
import json
def runcommand(cmd):
# this takes an array (not a string)
logger.debug("Running Command: %s" % cmd)
proc = subprocess.Popen(cmd)
returncode = proc.wait()
return returncode
def run_command_capture_output(cmdline):
cmd = shlex.split(cmdline.encode('utf8'))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
return out
def use_atomic_run():
return UseAtomic and HaveAtomic
def use_atomic_mount():
return UseAtomic and HaveAtomicMount
def pull_image(image):
return runcommand(shlex.split("docker pull") + [image])
def get_targets():
targets = []
logger.debug('Getting targets to scan...')
for d in _docker_all_image_ids():
logger.debug('Checking if %s equals %s.' % (d, config['analyze_image_id']))
# pull the sha256: off the id to compare short IDs
if (config['analyze_image_id'] == d or
d.split('sha256:')[-1].startswith(config['analyze_image_id'])):
logger.debug('%s equals %s' % (d, config['analyze_image_id']))
targets.append({'type': 'docker_image', 'name': d})
return targets # return the first one that matches
for d in _docker_all_container_ids():
logger.debug('Checking if %s equals %s.' % (d, config['analyze_image_id']))
if config['analyze_image_id'] == d or d.startswith(config['analyze_image_id']):
logger.debug('%s equals %s' % (d, config['analyze_image_id']))
targets.append({'type': 'docker_container', 'name': d})
return targets # return the first one that matches
logger.debug('Done collecting targets')
logger.debug(targets)
if len(targets) == 0:
logger.error("There was an error collecting targets. No image or container was found matching this ID.")
sys.exit(constants.sig_kill_bad)
return targets
def docker_display_name(docker_name, docker_type):
inspect = _docker_inspect_image(docker_name, docker_type)
if not inspect:
return docker_name
if docker_type == 'image':
try:
display_name = inspect['RepoTags'][0]
except LookupError:
display_name = docker_name
if docker_type == 'container':
display_name = inspect['Name'].lstrip('/')
return display_name
def container_image_links():
from insights_client.utilities import generate_analysis_target_id
link_dict = {}
if UseAtomic:
docker_atomic = "atomic"
else:
docker_atomic = "docker"
ps_output = run_command_capture_output(docker_atomic + " ps --no-trunc --all")
ps_data = ps_output.splitlines()
ps_data.pop(0) # remove heading
for l in ps_data:
elements = l.split()
c_id = elements[0]
i_id = elements[1]
link_dict[c_id] = [{'system_id': generate_analysis_target_id('docker_image', i_id),
'type': 'image'}]
if i_id not in link_dict:
link_dict[i_id] = []
link_dict[i_id].append({'system_id': generate_analysis_target_id('docker_container', c_id),
'type': 'container'})
return link_dict
class AtomicTemporaryMountPoint:
# this is used for both images and containers
def __init__(self, image_id, mount_point):
self.image_id = image_id
self.mount_point = mount_point
def get_fs(self):
return self.mount_point
def close(self):
try:
logger.debug("Closing Id %s On %s" % (self.image_id, self.mount_point))
runcommand(shlex.split("atomic unmount") + [self.mount_point])
except Exception as e:
logger.debug("exception while unmounting image or container: %s" % e)
shutil.rmtree(self.mount_point, ignore_errors=True)
from mount import DockerMount, Mount
class DockerTemporaryMountPoint:
# this is used for both images and containers
def __init__(self, driver, image_id, mount_point, cid):
self.driver = driver
self.image_id = image_id
self.mount_point = mount_point
self.cid = cid
def get_fs(self):
return self.mount_point
def close(self):
try:
logger.debug("Closing Id %s On %s" % (self.image_id, self.mount_point))
# If using device mapper, unmount the bind-mount over the directory
if self.driver == 'devicemapper':
Mount.unmount_path(self.mount_point)
DockerMount(self.mount_point).unmount(self.cid)
except Exception as e:
logger.debug("exception while unmounting image or container: %s" % e)
shutil.rmtree(self.mount_point, ignore_errors=True)
def open_image(image_id):
global HaveAtomicException
if HaveAtomicException and UseAtomic:
logger.debug("atomic is either not installed or not accessable %s" %
HaveAtomicException)
HaveAtomicException = None
if use_atomic_mount():
mount_point = tempfile.mkdtemp()
logger.debug("Opening Image Id %s On %s using atomic" % (image_id, mount_point))
if runcommand(shlex.split("atomic mount") + [image_id, mount_point]) == 0:
return AtomicTemporaryMountPoint(image_id, mount_point)
else:
logger.error('Could not mount Image Id %s On %s' % (image_id, mount_point))
shutil.rmtree(mount_point, ignore_errors=True)
return None
else:
driver = _docker_driver()
if driver is None:
return None
mount_point = tempfile.mkdtemp()
logger.debug("Opening Image Id %s On %s using docker client" % (image_id, mount_point))
# docker mount creates a temp image
# we have to use this temp image id to remove the device
mount_point, cid = DockerMount(mount_point).mount(image_id)
if driver == 'devicemapper':
DockerMount.mount_path(os.path.join(mount_point, "rootfs"), mount_point, bind=True)
if cid:
return DockerTemporaryMountPoint(driver, image_id, mount_point, cid)
else:
logger.error('Could not mount Image Id %s On %s' % (image_id, mount_point))
shutil.rmtree(mount_point, ignore_errors=True)
return None
def open_container(container_id):
global HaveAtomicException
if HaveAtomicException and UseAtomic:
logger.debug("atomic is either not installed or not accessable %s" %
HaveAtomicException)
HaveAtomicException = None
if use_atomic_mount():
mount_point = tempfile.mkdtemp()
logger.debug("Opening Container Id %s On %s using atomic" %
(container_id, mount_point))
if runcommand(shlex.split("atomic mount") + [container_id, mount_point]) == 0:
return AtomicTemporaryMountPoint(container_id, mount_point)
else:
logger.error('Could not mount Container Id %s On %s' % (container_id, mount_point))
shutil.rmtree(mount_point, ignore_errors=True)
return None
else:
driver = _docker_driver()
if driver is None:
return None
mount_point = tempfile.mkdtemp()
logger.debug("Opening Container Id %s On %s using docker client" %
(container_id, mount_point))
# docker mount creates a temp image
# we have to use this temp image id to remove the device
mount_point, cid = DockerMount(mount_point).mount(container_id)
if driver == 'devicemapper':
DockerMount.mount_path(os.path.join(mount_point, "rootfs"), mount_point, bind=True)
if cid:
return DockerTemporaryMountPoint(driver, container_id, mount_point, cid)
else:
logger.error('Could not mount Container Id %s On %s' % (container_id, mount_point))
shutil.rmtree(mount_point, ignore_errors=True)
return None
def _docker_inspect_image(docker_name, docker_type):
a = json.loads(run_command_capture_output(
"docker inspect --type %s %s" % (docker_type, docker_name)))
if len(a) == 0:
return None
else:
return a[0]
def _docker_driver():
x = "Storage Driver:"
if UseAtomic:
atomic_docker = "atomic"
else:
atomic_docker = "docker"
for each in run_command_capture_output(atomic_docker + " info").splitlines():
if each.startswith(x):
return each[len(x):].strip()
return ""
def _docker_all_image_ids():
l = []
# why are we running docker images here and not atomic images?
if UseAtomic:
atomic_docker = "atomic images list"
else:
atomic_docker = "docker images"
for each in run_command_capture_output(atomic_docker + " --quiet --no-trunc").splitlines():
if each not in l:
l.append(each)
return l
def _docker_all_container_ids():
l = []
if UseAtomic:
atomic_docker = "atomic"
else:
atomic_docker = "docker"
for each in run_command_capture_output(atomic_docker + " ps --all --quiet --no-trunc").splitlines():
if each not in l:
l.append(each)
return l
else:
# If we can't import docker or atomic then we stub out all the main functions to report errors
if UseAtomic:
the_verbiage = "Atomic"
the_exception = HaveAtomicException
else:
the_verbiage = "Docker"
the_exception = HaveDockerException
def get_targets():
logger.error('Could not connect to ' + the_verbiage + ' to collect from images and containers')
        logger.error(the_verbiage + ' is either not installed or not accessible: %s' %
(the_exception if the_exception else ''))
return []
def open_image(image_id):
logger.error('Could not connect to ' + the_verbiage + ' to examine image %s' % image_id)
        logger.error(the_verbiage + ' is either not installed or not accessible: %s' %
(the_exception if the_exception else ''))
return None
def open_container(container_id):
logger.error('Could not connect to ' + the_verbiage + ' to examine container %s' % container_id)
        logger.error(the_verbiage + ' is either not installed or not accessible: %s' %
(the_exception if the_exception else ''))
return None
def docker_display_name(image_id):
logger.error('Could not connect to ' + the_verbiage + ' to examine image %s' % image_id)
        logger.error(the_verbiage + ' is either not installed or not accessible: %s' %
(the_exception if the_exception else ''))
return None
def container_image_links():
logger.error('Could not connect to ' + the_verbiage + '.')
        logger.error(the_verbiage + ' is either not installed or not accessible: %s' %
(the_exception if the_exception else ''))
return None
#
# JSON data has lots of nested dictionaries, that are often optional.
#
# so for example you want to write:
#
# foo = d['meta_specs']['uploader_log']['something_else']
#
# but d might not have 'meta_specs' and that might not have 'uploader_log' and ...
# so write this instead
#
# idx = ('meta_specs','uploader_log','something_else')
# if dictmultihas(d, idx):
# foo = dictmultiget(d, idx)
# else:
# ....
#
def dictmultihas(d, idx):
# 'idx' is a tuple of strings, indexing into 'd'
# if d doesn't have these indexes, return False
    for each in idx[:-1]:
        if d and each in d:
            d = d[each]
        else:
            # an intermediate key is missing, so the full index can't be there
            return False
    if d and idx and idx[-1] in d:
        return True
    else:
        return False
def dictmultiget(d, idx):
# 'idx' is a tuple of strings, indexing into 'd'
for each in idx[:-1]:
d = d[each]
return d[idx[-1]]
|
|
"""
Test Suites
-----------
Provides a LazySuite, which is a suite whose test list is a generator
function, and ContextSuite, which can run fixtures (setup/teardown
functions or methods) for the context that contains its tests.
"""
from __future__ import generators
import logging
import sys
import unittest
from nose.case import Test
from nose.config import Config
from nose.proxy import ResultProxyFactory
from nose.util import isclass, resolve_name, try_run
if sys.platform == 'cli':
if sys.version_info[:2] < (2, 6):
import clr
clr.AddReference("IronPython")
from IronPython.Runtime.Exceptions import StringException
else:
class StringException(Exception):
pass
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# Singleton for default value -- see ContextSuite.__init__ below
_def = object()
def _strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
class MixedContextError(Exception):
"""Error raised when a context suite sees tests from more than
one context.
"""
pass
class LazySuite(unittest.TestSuite):
"""A suite that may use a generator as its list of tests
"""
def __init__(self, tests=()):
"""Initialize the suite. tests may be an iterable or a generator
"""
self._set_tests(tests)
def __iter__(self):
return iter(self._tests)
def __repr__(self):
return "<%s tests=generator (%s)>" % (
_strclass(self.__class__), id(self))
def __hash__(self):
return object.__hash__(self)
__str__ = __repr__
def addTest(self, test):
self._precache.append(test)
# added to bypass run changes in 2.7's unittest
def run(self, result):
for test in self._tests:
if result.shouldStop:
break
test(result)
return result
def __nonzero__(self):
log.debug("tests in %s?", id(self))
if self._precache:
return True
if self.test_generator is None:
return False
try:
test = self.test_generator.next()
if test is not None:
self._precache.append(test)
return True
except StopIteration:
pass
return False
def _get_tests(self):
log.debug("precache is %s", self._precache)
for test in self._precache:
yield test
if self.test_generator is None:
return
for test in self.test_generator:
yield test
def _set_tests(self, tests):
self._precache = []
is_suite = isinstance(tests, unittest.TestSuite)
if callable(tests) and not is_suite:
self.test_generator = tests()
elif is_suite:
# Suites need special treatment: they must be called like
# tests for their setup/teardown to run (if any)
self.addTests([tests])
self.test_generator = None
else:
self.addTests(tests)
self.test_generator = None
_tests = property(_get_tests, _set_tests, None,
"Access the tests in this suite. Access is through a "
"generator, so iteration may not be repeatable.")
class ContextSuite(LazySuite):
"""A suite with context.
A ContextSuite executes fixtures (setup and teardown functions or
methods) for the context containing its tests.
The context may be explicitly passed. If it is not, a context (or
nested set of contexts) will be constructed by examining the tests
in the suite.
"""
failureException = unittest.TestCase.failureException
was_setup = False
was_torndown = False
classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
'setUpClass', 'setUpAll')
classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
'teardownAll', 'tearDownClass', 'tearDownAll')
moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
'setUp')
moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
'teardown', 'tearDown')
packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
packageTeardown = ('teardown_package', 'teardownPackage',
'tearDownPackage')
def __init__(self, tests=(), context=None, factory=None,
config=None, resultProxy=None, can_split=True):
log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
self.context = context
self.factory = factory
if config is None:
config = Config()
self.config = config
self.resultProxy = resultProxy
self.has_run = False
self.can_split = can_split
self.error_context = None
LazySuite.__init__(self, tests)
def __repr__(self):
return "<%s context=%s>" % (
_strclass(self.__class__),
getattr(self.context, '__name__', self.context))
__str__ = __repr__
def id(self):
if self.error_context:
return '%s:%s' % (repr(self), self.error_context)
else:
return repr(self)
def __hash__(self):
return object.__hash__(self)
# 2.3 compat -- force 2.4 call sequence
def __call__(self, *arg, **kw):
return self.run(*arg, **kw)
def exc_info(self):
"""Hook for replacing error tuple output
"""
return sys.exc_info()
def _exc_info(self):
"""Bottleneck to fix up IronPython string exceptions
"""
e = self.exc_info()
if sys.platform == 'cli':
if isinstance(e[0], StringException):
# IronPython throws these StringExceptions, but
# traceback checks type(etype) == str. Make a real
# string here.
e = (str(e[0]), e[1], e[2])
return e
def run(self, result):
"""Run tests in suite inside of suite fixtures.
"""
# proxy the result for myself
log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
#import pdb
#pdb.set_trace()
if self.resultProxy:
result, orig = self.resultProxy(result, self), result
else:
result, orig = result, result
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
self.error_context = 'setup'
result.addError(self, self._exc_info())
return
try:
for test in self._tests:
if result.shouldStop:
log.debug("stopping")
break
# each nose.case.Test will create its own result proxy
# so the cases need the original result, to avoid proxy
# chains
test(orig)
finally:
self.has_run = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
self.error_context = 'teardown'
result.addError(self, self._exc_info())
def hasFixtures(self, ctx_callback=None):
context = self.context
if context is None:
return False
if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
return True
# My context doesn't have any, but its ancestors might
factory = self.factory
if factory:
ancestors = factory.context.get(self, [])
for ancestor in ancestors:
if self.implementsAnyFixture(
ancestor, ctx_callback=ctx_callback):
return True
return False
def implementsAnyFixture(self, context, ctx_callback):
if isclass(context):
names = self.classSetup + self.classTeardown
else:
names = self.moduleSetup + self.moduleTeardown
if hasattr(context, '__path__'):
names += self.packageSetup + self.packageTeardown
# If my context has any fixture attribute, I have fixtures
fixt = False
for m in names:
if hasattr(context, m):
fixt = True
break
if ctx_callback is None:
return fixt
return ctx_callback(context, fixt)
def setUp(self):
log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
if not self:
# I have no tests
log.debug("suite %s has no tests", id(self))
return
if self.was_setup:
log.debug("suite %s already set up", id(self))
return
context = self.context
if context is None:
return
# before running my own context's setup, I need to
# ask the factory if my context's contexts' setups have been run
factory = self.factory
if factory:
# get a copy, since we'll be destroying it as we go
ancestors = factory.context.get(self, [])[:]
while ancestors:
ancestor = ancestors.pop()
log.debug("ancestor %s may need setup", ancestor)
if ancestor in factory.was_setup:
continue
log.debug("ancestor %s does need setup", ancestor)
self.setupContext(ancestor)
if not context in factory.was_setup:
self.setupContext(context)
else:
self.setupContext(context)
self.was_setup = True
log.debug("completed suite setup")
def setupContext(self, context):
self.config.plugins.startContext(context)
log.debug("%s setup context %s", self, context)
if self.factory:
if context in self.factory.was_setup:
return
# note that I ran the setup for this context, so that I'll run
# the teardown in my teardown
self.factory.was_setup[context] = self
if isclass(context):
names = self.classSetup
else:
names = self.moduleSetup
if hasattr(context, '__path__'):
names = self.packageSetup + names
try_run(context, names)
def shortDescription(self):
if self.context is None:
return "test suite"
return "test suite for %s" % self.context
def tearDown(self):
log.debug('context teardown')
if not self.was_setup or self.was_torndown:
log.debug(
"No reason to teardown (was_setup? %s was_torndown? %s)"
% (self.was_setup, self.was_torndown))
return
self.was_torndown = True
context = self.context
if context is None:
log.debug("No context to tear down")
return
# for each ancestor... if the ancestor was setup
# and I did the setup, I can do teardown
factory = self.factory
if factory:
ancestors = factory.context.get(self, []) + [context]
for ancestor in ancestors:
log.debug('ancestor %s may need teardown', ancestor)
if not ancestor in factory.was_setup:
log.debug('ancestor %s was not setup', ancestor)
continue
if ancestor in factory.was_torndown:
log.debug('ancestor %s already torn down', ancestor)
continue
setup = factory.was_setup[ancestor]
log.debug("%s setup ancestor %s", setup, ancestor)
if setup is self:
self.teardownContext(ancestor)
else:
self.teardownContext(context)
def teardownContext(self, context):
log.debug("%s teardown context %s", self, context)
if self.factory:
if context in self.factory.was_torndown:
return
self.factory.was_torndown[context] = self
if isclass(context):
names = self.classTeardown
else:
names = self.moduleTeardown
if hasattr(context, '__path__'):
names = self.packageTeardown + names
try_run(context, names)
self.config.plugins.stopContext(context)
# FIXME the wrapping has to move to the factory?
def _get_wrapped_tests(self):
for test in self._get_tests():
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
yield test
else:
yield Test(test,
config=self.config,
resultProxy=self.resultProxy)
_tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
"Access the tests in this suite. Tests are returned "
"inside of a context wrapper.")
class ContextSuiteFactory(object):
"""Factory for ContextSuites. Called with a collection of tests,
the factory decides on a hierarchy of contexts by introspecting
the collection or the tests themselves to find the objects
containing the test objects. It always returns one suite, but that
suite may consist of a hierarchy of nested suites.
"""
suiteClass = ContextSuite
def __init__(self, config=None, suiteClass=None, resultProxy=_def):
if config is None:
config = Config()
self.config = config
if suiteClass is not None:
self.suiteClass = suiteClass
# Using a singleton to represent default instead of None allows
# passing resultProxy=None to turn proxying off.
if resultProxy is _def:
resultProxy = ResultProxyFactory(config=config)
self.resultProxy = resultProxy
self.suites = {}
self.context = {}
self.was_setup = {}
self.was_torndown = {}
def __call__(self, tests, **kw):
"""Return ``ContextSuite`` for tests. ``tests`` may either
be a callable (in which case the resulting ContextSuite will
have no parent context and be evaluated lazily) or an
        iterable. In that case the tests will be wrapped in
        nose.case.Test, examined, the context of each found, and a
suite of suites returned, organized into a stack with the
outermost suites belonging to the outermost contexts.
"""
log.debug("Create suite for %s", tests)
context = kw.pop('context', getattr(tests, 'context', None))
log.debug("tests %s context %s", tests, context)
if context is None:
tests = self.wrapTests(tests)
try:
context = self.findContext(tests)
except MixedContextError:
return self.makeSuite(self.mixedSuites(tests), None, **kw)
return self.makeSuite(tests, context, **kw)
def ancestry(self, context):
"""Return the ancestry of the context (that is, all of the
packages and modules containing the context), in order of
descent with the outermost ancestor last.
This method is a generator.
"""
log.debug("get ancestry %s", context)
if context is None:
return
        # Methods include a reference to the module they are defined in; we
        # don't want that, we want the module the class is in now
        # (classes are re-ancestored elsewhere).
if hasattr(context, 'im_class'):
context = context.im_class
elif hasattr(context, '__self__'):
context = context.__self__.__class__
if hasattr(context, '__module__'):
ancestors = context.__module__.split('.')
elif hasattr(context, '__name__'):
ancestors = context.__name__.split('.')[:-1]
else:
raise TypeError("%s has no ancestors?" % context)
while ancestors:
log.debug(" %s ancestors %s", context, ancestors)
yield resolve_name('.'.join(ancestors))
ancestors.pop()
def findContext(self, tests):
if callable(tests) or isinstance(tests, unittest.TestSuite):
return None
context = None
for test in tests:
# Don't look at suites for contexts, only tests
ctx = getattr(test, 'context', None)
if ctx is None:
continue
if context is None:
context = ctx
elif context != ctx:
raise MixedContextError(
"Tests with different contexts in same suite! %s != %s"
% (context, ctx))
return context
def makeSuite(self, tests, context, **kw):
suite = self.suiteClass(
tests, context=context, config=self.config, factory=self,
resultProxy=self.resultProxy, **kw)
if context is not None:
self.suites.setdefault(context, []).append(suite)
self.context.setdefault(suite, []).append(context)
log.debug("suite %s has context %s", suite,
getattr(context, '__name__', None))
for ancestor in self.ancestry(context):
self.suites.setdefault(ancestor, []).append(suite)
self.context[suite].append(ancestor)
log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
return suite
def mixedSuites(self, tests):
"""The complex case where there are tests that don't all share
the same context. Groups tests into suites with common ancestors,
according to the following (essentially tail-recursive) procedure:
Starting with the context of the first test, if it is not
None, look for tests in the remaining tests that share that
ancestor. If any are found, group into a suite with that
ancestor as the context, and replace the current suite with
that suite. Continue this process for each ancestor of the
first test, until all ancestors have been processed. At this
point if any tests remain, recurse with those tests as the
input, returning a list of the common suite (which may be the
suite or test we started with, if no common tests were found)
plus the results of recursion.
"""
if not tests:
return []
head = tests.pop(0)
if not tests:
return [head] # short circuit when none are left to combine
suite = head # the common ancestry suite, so far
tail = tests[:]
context = getattr(head, 'context', None)
if context is not None:
ancestors = [context] + [a for a in self.ancestry(context)]
for ancestor in ancestors:
common = [suite] # tests with ancestor in common, so far
remain = [] # tests that remain to be processed
for test in tail:
found_common = False
test_ctx = getattr(test, 'context', None)
if test_ctx is None:
remain.append(test)
continue
if test_ctx is ancestor:
common.append(test)
continue
for test_ancestor in self.ancestry(test_ctx):
if test_ancestor is ancestor:
common.append(test)
found_common = True
break
if not found_common:
remain.append(test)
if common:
suite = self.makeSuite(common, ancestor)
tail = self.mixedSuites(remain)
return [suite] + tail
def wrapTests(self, tests):
log.debug("wrap %s", tests)
if callable(tests) or isinstance(tests, unittest.TestSuite):
log.debug("I won't wrap")
return tests
wrapped = []
for test in tests:
log.debug("wrapping %s", test)
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
wrapped.append(test)
elif isinstance(test, ContextList):
wrapped.append(self.makeSuite(test, context=test.context))
else:
wrapped.append(
Test(test, config=self.config, resultProxy=self.resultProxy)
)
return wrapped
class ContextList(object):
"""Not quite a suite -- a group of tests in a context. This is used
to hint the ContextSuiteFactory about what context the tests
belong to, in cases where it may be ambiguous or missing.
"""
def __init__(self, tests, context=None):
self.tests = tests
self.context = context
def __iter__(self):
return iter(self.tests)
class FinalizingSuiteWrapper(unittest.TestSuite):
"""Wraps suite and calls final function after suite has
executed. Used to call final functions in cases (like running in
the standard test runner) where test running is not under nose's
control.
"""
def __init__(self, suite, finalize):
self.suite = suite
self.finalize = finalize
def __call__(self, *arg, **kw):
return self.run(*arg, **kw)
# 2.7 compat
def __iter__(self):
return iter(self.suite)
def run(self, *arg, **kw):
try:
return self.suite(*arg, **kw)
finally:
self.finalize(*arg, **kw)
# backwards compat -- sort of
class TestDir:
def __init__(*arg, **kw):
raise NotImplementedError(
"TestDir is not usable with nose 0.10. The class is present "
"in nose.suite for backwards compatibility purposes but it "
"may not be used.")
class TestModule:
def __init__(*arg, **kw):
raise NotImplementedError(
"TestModule is not usable with nose 0.10. The class is present "
"in nose.suite for backwards compatibility purposes but it "
"may not be used.")
|
|
"""Utility functions for the plugins table."""
import difflib
import random
import re
import sys
import rethinkdb as r
from slugify import slugify
import db.util
r_conn = db.util.r_conn
class RequiredProperty(object):
pass
_ROW_SCHEMA = {
# Primary key. Human-readable permalink for a plugin. Eg. 'python-2'
'slug': RequiredProperty(),
# A name used strictly for purposes of associating info from different
# sources together. Eg. "nerdtree" (instead of "the-NERD-Tree.vim")
'normalized_name': '',
# One of the IDs from db/categories.yaml
# Eg. 'language'
'category': 'uncategorized',
# eg. ['C/C++', 'autocomplete']
'tags': [],
# Unix timestamp in seconds
'created_at': 0,
'updated_at': 0,
###########################################################################
# Info from the script on vim.org.
# eg. http://www.vim.org/scripts/script.php?script_id=2736
# Eg. '1234' (string)
'vimorg_id': '',
# Eg. 'Syntastic'
'vimorg_name': '',
# Eg. 'Marty Grenfell'
'vimorg_author': '',
# eg. 'http://www.vim.org/scripts/script.php?script_id=2736'
'vimorg_url': '',
# eg. 'utility'
'vimorg_type': '',
'vimorg_rating': 0,
'vimorg_num_raters': 0,
'vimorg_downloads': 0,
'vimorg_short_desc': '',
'vimorg_long_desc': '',
'vimorg_install_details': '',
###########################################################################
# Info from the author's GitHub repo (eg. github.com/scrooloose/syntastic)
# The unique identifier of a GitHub repo that's preserved on name changes
# or owner transfers. eg. '123567'
'github_repo_id': '',
# eg. 'scrooloose'
'github_owner': '',
# eg. 'syntastic'
'github_repo_name': '',
# Eg. 'Martin Grenfell'
'github_author': '',
'github_stars': 0,
# eg. 'Syntax checking hacks for vim'
'github_short_desc': '',
# eg. 'http://valloric.github.io/YouCompleteMe/'
'github_homepage': '',
'github_readme': '',
'github_readme_filename': '',
###########################################################################
# Info from the github.com/vim-scripts mirror.
# eg. github.com/vim-scripts/Syntastic
# Eg. 'syntastic'
'github_vim_scripts_repo_name': '',
'github_vim_scripts_stars': 0,
###########################################################################
# Info derived from elsewhere
# Number of Vundle/Pathogen/NeoBundle etc. users that reference the
# author's GitHub repo.
'github_bundles': 0,
# Number of Vundle/Pathogen/NeoBundle etc. users that reference the
# vim-scripts GitHub mirror.
'github_vim_scripts_bundles': 0,
}
# Reserve some slug names for potential pages in case we want to be able to
# link to plugins top-level, as in vimawesome.com/:slug
_RESERVED_SLUGS = set([
'plugins',
'plugin',
'p',
'tags',
'tag',
't',
'about',
'submit',
'upload',
'search',
'faq',
'blog',
])
###############################################################################
# Routines for basic DB CRUD operations.
_GITHUB_REPO_URL_TEMPLATE = 'https://github.com/%s/%s'
def ensure_table():
db.util.ensure_table('plugins', primary_key='slug')
db.util.ensure_index('plugins', 'vimorg_id')
db.util.ensure_index('plugins', 'github_stars')
db.util.ensure_index('plugins', 'normalized_name')
db.util.ensure_index('plugins', 'github_repo_id')
db.util.ensure_index('plugins', 'github_owner_repo',
lambda p: [p['github_owner'], p['github_repo_name']])
# TODO(david): Yep, using an ODM enforcing a consistent schema on write AND
# read would be great.
def insert(plugins, *args, **kwargs):
"""Insert or update a plugin or list of plugins.
Although this would be more accurately named "upsert", this is a wrapper
around http://www.rethinkdb.com/api/python/#insert that ensures
a consistent plugin schema before inserting into DB.
"""
if not isinstance(plugins, list):
plugins = [plugins]
mapped_plugins = []
for plugin in plugins:
_normalize(plugin)
mapped_plugins.append(dict(_ROW_SCHEMA, **plugin))
return r.table('plugins').insert(mapped_plugins, *args, **kwargs).run(
r_conn())
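# Example (hypothetical call, not from the original source): upserting a single
# scraped plugin dict; missing keys are filled in from _ROW_SCHEMA before the write.
#   insert({'vimorg_id': '2736', 'vimorg_name': 'Syntastic'}, conflict='replace')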
def _normalize(plugin):
if not plugin.get('slug'):
plugin['slug'] = _generate_unique_slug(plugin)
if not plugin.get('normalized_name'):
plugin['normalized_name'] = _normalize_name(plugin)
# Normalize the GitHub URL properties
for key in ['github_owner', 'github_repo_name']:
# Vim.org plugins don't have GitHub info
if key in plugin:
plugin[key] = plugin[key].lower()
def _generate_unique_slug(plugin):
"""Create a unique, human-readable ID for this plugin that can be used in
a permalink URL.
WARNING: Not thread-safe.
"""
name = (plugin.get('vimorg_name') or plugin.get('github_repo_name') or
plugin.get('github_vim_scripts_repo_name'))
assert name
slug = slugify(name)
if not _slug_taken(slug):
return slug
# If the slug isn't unique, try appending different slug suffixes until we
# get a unique slug. Don't worry, these suffixes only show up in the URL.
# And it's more efficient to randomly permute these than using
# a monotonically increasing integer.
#
# Also this is just wayyyyyyyy more awesome than appending numbers. <3
slug_suffixes = [
'say-im-great',
'already-like-death',
'he-is-going',
'may-fear-less',
'are-made-of',
'is-written-on',
'lives-by-it',
'tree-and-truth',
'today-cannot-touch',
'face-rejection',
'hard-things',
'please-everybody',
'with-ourselves',
'frighten-you',
'it-has-you',
'hands-off',
'thing-itself',
'the-thing-itself',
'impatience-and-laziness',
'be-who-we-are',
'care-of-itself',
'would-can-one',
'left-unsaid',
'or-are-not',
'is-holy',
'the-heights',
'without-it',
'own-character',
'who-speaks',
'looking-forward',
]
random.shuffle(slug_suffixes)
for slug_suffix in slug_suffixes:
slug = slugify('%s-%s' % (name, slug_suffix))
if not _slug_taken(slug):
return slug
raise Exception('Uh oh, we need more song titles. Too many'
' collisions of %s' % name)
def _slug_taken(slug):
"""Returns whether a slug has already been used or is reserved."""
return bool(r.table('plugins').get(slug).run(r_conn())) or (
slug in _RESERVED_SLUGS)
def _normalize_name(plugin):
"""Returns a normalized name for a plugin that can be used for matching
against other similar names.
"""
name = (plugin.get('vimorg_name') or plugin.get('github_repo_name') or
plugin.get('github_vim_scripts_repo_name'))
assert name
# Remove anything including and after the first '--', which vim-scripts
# uses as a separator to append author name to get unique repo names.
name = name.split('--', 1)[0]
# Remove any trailing '.zip'
    name = re.sub(r'\.zip$', '', name)
# Remove accents from chars, lowercases, and remove non-ASCII
name = slugify(name)
# Remove non-alphanumerics
name = re.sub(r'[\W_]+', '', name)
# Remove any number of leading {'vim', 'the'}, and any trailing 'vim'
name = re.sub('(?:^(vim|the)+|vim$)', '', name)
return name
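# Informal examples of the normalization above (derived from the steps in
# _normalize_name, not present in the original source):
#   {'github_repo_name': 'The-NERD-tree.vim'}  -> 'nerdtree'
#   {'vimorg_name': 'python.vim--Vasiliev'}    -> 'python'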
def update_tags(plugin, tags):
"""Updates a plugin's tags to the given set, and updates aggregate tag
counts.
"""
plugin_tags = plugin.get('tags', [])
added_tags = set(tags) - set(plugin_tags)
removed_tags = set(plugin_tags) - set(tags)
# TODO(david): May have to hold a lock while doing this
map(db.tags.add_tag, added_tags)
map(db.tags.remove_tag, removed_tags)
plugin['tags'] = tags
def to_json(p):
"""Returns a JSON-compatible dict of a plugin that can be serialized and
sent to clients.
"""
name = (p['vimorg_name'] or p['github_repo_name'] or
p['github_vim_scripts_repo_name'])
author = (p['vimorg_author'].strip() or p['github_author'].strip())
plugin_manager_users = (p['github_bundles'] +
p['github_vim_scripts_bundles'])
short_desc = p['vimorg_short_desc']
if (p['github_owner'] and
p['github_stars'] >= p['github_vim_scripts_stars']):
github_url = _GITHUB_REPO_URL_TEMPLATE % (
p['github_owner'], p['github_repo_name'])
github_stars = p['github_stars']
short_desc = p['github_short_desc']
elif p['github_vim_scripts_repo_name']:
github_url = _GITHUB_REPO_URL_TEMPLATE % (
'vim-scripts', p['github_vim_scripts_repo_name'])
github_stars = p['github_vim_scripts_stars']
else:
github_url = None
github_stars = 0
plugin = dict(p, **{
'name': name,
'author': author,
'plugin_manager_users': plugin_manager_users,
'short_desc': short_desc,
'github_url': github_url,
'github_stars': github_stars,
})
return plugin
###############################################################################
# Routines for merging in data from scraped sources.
# TODO(david): Write a Craig-esque comment about how all this works.
def update_plugin(old_plugin, new_plugin):
"""Merges properties of new_plugin onto old_plugin, much like a dict
update.
This is used to reconcile differences of data that we might get from
multiple sources about the same plugin, such as from vim.org, vim-scripts
GitHub repo, and the author's original GitHub repo.
Does not mutate any arguments. Returns the updated plugin.
"""
updated_plugin = _merge_dict_except_none(old_plugin, new_plugin)
# Keep the latest updated date.
if old_plugin.get('updated_at') and new_plugin.get('updated_at'):
updated_plugin['updated_at'] = max(old_plugin['updated_at'],
new_plugin['updated_at'])
# Keep the earliest created date.
if old_plugin.get('created_at') and new_plugin.get('created_at'):
updated_plugin['created_at'] = min(old_plugin['created_at'],
new_plugin['created_at'])
return updated_plugin
def _merge_dict_except_none(dict_a, dict_b):
"""Returns dict_a updated with any key/value pairs from dict_b where the
value is not None.
Does not mutate arguments. Also, please don't drink and drive.
"""
dict_b_filtered = {k: v for k, v in dict_b.iteritems() if v is not None}
return dict(dict_a, **dict_b_filtered)
def _is_similar_author_name(name1, name2):
"""Returns whether two author names are similar enough that they're
probably the same person.
"""
def normalize_author_name(name):
# Remove accents from chars, lowercases, and remove non-ASCII
name = slugify(name)
# Remove non-alphanumerics
name = re.sub(r'[\W_]+', '', name)
return name
name1 = normalize_author_name(name1)
name2 = normalize_author_name(name2)
return difflib.SequenceMatcher(None, name1, name2).ratio() >= 0.6
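# Informal example (not in the original source): 'Marty Grenfell' and
# 'Martin Grenfell' normalize to 'martygrenfell' / 'martingrenfell', whose
# SequenceMatcher ratio is roughly 0.89, comfortably above the 0.6 threshold.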
def _find_matching_plugins(plugin_data, repo=None):
"""Attempts to find the matching plugin from the given data using various
heuristics.
Ideally, this would never return more than one matching plugin, but our
heuristics are not perfect and there are many similar vim.org plugins named
"python.vim," for example.
Arguments:
plugin_data: Scraped data about a plugin.
repo: (optional) If plugin_data is scraped from GitHub, the
corresponding github_repo document containing info about the GitHub
repo.
Returns:
A list of plugins that are likely to be the same as the given
plugin_data.
"""
# If we have a vimorg_id, then we have a direct key to a vim.org script
# if it's in DB.
if plugin_data.get('vimorg_id'):
query = r.table('plugins').get_all(plugin_data['vimorg_id'],
index='vimorg_id')
return list(query.run(r_conn()))
# If we have a (github_owner, github_repo_name) pair, try to match it with
# an existing github-scraped plugin.
if plugin_data.get('github_owner') and plugin_data.get('github_repo_name'):
github_owner_repo = [
plugin_data['github_owner'].lower(),
plugin_data['github_repo_name'].lower()]
query = r.table('plugins').get_all(
github_owner_repo,
index='github_owner_repo')
matching_plugins = list(query.run(r_conn()))
if matching_plugins:
return matching_plugins
# Ok, now we know we have a GitHub-scraped plugin that we haven't scraped
# before. Try to find an associated vim.org plugin.
normalized_name = _normalize_name(plugin_data)
# If there's a set of vim.org plugins that reference this GitHub repo, see
# if we find any with a similar name in that set.
    if repo and repo.get('from_vim_scripts'):
vimorg_ids = set(repo['from_vim_scripts'])
matching_plugins = list(r.table('plugins').get_all(
*list(vimorg_ids), index='vimorg_id').run(r_conn()))
# First, see if we get a normalized name match from that set.
normalized_name_matches = filter(lambda p:
p['normalized_name'] == normalized_name, matching_plugins)
if normalized_name_matches:
return normalized_name_matches
# If not, broaden the search to any matched plugin names that are
# slightly similar. This is for cases like 'vim-colors-solarized' -->
# 'solarized' or 'Python-mode-klen' --> 'python-mode'
matching_plugins = filter(lambda plugin: difflib.SequenceMatcher(None,
plugin['normalized_name'], normalized_name).ratio() >= 0.6,
matching_plugins)
if matching_plugins:
return matching_plugins
# Ok, last chance. Find a plugin with the same normalized name AND
# a similar author name among all plugins.
query = r.table('plugins').get_all(
normalized_name, index='normalized_name')
matching_plugins = list(query.run(r_conn()))
author = plugin_data['github_author']
assert author
return filter(lambda plugin: _is_similar_author_name(
plugin.get('vimorg_author', ''), author), matching_plugins)
def _are_plugins_different(p1, p2):
"""Returns whether two plugins should be two different DB rows."""
if (p1.get('vimorg_id') and p2.get('vimorg_id') and
p1['vimorg_id'] != p2['vimorg_id']):
return True
if (p1.get('github_owner') and p1.get('github_repo_name') and
p2.get('github_owner') and p2.get('github_repo_name') and
(p1['github_owner'].lower(), p1['github_repo_name'].lower()) !=
(p2['github_owner'].lower(), p2['github_repo_name'].lower())):
return True
return False
def _add_submission_data(plugin, submission):
"""Updates a plugin with info from a user submission."""
if (plugin.get('category', 'uncategorized') == 'uncategorized' and
submission.get('category', 'uncategorized') != 'uncategorized'):
plugin['category'] = submission['category']
if not plugin.get('tags') and submission.get('tags'):
update_tags(plugin, submission['tags'])
def add_scraped_data(plugin_data, repo=None, submission=None):
"""Adds scraped plugin data from either vim.org, a github.com/vim-scripts
repo, or an arbitrary GitHub repo.
This will attempt to match the plugin data with an existing plugin already
in the DB using various heuristics. If a reasonable match is found, we
update, else, we insert a new plugin.
Arguments:
plugin_data: Scraped data about a plugin.
repo: (optional) If plugin_data is scraped from GitHub, the
corresponding github_repo document containing info about the GitHub
repo.
submission: (optional) Associated user submission info for this plugin.
"""
if submission:
_add_submission_data(plugin_data, submission)
plugins = _find_matching_plugins(plugin_data, repo)
if len(plugins) == 1 and not _are_plugins_different(
plugins[0], plugin_data):
updated_plugin = update_plugin(plugins[0], plugin_data)
insert(updated_plugin, conflict='replace')
else:
insert(plugin_data)
print 'inserted new plugin %s ...' % plugin_data['slug'],
sys.stdout.flush()
###############################################################################
# Utility functions for powering the web search.
def get_search_index():
"""Returns a view of the plugins table that can be used for search.
More precisely, we return a sorted list of all plugins, with fields limited
to the set that need to be displayed in search results or needed for
filtering and sorting. A keywords field is added that can be matched on
user-given search keywords.
We perform a search on plugins loaded in-memory because this is a lot more
performant (20x-30x faster on my MBPr) than ReQL queries, and the ~5000
plugins fit comfortably into memory.
The return value of this function should be cached for these gains.
"""
query = r.table('plugins')
query = query.without(['vimorg_long_desc', 'vimorg_install_details',
'github_long_desc', 'github_readme'])
# Don't show plugin managers because they're not technically plugins, and
# also our usage counts for them are not all accurate.
plugin_manager_slugs = [
'vim-plug',
'vundle',
'neobundle-vim',
'neobundle-vim-back-to-december',
'pathogen-vim']
query = query.filter(
lambda row: r.expr(plugin_manager_slugs).contains(row['slug']).not_()
)
plugins = map(to_json, query.run(r_conn()))
# We can't order_by on multiple fields with secondary indexes due to the
# following RethinkDB bug: https://github.com/rethinkdb/docs/issues/160
# Thus, we sort in-memory for now because it's way faster than using
# Rethink's order_by w/o indices (~7 secs vs. ~0.012 secs on my MBPr).
# TODO(david): Pass sort ordering as an argument somehow.
plugins.sort(key=lambda p: (-p.get('plugin_manager_users', 0),
-p.get('github_stars', 0), -p.get('vimorg_rating', 0)))
for plugin in plugins:
tokens = _get_search_tokens_for_plugin(plugin)
plugin['keywords'] = ' '.join(sorted(tokens))
return plugins
def _get_search_tokens_for_plugin(plugin):
"""Returns a set of lowercased keywords generated from various fields on
the plugin that can be used for searching.
"""
search_fields = ['name', 'tags', 'vimorg_author', 'github_author',
'vimorg_short_desc', 'github_short_desc']
tokens = set()
for field in search_fields:
if field not in plugin:
continue
value = plugin[field]
if isinstance(value, basestring):
tokens_list = value.split()
elif isinstance(value, list):
tokens_list = value
elif value is None:
tokens_list = []
else:
raise Exception('Field %s has untokenizable type %s' % (
field, type(value)))
tokens |= set(t.lower() for t in tokens_list)
return tokens
|
|
import datetime
import hashlib
import heapq
import math
import os
import random
import re
import sys
import threading
import zlib
try:
from collections import Counter
except ImportError:
Counter = None
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from playhouse._sqlite_ext import TableFunction
except ImportError:
TableFunction = None
SQLITE_DATETIME_FORMATS = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
from peewee import format_date_time
def format_date_time_sqlite(date_value):
return format_date_time(date_value, SQLITE_DATETIME_FORMATS)
try:
from playhouse import _sqlite_udf as cython_udf
except ImportError:
cython_udf = None
# Group udf by function.
CONTROL_FLOW = 'control_flow'
DATE = 'date'
FILE = 'file'
HELPER = 'helpers'
MATH = 'math'
STRING = 'string'
AGGREGATE_COLLECTION = {}
TABLE_FUNCTION_COLLECTION = {}
UDF_COLLECTION = {}
class synchronized_dict(dict):
def __init__(self, *args, **kwargs):
super(synchronized_dict, self).__init__(*args, **kwargs)
self._lock = threading.Lock()
def __getitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(synchronized_dict, self).__setitem__(key, value)
def __delitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__delitem__(key)
STATE = synchronized_dict()
SETTINGS = synchronized_dict()
# Class and function decorators.
def aggregate(*groups):
def decorator(klass):
for group in groups:
AGGREGATE_COLLECTION.setdefault(group, [])
AGGREGATE_COLLECTION[group].append(klass)
return klass
return decorator
def table_function(*groups):
def decorator(klass):
for group in groups:
TABLE_FUNCTION_COLLECTION.setdefault(group, [])
TABLE_FUNCTION_COLLECTION[group].append(klass)
return klass
return decorator
def udf(*groups):
def decorator(fn):
for group in groups:
UDF_COLLECTION.setdefault(group, [])
UDF_COLLECTION[group].append(fn)
return fn
return decorator
# Register aggregates / functions with connection.
def register_aggregate_groups(db, *groups):
seen = set()
for group in groups:
klasses = AGGREGATE_COLLECTION.get(group, ())
for klass in klasses:
name = getattr(klass, 'name', klass.__name__)
if name not in seen:
seen.add(name)
db.register_aggregate(klass, name)
def register_table_function_groups(db, *groups):
seen = set()
for group in groups:
klasses = TABLE_FUNCTION_COLLECTION.get(group, ())
for klass in klasses:
if klass.name not in seen:
seen.add(klass.name)
db.register_table_function(klass)
def register_udf_groups(db, *groups):
seen = set()
for group in groups:
functions = UDF_COLLECTION.get(group, ())
for function in functions:
name = function.__name__
if name not in seen:
seen.add(name)
db.register_function(function, name)
def register_groups(db, *groups):
register_aggregate_groups(db, *groups)
register_table_function_groups(db, *groups)
register_udf_groups(db, *groups)
def register_all(db):
register_aggregate_groups(db, *AGGREGATE_COLLECTION)
register_table_function_groups(db, *TABLE_FUNCTION_COLLECTION)
register_udf_groups(db, *UDF_COLLECTION)
# Begin actual user-defined functions and aggregates.
# Scalar functions.
@udf(CONTROL_FLOW)
def if_then_else(cond, truthy, falsey=None):
if cond:
return truthy
return falsey
@udf(DATE)
def strip_tz(date_str):
date_str = date_str.replace('T', ' ')
tz_idx1 = date_str.find('+')
if tz_idx1 != -1:
return date_str[:tz_idx1]
tz_idx2 = date_str.find('-')
if tz_idx2 > 13:
return date_str[:tz_idx2]
return date_str
@udf(DATE)
def human_delta(nseconds, glue=', '):
parts = (
(86400 * 365, 'year'),
(86400 * 30, 'month'),
(86400 * 7, 'week'),
(86400, 'day'),
(3600, 'hour'),
(60, 'minute'),
(1, 'second'),
)
accum = []
for offset, name in parts:
val, nseconds = divmod(nseconds, offset)
if val:
suffix = val != 1 and 's' or ''
accum.append('%s %s%s' % (val, name, suffix))
if not accum:
return '0 seconds'
return glue.join(accum)
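# Informal example (not in the original source):
#   human_delta(90061) -> '1 day, 1 hour, 1 minute, 1 second'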
@udf(FILE)
def file_ext(filename):
try:
res = os.path.splitext(filename)
except ValueError:
return None
return res[1]
@udf(FILE)
def file_read(filename):
try:
with open(filename) as fh:
return fh.read()
except:
pass
if sys.version_info[0] == 2:
@udf(HELPER)
def gzip(data, compression=9):
return buffer(zlib.compress(data, compression))
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
else:
@udf(HELPER)
def gzip(data, compression=9):
if isinstance(data, str):
data = bytes(data.encode('raw_unicode_escape'))
return zlib.compress(data, compression)
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
@udf(HELPER)
def hostname(url):
parse_result = urlparse(url)
if parse_result:
return parse_result.netloc
@udf(HELPER)
def toggle(key):
key = key.lower()
STATE[key] = ret = not STATE.get(key)
return ret
@udf(HELPER)
def setting(key, value=None):
if value is None:
return SETTINGS.get(key)
else:
SETTINGS[key] = value
return value
@udf(HELPER)
def clear_settings():
SETTINGS.clear()
@udf(HELPER)
def clear_toggles():
STATE.clear()
@udf(MATH)
def randomrange(start, end=None, step=None):
if end is None:
start, end = 0, start
elif step is None:
step = 1
return random.randrange(start, end, step)
@udf(MATH)
def gauss_distribution(mean, sigma):
try:
return random.gauss(mean, sigma)
except ValueError:
return None
@udf(MATH)
def sqrt(n):
try:
return math.sqrt(n)
except ValueError:
return None
@udf(MATH)
def tonumber(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except:
return None
@udf(STRING)
def substr_count(haystack, needle):
if not haystack or not needle:
return 0
return haystack.count(needle)
@udf(STRING)
def strip_chars(haystack, chars):
return haystack.strip(chars)
def _hash(constructor, *args):
hash_obj = constructor()
for arg in args:
hash_obj.update(arg)
return hash_obj.hexdigest()
# Aggregates.
class _heap_agg(object):
def __init__(self):
self.heap = []
self.ct = 0
def process(self, value):
return value
def step(self, value):
self.ct += 1
heapq.heappush(self.heap, self.process(value))
class _datetime_heap_agg(_heap_agg):
def process(self, value):
return format_date_time_sqlite(value)
if sys.version_info[:2] == (2, 6):
def total_seconds(td):
return (td.seconds +
(td.days * 86400) +
(td.microseconds / (10.**6)))
else:
total_seconds = lambda td: td.total_seconds()
@aggregate(DATE)
class mintdiff(_datetime_heap_agg):
def finalize(self):
dtp = min_diff = None
while self.heap:
if min_diff is None:
if dtp is None:
dtp = heapq.heappop(self.heap)
continue
dt = heapq.heappop(self.heap)
diff = dt - dtp
if min_diff is None or min_diff > diff:
min_diff = diff
dtp = dt
if min_diff is not None:
return total_seconds(min_diff)
@aggregate(DATE)
class avgtdiff(_datetime_heap_agg):
def finalize(self):
if self.ct < 1:
return
elif self.ct == 1:
return 0
total = ct = 0
dtp = None
while self.heap:
if total == 0:
if dtp is None:
dtp = heapq.heappop(self.heap)
continue
dt = heapq.heappop(self.heap)
diff = dt - dtp
ct += 1
total += total_seconds(diff)
dtp = dt
return float(total) / ct
@aggregate(DATE)
class duration(object):
def __init__(self):
self._min = self._max = None
def step(self, value):
dt = format_date_time_sqlite(value)
if self._min is None or dt < self._min:
self._min = dt
if self._max is None or dt > self._max:
self._max = dt
def finalize(self):
if self._min and self._max:
td = (self._max - self._min)
return total_seconds(td)
return None
@aggregate(MATH)
class mode(object):
if Counter:
def __init__(self):
self.items = Counter()
def step(self, *args):
self.items.update(args)
def finalize(self):
if self.items:
return self.items.most_common(1)[0][0]
else:
def __init__(self):
self.items = []
def step(self, item):
self.items.append(item)
def finalize(self):
if self.items:
return max(set(self.items), key=self.items.count)
@aggregate(MATH)
class minrange(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
prev = min_diff = None
while self.heap:
if min_diff is None:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
if min_diff is None or min_diff > diff:
min_diff = diff
prev = curr
return min_diff
@aggregate(MATH)
class avgrange(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
total = ct = 0
prev = None
while self.heap:
if total == 0:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
ct += 1
total += diff
prev = curr
return float(total) / ct
@aggregate(MATH)
class _range(object):
name = 'range'
def __init__(self):
self._min = self._max = None
def step(self, value):
if self._min is None or value < self._min:
self._min = value
if self._max is None or value > self._max:
self._max = value
def finalize(self):
if self._min is not None and self._max is not None:
return self._max - self._min
return None
@aggregate(MATH)
class stddev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, v):
self.n += 1
self.values.append(v)
def finalize(self):
if self.n <= 1:
return 0
mean = sum(self.values) / self.n
return math.sqrt(sum((i - mean) ** 2 for i in self.values) / (self.n - 1))
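# Informal example (not in the original source): stepping the values 1 and 5
# through stddev() yields sqrt(((1 - 3)**2 + (5 - 3)**2) / 1) ~= 2.83, i.e. the
# sample standard deviation (n - 1 in the denominator).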
if cython_udf is not None:
damerau_levenshtein_dist = udf(STRING)(cython_udf.damerau_levenshtein_dist)
levenshtein_dist = udf(STRING)(cython_udf.levenshtein_dist)
str_dist = udf(STRING)(cython_udf.str_dist)
median = aggregate(MATH)(cython_udf.median)
if TableFunction is not None:
@table_function(STRING)
class RegexSearch(TableFunction):
params = ['regex', 'search_string']
columns = ['match']
name = 'regex_search'
def initialize(self, regex=None, search_string=None):
self._iter = re.finditer(regex, search_string)
def iterate(self, idx):
return (next(self._iter).group(0),)
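    # Informal usage note (not in the original source): once registered, the
    # table function can be queried from SQL, e.g.
    #   SELECT match FROM regex_search('[0-9]+', 'a1 b22')
    # which yields one row per regex match: '1', then '22'.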
@table_function(DATE)
class DateSeries(TableFunction):
params = ['start', 'stop', 'step_seconds']
columns = ['date']
name = 'date_series'
def initialize(self, start, stop, step_seconds=86400):
self.start = format_date_time_sqlite(start)
self.stop = format_date_time_sqlite(stop)
step_seconds = int(step_seconds)
self.step_seconds = datetime.timedelta(seconds=step_seconds)
if (self.start.hour == 0 and
self.start.minute == 0 and
self.start.second == 0 and
step_seconds >= 86400):
self.format = '%Y-%m-%d'
elif (self.start.year == 1900 and
self.start.month == 1 and
self.start.day == 1 and
self.stop.year == 1900 and
self.stop.month == 1 and
self.stop.day == 1 and
step_seconds < 86400):
self.format = '%H:%M:%S'
else:
self.format = '%Y-%m-%d %H:%M:%S'
def iterate(self, idx):
if self.start > self.stop:
raise StopIteration
current = self.start
self.start += self.step_seconds
return (current.strftime(self.format),)
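if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: register the MATH
    # and STRING scalar UDFs on an in-memory SQLite database via peewee, then
    # call two of them through plain SQL. Assumes peewee's SqliteDatabase,
    # which exposes register_function().
    from peewee import SqliteDatabase

    demo_db = SqliteDatabase(':memory:')
    register_udf_groups(demo_db, MATH, STRING)
    cursor = demo_db.execute_sql(
        "SELECT tonumber('3.5'), substr_count('banana', 'an')")
    print(cursor.fetchone())  # expected: (3.5, 2)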
|
|
#!/usr/bin/env python
# This tool dumps imported Swift APIs to help validate changes in the
# Clang importer and its heuristics. One can execute it to dump the
# API of a given module within a particular SDK, e.g., UIKit from the
# iOS SDK as seen in Swift 3 after the "grand renaming":
#
# /path/to/bin/dir/swift-api-dump.py -3 -o output-dir -m UIKit -s iphoneos
#
# The -3 argument indicates that we're using the Swift 3 Clang
# importer rules. The "-m" argument can be omitted, in which case the
# script will collect all of the frameworks in the named SDK(s) and
# dump their APIs.
#
# One can supply multiple SDKs, written as a list. For example, to
# dump the API for all frameworks across OS X, iOS, watchOS, and tvOS,
# with the Swift 3 rules, use:
#
# /path/to/bin/dir/swift-api-dump.py -3 -o output-dir -s macosx iphoneos \
# watchos appletvos
#
from __future__ import print_function
import argparse
import multiprocessing
import os
import re
import subprocess
import sys
DEFAULT_TARGET_BASED_ON_SDK = {
'macosx': 'x86_64-apple-macosx10.11',
'iphoneos': 'arm64-apple-ios9.0',
'iphonesimulator': 'x86_64-apple-ios9.0',
'watchos': 'armv7k-apple-watchos2.0',
'watchos.simulator': 'i386-apple-watchos2.0',
'appletvos': 'arm64-apple-tvos9',
'appletvos.simulator': 'x86_64-apple-tvos9',
}
SKIPPED_FRAMEWORKS = {
'AppKitScripting',
'CalendarStore',
'CoreMIDIServer',
'DrawSprocket',
'DVComponentGlue',
'InstallerPlugins',
'InstantMessage',
'JavaFrameEmbedding',
'JavaVM',
'Kerberos',
'Kernel',
'LDAP',
'Message',
'PCSC',
'PubSub',
'QTKit',
'QuickTime',
'Ruby',
'Scripting',
'SyncServices',
'System',
'Tk',
'VideoDecodeAcceleration',
'vecLib',
}
def create_parser():
script_path = os.path.dirname(sys.argv[0])
script_path = os.path.abspath(script_path)
default_swift_ide_test = '%s/swift-ide-test' % (script_path)
parser = argparse.ArgumentParser(
description="Dumps imported Swift APIs for a module or SDK",
prog='swift-api-dump.py',
usage='%(prog)s -s iphoneos')
parser.add_argument('-m', '--module', help='The module name.')
parser.add_argument('-j', '--jobs', type=int,
help='The number of parallel jobs to execute')
parser.add_argument('-s', '--sdk', nargs='+',
required=True, help="The SDKs to use.")
parser.add_argument('-t', '--target', help="The target triple to use.")
parser.add_argument('-i', '--swift-ide-test',
default=default_swift_ide_test,
help="The swift-ide-test executable.")
parser.add_argument('-o', '--output-dir', default=os.getcwd(),
help='Directory to which the output will be emitted.')
parser.add_argument('-q', '--quiet', action='store_true',
help='Suppress printing of status messages.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Print extra information.')
parser.add_argument('-F', '--framework-dir', action='append',
help='Add additional framework directories')
parser.add_argument('-I', '--include-dir', action='append',
help='Add additional include directories')
parser.add_argument('--enable-infer-import-as-member', action='store_true',
help='Infer when a global could be imported as a ' +
'member.')
return parser
def output_command_result_to_file(command_args, filename):
with open(filename, 'w') as output_file:
subprocess.call(command_args, stdout=output_file)
def run_command(args):
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
exitcode = proc.returncode
return (exitcode, out, err)
# Collect the set of submodules for the given module.
def collect_submodules(common_args, module):
# Execute swift-ide-test to print the interface.
my_args = ['-module-print-submodules', '-module-to-print=%s' % (module)]
(exitcode, out, err) = run_command(common_args + my_args)
if exitcode != 0:
print(
'error: submodule collection failed for module %s with error %d' %
(module, exitcode))
return ()
# Find all of the submodule imports.
    import_matcher = re.compile(r'.*import\s+%s\.([A-Za-z_0-9.]+)' % (module))
submodules = set()
for line in out.splitlines():
match = import_matcher.match(line)
if match:
submodules.add(match.group(1))
return sorted(list(submodules))
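# Informal example (not from the original source): in the printed interface,
# a line such as 'import UIKit.UIGestureRecognizerSubclass' would be captured
# by the matcher above as the submodule 'UIGestureRecognizerSubclass'.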
# Print out the command we're about to execute
def print_command(cmd, outfile=""):
    cmd_str = " ".join(cmd)
    if outfile != "":
        cmd_str += " > " + outfile
    print(cmd_str)
# Dump the API for the given module.
def dump_module_api((cmd, extra_dump_args, output_dir, module, quiet,
verbose)):
# Collect the submodules
submodules = collect_submodules(cmd, module)
# Dump the top-level module
if verbose:
print("mkdir -p %s/%s" % (output_dir, module))
subprocess.call(['mkdir', '-p', ('%s/%s' % (output_dir, module))])
output_file = '%s/%s/%s.swift' % (output_dir, module, module)
if not quiet:
print('Writing %s...' % output_file)
top_level_cmd = cmd + extra_dump_args + ['-module-to-print=%s' % (module)]
if verbose:
print_command(top_level_cmd, output_file)
output_command_result_to_file(top_level_cmd, output_file)
# Dump each submodule.
for submodule in submodules:
output_file = '%s/%s/%s.swift' % (output_dir, module, submodule)
if not quiet:
print('Writing %s...' % output_file)
full_submodule = '%s.%s' % (module, submodule)
submodule_cmd = cmd + extra_dump_args
submodule_cmd = submodule_cmd + \
['-module-to-print=%s' % (full_submodule)]
if verbose:
print_command(submodule_cmd, output_file)
output_command_result_to_file(submodule_cmd, output_file)
return
def pretty_sdk_name(sdk):
if sdk.find("macosx") == 0:
return 'OSX'
if sdk.find("iphoneos") == 0:
return 'iOS'
if sdk.find("watchos") == 0:
return 'watchOS'
if sdk.find("appletvos") == 0:
return 'tvOS'
return 'unknownOS'
# Collect the set of frameworks we should dump
def collect_frameworks(sdk):
(exitcode, sdk_path, err) = run_command(
["xcrun", "--show-sdk-path", "-sdk", sdk])
if exitcode != 0:
print('error: framework collection failed to find SDK path for %s '
'with error %d' % (sdk, exitcode))
return ()
sdk_path = sdk_path.rstrip()
(exitcode, sdk_version, err) = run_command(
["xcrun", "--show-sdk-version", "-sdk", sdk])
if exitcode != 0:
print('error: framework collection failed to find SDK version for %s '
'with error %d' % (sdk, exitcode))
return ()
sdk_version = sdk_version.rstrip()
print('Collecting frameworks from %s %s at %s' %
(pretty_sdk_name(sdk), sdk_version, sdk_path))
# Collect all of the framework names
frameworks_dir = '%s/System/Library/Frameworks' % sdk_path
    framework_matcher = re.compile(r'([A-Za-z_0-9.]+)\.framework')
frameworks = set()
for entry in os.listdir(frameworks_dir):
match = framework_matcher.match(entry)
if match:
framework = match.group(1)
if framework not in SKIPPED_FRAMEWORKS:
frameworks.add(framework)
return (sorted(list(frameworks)), sdk_path)
def create_dump_module_api_args(cmd_common, cmd_extra_args, sdk, module,
target, source_filename, output_dir, quiet,
verbose):
# Determine the SDK root and collect the set of frameworks.
(frameworks, sdk_root) = collect_frameworks(sdk)
# Determine the default target.
if target:
sdk_target = target
else:
sdk_target = DEFAULT_TARGET_BASED_ON_SDK[sdk]
    # Determine the output directory.
pretty_sdk = pretty_sdk_name(sdk)
sdk_output_dir = '%s/%s' % (output_dir, pretty_sdk)
# Create the sets of arguments to dump_module_api.
results = []
cmd = cmd_common + ['-sdk', sdk_root, '-target', sdk_target]
if module:
results.append(
(cmd, cmd_extra_args, sdk_output_dir, module, quiet, verbose))
else:
for framework in frameworks:
results.append(
(cmd, cmd_extra_args, sdk_output_dir, framework, quiet,
verbose))
return results
def main():
source_filename = 'swift-api-dump.swift'
parser = create_parser()
args = parser.parse_args()
cmd_common = [
args.swift_ide_test,
'-print-module',
'-source-filename',
source_filename,
'-module-print-skip-overlay',
'-skip-unavailable',
'-skip-print-doc-comments',
'-always-argument-labels',
'-skip-overrides'
]
# Add -F / -I arguments.
if args.framework_dir:
for path in args.framework_dir:
cmd_common = cmd_common + ['-F', path]
if args.include_dir:
for path in args.include_dir:
cmd_common = cmd_common + ['-I', path]
# Determine the set of extra arguments we'll use.
extra_args = ['-skip-imports']
if args.enable_infer_import_as_member:
extra_args = extra_args + ['-enable-infer-import-as-member']
# Create a .swift file we can feed into swift-ide-test
subprocess.call(['touch', source_filename])
# Construct the set of API dumps we should perform.
jobs = []
for sdk in args.sdk:
jobs = jobs + create_dump_module_api_args(
cmd_common, extra_args, sdk, args.module,
args.target, source_filename, args.output_dir,
args.quiet, args.verbose)
# Execute the API dumps
pool = multiprocessing.Pool(processes=args.jobs)
pool.map(dump_module_api, jobs)
# Remove the .swift file we fed into swift-ide-test
subprocess.call(['rm', '-f', source_filename])
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <jev.kuznetsov@gmail.com>
# License: BSD
"""
Toolset for working with Yahoo Finance data.
This module includes functions for easy access to Yahoo Finance data.
"""
from datetime import datetime, date
import urllib.request
from pandas import DataFrame, Index, HDFStore, Panel
import numpy as np
import os
from .extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
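# Informal examples (not from the original source):
#   parseStr('"GOOG"') -> 'GOOG'
#   parseStr('N/A')    -> nan
#   parseStr('1.5M')   -> 1500000.0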
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = Panel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in list(store.keys()) ]
data = dict(list(zip(symbols,[store[symbol] for symbol in symbols])))
self.wp = Panel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
        print('Saving data to %s' % dataFile)
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = Panel({symbol:df})
else:
self.wp[symbol] = df
except Exception as e:
print(e)
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
"""
get current yahoo quote
Parameters
-----------
symbols : list of str
list of ticker symbols
Returns
-----------
DataFrame , data is row-wise
"""
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(list(zip(header,[[] for i in range(len(header))])))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib.request.urlopen(urlStr).readlines()
except Exception as e:
        print("Failed to download:\n{0}".format(e))
        return None
for line in lines:
fields = line.decode().strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
    Returns an OHLCV data frame if a single symbol is provided.
    If a list of symbols is provided, a wide panel is returned.
Parameters
------------
symbols : str or list
        Yahoo Finance symbol or a list of symbols
sDate : tuple (optional)
start date (y,m,d)
eDate : tuple (optional)
end date (y,m,d)
adjust : bool
T/[F] adjust data based on adj_close
Returns
---------
Panel
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print('Downloading data:')
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return Panel(data)
def getSymbolData(symbol, sDate=(1990,1,1), eDate=None, adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
Parameters
-----------
symbol : str
        Yahoo Finance symbol
sDate : tuple , optional
start date (y,m,d), defaults to 1 jan 1990
eDate : tuple , optional
end date (y,m,d), defaults to current date
adjust : bool , optional
        use adjusted close values to correct OHLC. adj_close will be omitted
verbose : bool , optional
print output
Returns
---------
DataFrame
"""
if eDate is None: eDate = date.today().timetuple()[0:3]
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib.request.urlopen(urlStr).readlines()
except Exception as e:
        print("Failed to download:\n{0}".format(e))
return None
dates = []
data = [[] for i in range(6)]
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.decode().rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(list(zip(['open','high','low','close','volume','adj_close'],data)))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort_index()
if verbose:
print(('Got %i days of data' % len(df)))
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    adjust historical OHLC data based on the adj_close field
'''
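    # Scale factor: c = close / adj_close, so dividing each raw price column
    # by c re-expresses open/high/low on the same (split/dividend adjusted)
    # basis as adj_close.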
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(list(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low'])))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
|
|
'''Dependency checking functionality for Library, Designer and Smartgrid Actions.
Created on May 16, 2013
@author: Cam Moore
'''
from apps.widgets.smartgrid_library.models import LibraryAction
from apps.widgets.smartgrid_design.models import DesignerAction, DesignerGrid
from apps.managers.smartgrid_mgr.gcc_model import ActionNode, DependencyTree, Error, Warn
from collections import OrderedDict
def __build_library_nodes():
"""Creates a list of ActionNodes for all the LibraryActions."""
nodes = []
for action in LibraryAction.objects.all():
nodes.append(ActionNode(action, identifier=action.slug))
return nodes
def __build_designer_nodes(draft):
"""Creates a list of ActionNodes for all the DesignerActions in the given Draft."""
nodes = []
for action in DesignerAction.objects.filter(draft=draft):
locations = DesignerGrid.objects.filter(draft=draft, action=action)
if len(locations) == 0: # in palette
nodes.append(ActionNode(action, identifier=action.slug))
else:
for loc in locations:
nodes.append(ActionNode(action, level=loc.level, identifier=action.slug))
return nodes
def __build_designer_grid_nodes(draft):
"""Creates a list of ActionNodes for the DesignerActions in the Designer grid for the given
Draft."""
nodes = []
for action in DesignerAction.objects.filter(draft=draft):
locations = DesignerGrid.objects.filter(draft=draft, action=action)
for loc in locations:
nodes.append(ActionNode(action, level=loc.level, identifier=action.slug))
return nodes
def __get_submitted_action_slugs(node):
"""Returns the action slugs for submitted_action predicates in the given node's
unlock_condition."""
ret = []
if node.unlock_condition:
l = node.unlock_condition.split('submitted_action(')
if len(l) > 1:
index = l[1].find(')')
ret.append(l[1][:index].strip('"\''))
if len(l) > 2:
                index = l[2].find(')')
ret.append(l[2][:index].strip('"\''))
return ret
def __get_approved_action_slugs(node):
"""Returns the action slugs for approved_action predicates in the node's unlock_condition."""
ret = []
if node.unlock_condition:
l = node.unlock_condition.split('approved_action(')
if len(l) > 1:
index = l[1].find(')')
ret.append(l[1][:index].strip('"\''))
if len(l) > 2:
                index = l[2].find(')')
ret.append(l[2][:index].strip('"\''))
return ret
def __get_dependent_action_slugs(node):
"""Returns the action slugs in the node's unlock_condition."""
ret = []
for slug in __get_submitted_action_slugs(node):
ret.append(slug)
for slug in __get_approved_action_slugs(node):
ret.append(slug)
return ret
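# Informal example (not in the original source): for an unlock_condition like
#   submitted_action("intro-video") or approved_action("energy-quiz")
# __get_dependent_action_slugs returns ['intro-video', 'energy-quiz'].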
def build_library_trees():
"""Builds the DependencyTrees for the LibraryActions."""
nodes = __build_library_nodes()
trees = {}
for node in nodes:
if node.unlock_condition == "True" or node.unlock_condition.find("or True") != -1 \
or node.unlock_condition == "False" or node.unlock_condition.find("and False") != -1:
t = DependencyTree()
t.create_node(node.action, level=node.level, identifier=node.identifier)
trees[node.identifier] = t
for node in nodes:
slugs = __get_dependent_action_slugs(node)
for slug in slugs:
for k in list(trees):
if trees[k].get_node(slug):
trees[k].add_node(node, slug)
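    # Second attachment pass (hedged reading of the original intent): actions
    # whose prerequisite was itself only attached during the first pass get a
    # chance to be placed into the trees here.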
for node in nodes:
slugs = __get_dependent_action_slugs(node)
for slug in slugs:
for k in list(trees):
if trees[k].get_node(slug):
trees[k].add_node(node, slug)
return trees
def build_designer_grid_trees(draft):
"""Builds the DependencyTrees for the DesignerActions in the DesignerGrid."""
nodes = __build_designer_grid_nodes(draft)
trees = {}
for node in nodes:
if node.unlock_condition:
if node.unlock_condition == "True" or node.unlock_condition.find("or True") != -1 \
or node.unlock_condition == "False" or node.unlock_condition.find("and False") != -1:
t = DependencyTree()
t.create_node(node.action, level=node.level, identifier=node.identifier)
trees[node.identifier] = t
for i in range(10):
_ = i
for node in nodes:
slugs = __get_dependent_action_slugs(node)
for slug in slugs:
for k in list(trees):
if trees[k].get_node(slug):
trees[k].add_node(node, slug)
sorted_trees = OrderedDict(sorted(trees.items(), key=lambda t: -len(t[1])))
return sorted_trees
def check_unreachable_designer_actions(draft):
"""Returns a list of Errors for each unreachable DesignerAction in the draft."""
ret = []
nodes = __build_designer_grid_nodes(draft)
trees = build_designer_grid_trees(draft)
# check all the nodes
for node in nodes:
in_tree = False
for k in list(trees):
tree = trees[k]
if tree.get_node(node.identifier):
in_tree = True
if not in_tree:
ret.append(Error(message="Action not reachable/unlockable [%s]" % \
node.action.unlock_condition, action=node.action))
return ret
def check_false_unlock_designer_actions(draft):
"""Returns a list of Warnings for Designer actions whose root unlock_condition is False."""
ret = []
false_actions = []
trees = build_designer_grid_trees(draft)
for k in list(trees):
tree = trees[k]
root = tree.get_node(tree.root)
if root:
if root.unlock_condition == "False" or root.unlock_condition.find("and False") != -1:
for node_key in list(tree.nodes):
node = tree.nodes[node_key]
if not node.action in false_actions and not node.action.type == 'filler':
false_actions.append(node.action)
for action in false_actions:
ret.append(Warn(message="Depends on action with False unlock condition", action=action))
return ret
def check_missmatched_designer_level(draft):
"""Returns a list of Warnings for actions whose parent level is higher than their own."""
ret = []
trees = build_designer_grid_trees(draft)
for k in list(trees):
tree = trees[k]
for node_key in list(tree.nodes):
node = tree.nodes[node_key]
if node.level:
parent_name = node.parent
if parent_name:
parent = tree.nodes[parent_name]
if parent and parent.level and parent.level.priority > node.level.priority:
message = "with %s Depends on action %s with higher level %s" % \
(node.level, parent.admin_link(), parent.level)
ret.append(Warn(message=message, action=node.action))
return ret
def check_unreachable_library_actions():
"""Returns a list of Errors for each unreachable LibraryAction."""
ret = []
nodes = __build_library_nodes()
trees = build_library_trees()
# check all the nodes
for node in nodes:
in_tree = False
for k in list(trees):
tree = trees[k]
if tree.get_node(node.identifier):
in_tree = True
if not in_tree:
ret.append(Error(message="Action not reachable/unlockable", action=node.action))
return ret
def check_false_unlock_library_actions():
"""Returns a list of Warnings for LibraryActions whose root unlock_condition is False."""
ret = []
false_actions = []
trees = build_library_trees()
for k in list(trees):
tree = trees[k]
root = tree.get_node(tree.root)
if root:
            if root.unlock_condition == "False" or root.unlock_condition.find("and False") != -1:
for node_key in list(tree.nodes):
node = tree.nodes[node_key]
if not node.action in false_actions and not node.action.type == 'filler':
false_actions.append(node.action)
for action in false_actions:
ret.append(Warn(message="Depends on action with False unlock condition", action=action))
return ret
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8899")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8899")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Tetcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Tetcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
    try:
        data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
    except:
        print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
from pandac.PandaModules import *
from direct.distributed.DistributedObject import DistributedObject
from direct.task.Task import Task
from toontown.minigame import CannonGameGlobals
from toontown.minigame.CannonGameGlobals import *
from toontown.parties.Cannon import Cannon
from toontown.parties.CannonGui import CannonGui
from toontown.parties import PartyGlobals
from toontown.parties.DistributedPartyCannonActivity import DistributedPartyCannonActivity
LAND_TIME = 2
WORLD_SCALE = 2.0
GROUND_SCALE = 1.4 * WORLD_SCALE
CANNON_SCALE = 1.0
FAR_PLANE_DIST = 600 * WORLD_SCALE
GROUND_PLANE_MIN = -15
CANNON_Y = -int(CannonGameGlobals.TowerYRange / 2 * 1.3)
CANNON_X_SPACING = 12
CANNON_Z = 20
CANNON_ROTATION_MIN = -55
CANNON_ROTATION_MAX = 50
CANNON_ROTATION_VEL = 15.0
CANNON_ANGLE_MIN = 15
CANNON_ANGLE_MAX = 85
CANNON_ANGLE_VEL = 15.0
CANNON_MOVE_UPDATE_FREQ = 0.5
CAMERA_PULLBACK_MIN = 20
CAMERA_PULLBACK_MAX = 40
MAX_LOOKAT_OFFSET = 80
TOON_TOWER_THRESHOLD = 150
SHADOW_Z_OFFSET = 0.5
TOWER_HEIGHT = 43.85
TOWER_RADIUS = 10.5
BUCKET_HEIGHT = 36
TOWER_Y_RANGE = CannonGameGlobals.TowerYRange
TOWER_X_RANGE = int(TOWER_Y_RANGE / 2.0)
INITIAL_VELOCITY = 80.0
WHISTLE_SPEED = INITIAL_VELOCITY * 0.35
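# The *_VEL rates above are scaled by globalClock.getDt() each frame in
# DistributedPartyCannon.__localCannonMoveTask below, so they behave as
# per-second aiming rates (presumably in degrees, like the *_MIN/*_MAX limits).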
class DistributedPartyCannon(DistributedObject, Cannon):
notify = directNotify.newCategory('DistributedPartyCannon')
LOCAL_CANNON_MOVE_TASK = 'localCannonMoveTask'
def __init__(self, cr):
DistributedObject.__init__(self, cr)
Cannon.__init__(self, parent=self.getParentNodePath())
self.localCannonMoving = False
self.active = False
self.activityDoId = 0
self.activity = None
self.gui = None
self.toonInsideAvId = 0
self.sign = None
self.controllingToonAvId = None
return
def generateInit(self):
self.load()
self.activate()
def load(self):
self.notify.debug('load')
Cannon.load(self, self.uniqueName('Cannon'))
if base.cr and base.cr.partyManager and base.cr.partyManager.getShowDoid():
nameText = TextNode('nameText')
nameText.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
nameText.setCardDecal(True)
nameText.setCardColor(1.0, 1.0, 1.0, 0.0)
r = 232.0 / 255.0
g = 169.0 / 255.0
b = 23.0 / 255.0
nameText.setTextColor(r, g, b, 1)
nameText.setAlign(nameText.ACenter)
nameText.setShadowColor(0, 0, 0, 1)
nameText.setText(str(self.doId))
namePlate = self.parentNode.attachNewNode(nameText)
namePlate.setDepthWrite(0)
namePlate.setPos(0, 0, 8)
namePlate.setScale(3)
def announceGenerate(self):
self.sign = self.activity.sign.instanceUnderNode(self.activity.getParentNodePath(), self.uniqueName('sign'))
self.sign.reparentTo(self.activity.getParentNodePath())
self.sign.setPos(self.parentNode, self.sign.getPos())
def unload(self):
self.notify.debug('unload')
if self.gui is not None:
self.gui.unload()
del self.gui
Cannon.unload(self)
if self.sign is not None:
self.sign.removeNode()
self.sign = None
self.ignoreAll()
return
def getParentNodePath(self):
if hasattr(base.cr.playGame, 'hood') and base.cr.playGame.hood and hasattr(base.cr.playGame.hood, 'loader') and base.cr.playGame.hood.loader and hasattr(base.cr.playGame.hood.loader, 'geom') and base.cr.playGame.hood.loader.geom:
return base.cr.playGame.hood.loader.geom
else:
self.notify.warning('Hood or loader not created, defaulting to render')
return render
def disable(self):
self.notify.debug('disable')
self.ignoreAll()
self.__disableCannonControl()
self.setMovie(PartyGlobals.CANNON_MOVIE_CLEAR, 0)
def delete(self):
self.deactivate()
self.unload()
DistributedObject.delete(self)
def destroy(self):
self.notify.debug('destroy')
DistributedObject.destroy(self)
def setPosHpr(self, x, y, z, h, p, r):
self.parentNode.setPosHpr(x, y, z, h, p, r)
def setActivityDoId(self, doId):
self.activityDoId = doId
self.activity = base.cr.doId2do[doId]
def activate(self):
self.accept(self.getEnterCollisionName(), self.__handleToonCollisionWithCannon)
Cannon.show(self)
self.active = True
def deactivate(self):
self.ignore(self.getEnterCollisionName())
Cannon.hide(self)
self.active = False
def setMovie(self, mode, avId):
self.notify.debug('%s setMovie(%s, %s)' % (self.doId, mode, avId))
if mode == PartyGlobals.CANNON_MOVIE_CLEAR:
self.setClear()
elif mode == PartyGlobals.CANNON_MOVIE_FORCE_EXIT:
self.exitCannon(avId)
self.setClear()
elif mode == PartyGlobals.CANNON_MOVIE_LOAD:
self.enterCannon(avId)
elif mode == PartyGlobals.CANNON_MOVIE_LANDED:
self.setLanded(avId)
else:
self.notify.error('setMovie Unhandled case mode=%d avId=%d' % (mode, avId))
def __handleToonCollisionWithCannon(self, collEntry):
self.notify.debug('collEntry: %s' % collEntry)
if base.cr.playGame.getPlace().getState() == 'walk' and self.toonInsideAvId == 0:
base.cr.playGame.getPlace().setState('activity')
self.d_requestEnter()
def d_requestEnter(self):
self.sendUpdate('requestEnter', [])
def requestExit(self):
self.notify.debug('requestExit')
base.localAvatar.reparentTo(render)
base.cr.playGame.getPlace().setState('walk')
def __avatarGone(self, avId):
if self.toonInsideAvId == avId:
self.notify.debug('__avatarGone in if')
if self.toonInside and not self.toonInside.isEmpty():
self.removeToonDidNotFire()
self.setMovie(PartyGlobals.CANNON_MOVIE_CLEAR, 0)
else:
self.notify.debug('__avatarGone in else, self.toonInsideAvId=%s avId=%s' % (self.toonInsideAvId, avId))
def enterCannon(self, avId):
if avId == base.localAvatar.doId:
base.localAvatar.pose('lose', 110)
base.localAvatar.pose('slip-forward', 25)
base.cr.playGame.getPlace().setState('activity')
base.localAvatar.collisionsOff()
base.camera.reparentTo(self.barrelNode)
base.camera.setPos(0, -2, 5)
base.camera.setP(-20)
if not self.activity.hasPlayedBefore():
self.activity.displayRules()
self.acceptOnce(DistributedPartyCannonActivity.RULES_DONE_EVENT, self.__enableCannonControl)
else:
self.__enableCannonControl()
self.controllingToonAvId = avId
if avId in self.cr.doId2do:
self.toonInsideAvId = avId
self.notify.debug('enterCannon self.toonInsideAvId=%d' % self.toonInsideAvId)
toon = base.cr.doId2do[avId]
if toon:
self.acceptOnce(toon.uniqueName('disable'), self.__avatarGone, extraArgs=[avId])
toon.stopSmooth()
toon.dropShadow.hide()
self.placeToonInside(toon)
else:
self.__avatarGone(avId)
else:
self.notify.warning('Unknown avatar %d in cannon %d' % (avId, self.doId))
def exitCannon(self, avId):
if avId == base.localAvatar.doId:
self.activity.finishRules()
self.ignore(DistributedPartyCannonActivity.RULES_DONE_EVENT)
self.ignoreDisableForAvId(avId)
if self.gui and avId == base.localAvatar.doId:
self.gui.unload()
toon = base.cr.doId2do.get(avId)
if toon and self.getToonInside() == toon:
self.resetToon()
else:
self.notify.debug('not resetting toon, toon=%s, self.getToonInside()=%s' % (toon, self.getToonInside()))
def resetToon(self, pos = None):
self.notify.debug('resetToon')
toon = self.getToonInside()
toonInsideAvId = self.toonInsideAvId
self.notify.debug('%d resetToon self.toonInsideAvId=%d' % (self.doId, self.toonInsideAvId))
self.removeToonDidNotFire()
self.__setToonUpright(toon, pos)
if toonInsideAvId == base.localAvatar.doId:
self.notify.debug('%d resetToon toonInsideAvId ==localAvatar.doId' % self.doId)
if pos:
self.notify.debug('toon setting position to %s' % pos)
base.localAvatar.setPos(pos)
base.camera.reparentTo(base.localAvatar)
base.localAvatar.collisionsOn()
base.localAvatar.startPosHprBroadcast()
base.localAvatar.enableAvatarControls()
self.notify.debug('currentState=%s, requesting walk' % base.cr.playGame.getPlace().getState())
base.cr.playGame.getPlace().setState('walk')
self.notify.debug('after request walk currentState=%s,' % base.cr.playGame.getPlace().getState())
toon.dropShadow.show()
self.d_setLanded()
def __setToonUpright(self, toon, pos = None):
if not pos:
pos = toon.getPos(render)
toon.setPos(render, pos)
toon.loop('neutral')
toon.lookAt(self.parentNode)
toon.setP(0)
toon.setR(0)
toon.setScale(1, 1, 1)
def d_setLanded(self):
self.notify.debugStateCall(self)
if self.toonInsideAvId == base.localAvatar.doId:
self.sendUpdate('setLanded', [base.localAvatar.doId])
def setLanded(self, avId):
self.removeAvFromCannon(avId)
self.ignoreDisableForAvId(avId)
def removeAvFromCannon(self, avId):
place = base.cr.playGame.getPlace()
av = base.cr.doId2do.get(avId)
self.notify.debug('removeAvFromCannon')
if place:
if not hasattr(place, 'fsm'):
return
placeState = place.fsm.getCurrentState().getName()
self.notify.debug('placeState=%s' % placeState)
if placeState != 'fishing':
if av is not None:
av.startSmooth()
self.__destroyToonModels(avId)
return
self.notify.debug('%s removeAvFromCannon' % self.doId)
if av is not None:
self.notify.debug('%d removeAvFromCannon: destroying toon models' % self.doId)
av.resetLOD()
if av == base.localAvatar:
if place:
place.fsm.request('walk')
av.setPlayRate(1.0, 'run')
if av.nametag and self.toonHead:
av.nametag.remove(self.toonHead.tag)
if av.getParent().getName() == 'toonOriginChange':
av.wrtReparentTo(render)
self.__setToonUpright(av)
if av == base.localAvatar:
av.startPosHprBroadcast()
av.startSmooth()
av.setScale(1, 1, 1)
self.ignore(av.uniqueName('disable'))
self.__destroyToonModels(avId)
return
def __destroyToonModels(self, avId):
av = base.cr.doId2do.get(avId)
if not av:
return
if av is not None:
av.dropShadow.show()
self.hitBumper = 0
self.hitTarget = 0
self.angularVel = 0
self.vel = Vec3(0, 0, 0)
self.lastVel = Vec3(0, 0, 0)
self.lastPos = Vec3(0, 0, 0)
self.landingPos = Vec3(0, 0, 0)
self.t = 0
self.lastT = 0
self.deltaT = 0
av = None
self.lastWakeTime = 0
self.localToonShooting = 0
if self.toonHead is not None:
self.toonHead.reparentTo(hidden)
self.toonHead.stopBlink()
self.toonHead.stopLookAroundNow()
self.toonHead.delete()
self.toonHead = None
self.model_Created = 0
return
def setClear(self):
toon = base.cr.doId2do.get(self.toonInsideAvId)
toonName = 'None'
self.ignoreDisableForAvId(self.toonInsideAvId)
if toon and self.isToonInside():
toonName = toon.getName()
toon.resetLOD()
toon.setPlayRate(1.0, 'run')
if toon.getParent().getName() == 'toonOriginChange':
toon.wrtReparentTo(render)
self.__setToonUpright(toon)
toon.startSmooth()
toon.setScale(1, 1, 1)
self.ignore(toon.uniqueName('disable'))
if self.toonInsideAvId == base.localAvatar.doId:
toon.startPosHprBroadcast()
try:
base.localAvatar.enableAvatarControls()
except:
self.notify.warning("couldn't enable avatar controls")
base.cr.playGame.getPlace().setState('walk')
else:
self.notify.debug('setClear in else toon=%s, self.isToonInside()=%s' % (toonName, self.isToonInside()))
self.toonInsideAvId = 0
self.notify.debug('setClear self.toonInsideAvId=%d' % self.toonInsideAvId)
if self.controllingToonAvId == base.localAvatar.doId:
self.notify.debug('setClear turning off cannon control')
self.__disableCannonControl()
self.controllingToonAvId = 0
def __enableCannonControl(self):
if not self.gui:
self.gui = self.activity.gui
self.gui.load()
self.gui.enable(timer=PartyGlobals.CANNON_TIMEOUT)
self.d_setTimeout()
self.accept(CannonGui.FIRE_PRESSED, self.__handleFirePressed)
self.__startLocalCannonMoveTask()
def d_setTimeout(self):
self.sendUpdate('setTimeout')
def __disableCannonControl(self):
if self.gui:
self.gui.unload()
self.ignore(CannonGui.FIRE_PRESSED)
self.__stopLocalCannonMoveTask()
def __startLocalCannonMoveTask(self):
self.localCannonMoving = False
task = Task(self.__localCannonMoveTask)
task.lastPositionBroadcastTime = 0.0
taskMgr.add(task, self.LOCAL_CANNON_MOVE_TASK)
def __stopLocalCannonMoveTask(self):
taskMgr.remove(self.LOCAL_CANNON_MOVE_TASK)
if self.localCannonMoving:
self.localCannonMoving = False
self.stopMovingSound()
def __localCannonMoveTask(self, task):
rotVel = 0
if self.gui.leftPressed:
rotVel += CANNON_ROTATION_VEL
if self.gui.rightPressed:
rotVel -= CANNON_ROTATION_VEL
self.setRotation(self.getRotation() + rotVel * globalClock.getDt())
angVel = 0
if self.gui.upPressed:
angVel += CANNON_ANGLE_VEL
if self.gui.downPressed:
angVel -= CANNON_ANGLE_VEL
self.setAngle(self.getAngle() + angVel * globalClock.getDt())
if self.hasMoved():
if not self.localCannonMoving:
self.localCannonMoving = True
self.loopMovingSound()
self.updateModel()
if task.time - task.lastPositionBroadcastTime > CANNON_MOVE_UPDATE_FREQ:
self.notify.debug('Broadcast local cannon %s position' % self.doId)
task.lastPositionBroadcastTime = task.time
self.__broadcastLocalCannonPosition()
elif self.localCannonMoving:
self.localCannonMoving = False
self.stopMovingSound()
self.__broadcastLocalCannonPosition()
self.notify.debug('Cannon Rot = %s, Angle = %s' % (self._rotation, self._angle))
return Task.cont
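# Note on the task above: the aim is updated locally every frame, but the new
# rotation/angle is only broadcast to other clients at most once per
# CANNON_MOVE_UPDATE_FREQ seconds while the cannon is moving, plus one final
# update when it stops moving.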
def __broadcastLocalCannonPosition(self):
self.d_setCannonPosition(self._rotation, self._angle)
def d_setCannonPosition(self, zRot, angle):
self.sendUpdate('setCannonPosition', [zRot, angle])
def updateCannonPosition(self, avId, zRot, angle):
if avId and avId == self.toonInsideAvId and avId != base.localAvatar.doId:
self.notify.debug('update cannon %s position zRot = %d, angle = %d' % (self.doId, zRot, angle))
self.setRotation(zRot)
self.setAngle(angle)
self.updateModel()
def __handleFirePressed(self):
self.notify.debug('fire pressed')
self.__disableCannonControl()
self.__broadcastLocalCannonPosition()
self.d_setCannonLit(self._rotation, self._angle)
def d_setCannonLit(self, zRot, angle):
self.sendUpdate('setCannonLit', [zRot, angle])
def fire(self):
if base.localAvatar.doId == self.controllingToonAvId:
self.__disableCannonControl()
self.d_setFired()
self.playFireSequence()
self.controllingToonAvId = None
return
def d_setFired(self):
self.sendUpdate('setFired', [])
def ignoreDisableForAvId(self, avId):
toon = base.cr.doId2do.get(avId)
if toon:
self.notify.debug('ignoring %s' % toon.uniqueName('disable'))
self.ignore(toon.uniqueName('disable'))
else:
self.notify.debug('ignoring disable-%s' % self.toonInsideAvId)
self.ignore('disable-%s' % self.toonInsideAvId)
|
|
# Created by Pearu Peterson, June 2003
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, suppress_warnings)
from pytest import raises as assert_raises
from numpy import array, diff, linspace, meshgrid, ones, pi, shape
from scipy.interpolate.fitpack import bisplrep, bisplev
from scipy.interpolate.fitpack2 import (UnivariateSpline,
LSQUnivariateSpline, InterpolatedUnivariateSpline,
LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
RectSphereBivariateSpline)
class TestUnivariateSpline:
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
arg = [1.5, 2, 2.5]
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
assert_array_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
assert_array_equal(spl([]), array([]))
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
assert_allclose(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
assert_allclose(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
def test_derivative_extrapolation(self):
# Regression test for gh-10195: for a const-extrapolation spline
# its derivative evaluates to zero for extrapolation
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 5]
f = UnivariateSpline(x_values, y_values, ext='const', k=3)
x = [-1, 0, -0.5, 9, 9.5, 10]
assert_allclose(f.derivative()(x), 0, atol=1e-15)
def test_integral_out_of_bounds(self):
# Regression test for gh-7906: .integral(a, b) is wrong if both
# a and b are out-of-bounds
x = np.linspace(0., 1., 7)
for ext in range(4):
f = UnivariateSpline(x, x, s=0, ext=ext)
for (a, b) in [(1, 1), (1, 5), (2, 5),
(0, 0), (-2, 0), (-2, -1)]:
assert_allclose(f.integral(a, b), 0, atol=1e-15)
def test_nan(self):
# bail out early if the input data contains nans
x = np.arange(10, dtype=float)
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(x, y, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
y_end = y[-1]
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, check_finite=True))
y[-1] = y_end # check valid y but invalid w
w[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_strictly_increasing_x(self):
# Test that x is required to be strictly increasing for
# UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
# but merely increasing for UnivariateSpline if s>0
# and for LSQUnivariateSpline; see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0]
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, s=0, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
def test_increasing_x(self):
# Test that x is required to be increasing, see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0] - 1.0
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_invalid_input_for_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
UnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
UnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
UnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_invalid_input_for_interpolated_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
InterpolatedUnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
InterpolatedUnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_invalid_input_for_lsq_univariate_spline(self):
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
spl = UnivariateSpline(x_values, y_values, check_finite=True)
t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
LSQUnivariateSpline(x_values, y_values, t_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [1.0, 1.0, 1.0, 1.0]
LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (100, -100)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
assert "Interior knots t must satisfy Schoenberg-Whitney conditions" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
LSQUnivariateSpline(x_values, y_values, t_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_array_like_input(self):
x_values = np.array([1, 2, 4, 6, 8.5])
y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
bbox = np.array([-100, 100])
# np.array input
spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
bbox=bbox)
# list input
spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
w=w_values.tolist(), bbox=bbox.tolist())
assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
spl2([0.1, 0.5, 0.9, 0.99]))
class TestLSQBivariateSpline:
# NOTE: The systems in this test class are rank-deficient
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_equal(len(r), 1)
assert_almost_equal(lut(2,2), 3.)
def test_bilinearity(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,7,8,3,4,7,1,3,4]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
for xa, xb in zip(tx[:-1], tx[1:]):
for ya, yb in zip(ty[:-1], ty[1:]):
for t in [0.1, 0.5, 0.9]:
for s in [0.3, 0.4, 0.7]:
xp = xa*(1-t) + xb*t
yp = ya*(1-s) + yb*s
zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ lut(xb, ya)*t*(1-s)
+ lut(xa, yb)*(1-t)*s
+ lut(xb, yb)*t*s)
assert_almost_equal(lut(xp,yp), zp)
def test_integral(self):
x = [1,1,1,2,2,2,8,8,8]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
tx, ty = lut.get_knots()
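# The remaining lines form a trapezoidal-rule estimate of the integral over
# the knot grid (corner-averaged cell values weighted by the cell areas) and
# check that the spline's own integral() matches it for this bilinear fit.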
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
trpz)
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
assert_array_equal(lut([], []), np.zeros((0,0)))
assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
s = 0.1
tx = [1 + s, 3 - s]
ty = [1 + s, 3 - s]
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0, num=10)
LSQBivariateSpline(x, y, z, tx, ty)
assert "x, y, and z should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0, num=20)
LSQBivariateSpline(x, y, z, tx, ty, w=w)
assert "x, y, z, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
w = np.linspace(-1.0, 10.0)
LSQBivariateSpline(x, y, z, tx, ty, w=w)
assert "w should be positive" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-100, 100, -100)
LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
str(info.value)
with assert_raises(ValueError) as exc_info:
LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
s = 0.1
tx = np.array([1 + s, 3 - s])
ty = np.array([1 + s, 3 - s])
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0)
bbox = np.array([1.0, 10.0, 1.0, 10.0])
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
# np.array input
spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
# list input
spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
tx.tolist(), ty.tolist(), w=w.tolist(),
bbox=bbox)
assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
assert_equal(len(r), 2)
def test_unequal_length_of_knots(self):
"""Test for the case when the input knot-location arrays in x and y are
of different lengths.
"""
x, y = np.mgrid[0:100, 0:100]
x = x.ravel()
y = y.ravel()
z = 3.0 * np.ones_like(x)
tx = np.linspace(0.1, 98.0, 29)
ty = np.linspace(0.1, 98.0, 33)
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty)
assert_equal(len(r), 1)
assert_almost_equal(lut(x, y, grid=False), z)
class TestSmoothBivariateSpline:
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe required storage space")
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0, num=10)
SmoothBivariateSpline(x, y, z)
assert "x, y, and z should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0, num=20)
SmoothBivariateSpline(x, y, z, w=w)
assert "x, y, z, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
w = np.linspace(-1.0, 10.0)
SmoothBivariateSpline(x, y, z, w=w)
assert "w should be positive" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-100, 100, -100)
SmoothBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, kx=10, ky=10)
assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
bbox = np.array([1.0, 3.0, 1.0, 3.0])
# np.array input
spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
# list input
spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist(), w=w.tolist(),
kx=1, ky=1)
assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
class TestLSQSphereBivariateSpline:
def setup_method(self):
# define the input data and coordinates
ntheta, nphi = 70, 90
theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
knotdata = data[::5, ::5]
# calculate spline coefficients
lats, lons = meshgrid(theta, phi)
lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
self.lut_lsq = lut_lsq
self.data = knotdata
self.new_lons, self.new_lats = knotsp, knotst
def test_linear_constant(self):
assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
self.data)
def test_empty_input(self):
assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
ntheta, nphi = 70, 90
theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
ntheta) * pi
phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
with assert_raises(ValueError) as exc_info:
invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
invalid_lats, lons = meshgrid(invalid_theta, phi)
LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
invalid_lats, lons = meshgrid(invalid_theta, phi)
LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
lats, invalid_lons = meshgrid(theta, invalid_phi)
LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
lats, invalid_lons = meshgrid(theta, invalid_phi)
LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
lats, lons = meshgrid(theta, phi)
with assert_raises(ValueError) as exc_info:
invalid_knotst = np.copy(knotst)
invalid_knotst[0] = -0.1
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), invalid_knotst, knotsp)
assert "tt should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotst = np.copy(knotst)
invalid_knotst[0] = pi
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), invalid_knotst, knotsp)
assert "tt should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotsp = np.copy(knotsp)
invalid_knotsp[0] = -0.1
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, invalid_knotsp)
assert "tp should be between (0, 2pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotsp = np.copy(knotsp)
invalid_knotsp[0] = 2 * pi
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, invalid_knotsp)
assert "tp should be between (0, 2pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, w=invalid_w)
assert "w should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
ntheta, nphi = 70, 90
theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
ntheta) * pi
phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
nphi) * 2. * pi
lats, lons = meshgrid(theta, phi)
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
w = ones((lats.ravel().shape[0]))
# np.array input
spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp, w=w)
# list input
spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
lons.ravel().tolist(),
data.T.ravel().tolist(),
knotst.tolist(),
knotsp.tolist(), w=w.tolist())
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
class TestSmoothSphereBivariateSpline:
def setup_method(self):
theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
.75*pi, .75*pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
def test_linear_constant(self):
assert_almost_equal(self.lut.get_residual(), 0.)
assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
[[3, 3], [3, 3], [3, 3]])
def test_empty_input(self):
assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
.75 * pi, .75 * pi, .75 * pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
with assert_raises(ValueError) as exc_info:
invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
.5 * pi, .5 * pi, .75 * pi, .75 * pi,
.75 * pi])
SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
.5 * pi, .5 * pi, .75 * pi, .75 * pi,
1.1 * pi])
SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
.5 * pi, pi, 1.5 * pi])
SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
.5 * pi, pi, 2.1 * pi])
SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
assert "w should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
assert "s should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
.5 * pi, .75 * pi, .75 * pi, .75 * pi])
phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
pi, 1.5 * pi])
r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# np.array input
spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
# list input
spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
r.tolist(), w=w.tolist(), s=1E10)
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
class TestRectBivariateSpline:
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = array([6, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([2, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "y must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x dimension of z must have same number of elements as x"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
[1, 2, 2, 2], [1, 2, 1, 2]])
RectBivariateSpline(x, y, z)
assert "y dimension of z must have same number of elements as y"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = (-100, 100, -100)
RectBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
RectBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_array_like_input(self):
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = array([1, 5, 1, 5])
spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist())
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
def test_not_increasing_input(self):
# gh-8565
NSamp = 20
Theta = np.random.uniform(0, np.pi, NSamp)
Phi = np.random.uniform(0, 2 * np.pi, NSamp)
Data = np.ones(NSamp)
Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
NLon = 6
NLat = 3
GridPosLats = np.arange(NLat) / NLat * np.pi
GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
# No error
Interpolator(GridPosLats, GridPosLons)
nonGridPosLats = GridPosLats.copy()
nonGridPosLats[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(nonGridPosLats, GridPosLons)
assert "x must be strictly increasing" in str(exc_info.value)
nonGridPosLons = GridPosLons.copy()
nonGridPosLons[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(GridPosLats, nonGridPosLons)
assert "y must be strictly increasing" in str(exc_info.value)
class TestRectSphereBivariateSpline:
def test_defaults(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_derivatives(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
assert_allclose(lut(x, y, dtheta=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_invalid_input(self):
data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
with assert_raises(ValueError) as exc_info:
lats = np.linspace(0, 170, 9) * np.pi / 180.
lons = np.linspace(0, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "u should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 180, 9) * np.pi / 180.
lons = np.linspace(0, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "u should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(-181, 10, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(-10, 360, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(10, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data, s=-1)
assert "s should be positive" in str(exc_info.value)
def test_array_like_input(self):
y = linspace(0.01, 2 * pi - 0.01, 7)
x = linspace(0.01, pi - 0.01, 7)
z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
[1, 2, 3, 2, 1, 2, 1],
[1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
[1, 2, 2, 2, 1, 2, 1],
[1, 2, 1, 2, 1, 2, 1]])
# np.array input
spl1 = RectSphereBivariateSpline(x, y, z)
# list input
spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
assert_array_almost_equal(spl1(x, y), spl2(x, y))
def test_negative_evaluation(self):
lats = np.array([25, 30, 35, 40, 45])
lons = np.array([-90, -85, -80, -75, 70])
mesh = np.meshgrid(lats, lons)
data = mesh[0] + mesh[1] # lon + lat value
lat_r = np.radians(lats)
lon_r = np.radians(lons)
interpolator = RectSphereBivariateSpline(lat_r, lon_r, data)
query_lat = np.radians(np.array([35, 37.5]))
query_lon = np.radians(np.array([-80, -77.5]))
data_interp = interpolator(query_lat, query_lon)
ans = np.array([[-45.0, -42.480862],
[-49.0625, -46.54315]])
assert_array_almost_equal(data_interp, ans)
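# _numdiff_2d below is the finite-difference helper used by the derivative
# tests above: it approximates d/dx, d/dy and the mixed d2/(dx dy) of func via
# central differences with half-width eps, e.g.
# (func(x + eps, y) - func(x - eps, y)) / (2 * eps) for dx=1.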
def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
if dx == 0 and dy == 0:
return func(x, y)
elif dx == 1 and dy == 0:
return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
elif dx == 0 and dy == 1:
return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
elif dx == 1 and dy == 1:
return (func(x + eps, y + eps) - func(x - eps, y + eps)
- func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
else:
raise ValueError("invalid derivative order")
|
|
#!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
def parse_branch_ref(filename):
"""Given a filename of a .git/HEAD file return ref path.
In particular, if git is in detached head state, this will
return None. If git is in attached head, it will return
the branch reference. E.g. if on 'master', the HEAD will
contain 'ref: refs/heads/master' so 'refs/heads/master'
will be returned.
Example: parse_branch_ref(".git/HEAD")
Args:
filename: file to treat as a git HEAD file
Returns:
None if detached head, otherwise ref subpath
Raises:
RuntimeError: if the HEAD file is unparseable.
"""
data = open(filename).read().strip()
items = data.split(" ")
if len(items) == 1:
return None
elif len(items) == 2 and items[0] == "ref:":
return items[1].strip()
else:
raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
"""Configure `src_base_path` to embed git hashes if available."""
# TODO(aselle): No files generated or symlinked here are deleted by
# the build system. I don't know of a way to do it in bazel. It
# should only be a problem if somebody moves a sandbox directory
# without running ./configure again.
git_path = os.path.join(src_base_path, ".git")
# Remove and recreate the path
if os.path.exists(gen_path):
if os.path.isdir(gen_path):
try:
shutil.rmtree(gen_path)
except OSError:
raise RuntimeError("Cannot delete directory %s due to permission "
"error, inspect and remove manually" % gen_path)
else:
raise RuntimeError("Cannot delete non-directory %s, inspect ",
"and remove manually" % gen_path)
os.makedirs(gen_path)
if not os.path.isdir(gen_path):
raise RuntimeError("gen_git_source.py: Failed to create dir")
# file that specifies what the state of the git repo is
spec = {}
# value file names will be mapped to the keys
link_map = {"head": None, "branch_ref": None}
if not os.path.isdir(git_path):
# No git directory
spec["git"] = False
open(os.path.join(gen_path, "head"), "w").write("")
open(os.path.join(gen_path, "branch_ref"), "w").write("")
else:
# Git directory, possibly detached or attached
spec["git"] = True
spec["path"] = src_base_path
git_head_path = os.path.join(git_path, "HEAD")
spec["branch"] = parse_branch_ref(git_head_path)
link_map["head"] = git_head_path
if spec["branch"] is not None:
# attached method
link_map["branch_ref"] = os.path.join(git_path, *
os.path.split(spec["branch"]))
# Create symlinks or dummy files
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
elif not os.path.exists(src):
# Git repo is configured in a way we don't support such as having
# packed refs. Even though in a git repo, tf.__git_version__ will not
# be accurate.
# TODO(mikecase): Support grabbing git info when using packed refs.
open(os.path.join(gen_path, target), "w").write("")
spec["git"] = False
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
if hasattr(os, 'symlink'):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
except OSError:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
print("gen_git_source.py: list %s" % gen_path)
print("gen_git_source.py: %s" + repr(os.listdir(gen_path)))
print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
This will return a string of the form:
<base-tag>-<number of commits since tag>-<shortened sha hash>
For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
compiled. 1585 commits came after that tag, and we can get back to this
version by running `git checkout bb717a6` (the leading 'g' in the describe
output just marks it as a git hash).
Args:
git_base_path: where the .git directory is located
Returns:
A bytestring representing the git version
"""
unknown_label = b"unknown"
try:
val = bytes(subprocess.check_output([
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
return val if val else unknown_label
except subprocess.CalledProcessError:
return unknown_label
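# Hedged sketch (illustration only): splitting the `git describe --long --tags`
# output documented above into its tag, commit-count and short-hash parts. The
# default value comes from the docstring example; real output varies per repo.
def _split_describe_output(version=b"v0.10.0-1585-gbb717a6"):
  tag, commits_since_tag, short_hash = version.rsplit(b"-", 2)
  return tag, int(commits_since_tag), short_hash[1:]  # drop the leading 'g'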
def write_version_info(filename, git_version):
"""Write a c file that defines the version functions.
Args:
filename: filename to write to.
git_version: the result of a git describe.
"""
if b"\"" in git_version or b"\\" in git_version:
git_version = "git_version_is_invalid" # do not cause build to fail!
contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {return __VERSION__;}
const int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
return _GLIBCXX_USE_CXX11_ABI;
#else
return 0;
#endif
}
const int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
return 1;
#else
return 0;
#endif
}
""" % git_version
open(filename, "w").write(contents)
def generate(arglist):
"""Generate version_info.cc as given `destination_file`.
Args:
arglist: should be a sequence that contains
spec, head_symlink, ref_symlink, destination_file.
`destination_file` is the filename where version_info.cc will be written
`spec` is a filename where the file contains a JSON dictionary
'git' bool that is true if the source is in a git repo
'path' base path of the source code
'branch' the name of the ref specification of the current branch/tag
`head_symlink` is a filename to HEAD that is cross-referenced against
what is contained in the json branch designation.
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
"""
# unused ref_symlink arg
spec, head_symlink, _, dest_file = arglist
data = json.load(open(spec))
git_version = None
if not data["git"]:
git_version = b"unknown"
else:
old_branch = data["branch"]
new_branch = parse_branch_ref(head_symlink)
if new_branch != old_branch:
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
git_version = get_git_version(data["path"])
write_version_info(dest_file, git_version)
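# Hedged illustration (not part of the original script): the spec file consumed
# by generate() is the JSON dictionary written by configure(); the placeholder
# values below show its shape.
_EXAMPLE_SPEC = {
    "git": True,
    "path": "/path/to/source",
    "branch": "refs/heads/master",
}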
def raw_generate(output_file):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
to build unconditionally.
Args:
output_file: Output filename for the version info cc
"""
git_version = get_git_version(".")
write_version_info(output_file, git_version)
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
"--debug",
type=bool,
help="print debugging information about paths",
default=False)
parser.add_argument(
"--configure", type=str,
help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
"--gen_root_path", type=str,
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
nargs="+")
parser.add_argument(
"--raw_generate",
type=str,
help="Generate version_info.cc (simpler version used for cmake/make)")
args = parser.parse_args()
if args.configure is not None:
if args.gen_root_path is None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
generate(args.generate)
elif args.raw_generate is not None:
raw_generate(args.raw_generate)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
|
|
import hashlib
import os
from tempfile import NamedTemporaryFile
import numpy as np
from cnntools.caffefileproc import parse_model_definition_file
from cnntools.common_utils import ensuredir, safe_save_content
from cnntools.models import CaffeCNNSnapshot, CaffeCNNTrainingRun
from cnntools.utils import (add_caffe_to_path, get_file_content,
named_file_from_content)
from django.conf import settings
def upload_snapshot(caffe_cnn_trrun_id, snapshot_path, it, verbose=True):
if verbose:
print 'Uploading snapshot {}...'.format(snapshot_path)
snapshot_content = open(snapshot_path).read()
sha1 = hashlib.sha1(snapshot_content).hexdigest()
training_run = CaffeCNNTrainingRun.objects.get(id=caffe_cnn_trrun_id)
# Training run ID + iteration number uniquely identifies the snapshot
name = '{}-trrun{}-it{}'.format(
training_run.net.netid,
caffe_cnn_trrun_id,
it,
)
return CaffeCNNSnapshot.objects.create(
training_run_id=caffe_cnn_trrun_id,
iteration=it,
sha1=sha1,
model_snapshot=named_file_from_content(snapshot_content, name)
)
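# Hedged usage sketch (the training-run id, iteration and snapshot path are
# placeholders): register a .caffemodel produced at iteration 10000 of
# training run 7.
def _upload_snapshot_example():
    return upload_snapshot(
        caffe_cnn_trrun_id=7,
        snapshot_path='/tmp/snapshot_iter_10000.caffemodel',
        it=10000,
    )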
def download_snapshot(snapshot_id, transfer):
snapshot = CaffeCNNSnapshot.objects.get(id=snapshot_id)
snapshot_dir = os.path.join(
'snapshots',
str(snapshot_id),
)
snapshot_dirpath = os.path.join(settings.CAFFE_ROOT, snapshot_dir)
ensuredir(snapshot_dirpath)
weights_relpath = os.path.join(
snapshot_dir,
'snapshot.caffemodel'
)
deployfile_relpath = os.path.join(
snapshot_dir,
'deploy.prototxt'
)
trainfile_relpath = os.path.join(
snapshot_dir,
'train_val.prototxt'
)
weights_path = os.path.join(settings.CAFFE_ROOT, weights_relpath)
if not os.path.exists(weights_path):
print 'Downloading snapshot {}...'.format(weights_path)
snapshot_content = get_file_content(snapshot.model_snapshot)
sha1 = hashlib.sha1(snapshot_content).hexdigest()
if sha1 != snapshot.sha1:
raise ValueError(
'The SHA1 digest of the downloaded snapshot '
'doesn\'t match the uploaded one'
)
safe_save_content(snapshot_dirpath, weights_path, snapshot_content)
deployfile_path = os.path.join(settings.CAFFE_ROOT, deployfile_relpath)
if not os.path.exists(deployfile_path):
safe_save_content(
snapshot_dirpath, deployfile_path,
snapshot.training_run.get_deploy_file_content()
)
trainfile_path = os.path.join(settings.CAFFE_ROOT, trainfile_relpath)
if not os.path.exists(trainfile_path):
safe_save_content(
snapshot_dirpath, trainfile_path,
snapshot.training_run.get_model_file_content()
)
if transfer:
# Transfer the weights so they are compatible with the fully
# convolutional network
transferred_weights_relpath = os.path.join(
snapshot_dir,
'snapshot-conv.caffemodel'
)
if not os.path.exists(os.path.join(settings.CAFFE_ROOT, transferred_weights_relpath)):
transfer_weights(
temp_dir=snapshot_dirpath,
deployfile_source_path=os.path.join(settings.CAFFE_ROOT, trainfile_relpath),
weights_source_path=os.path.join(settings.CAFFE_ROOT, weights_relpath),
deployfile_target_path=os.path.join(settings.CAFFE_ROOT, deployfile_relpath),
weights_target_path=os.path.join(settings.CAFFE_ROOT, transferred_weights_relpath),
verbose=True,
)
else:
transferred_weights_relpath = weights_relpath
return deployfile_relpath, transferred_weights_relpath
def get_fc_layers(deployfile_source):
layers = deployfile_source.layer
fc_type = 'InnerProduct'
return [
layer.name
for layer in layers
if layer.type == fc_type
]
def convolutionize_net(deployfile_source):
fc_layers = get_fc_layers(deployfile_source)
print 'Found fully connected layers:', fc_layers
from caffe.proto import caffe_pb2
deployfile_target = caffe_pb2.NetParameter()
deployfile_target.CopyFrom(deployfile_source)
deployfile_target.ClearField('layer')
param_mapping = {}
for layer in deployfile_source.layer:
layer_target = deployfile_target.layer.add()
layer_target.CopyFrom(layer)
if layer.name in fc_layers:
layer_target.name = layer_target.name + '-conv'
layer_target.type = 'Convolution'
param_mapping[layer_target.name] = layer.name
# TODO: Compute proper convolution size....
return deployfile_target, param_mapping
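# Hedged illustration (layer names are hypothetical): for a net whose fully
# connected layers are named 'fc6' and 'fc7', convolutionize_net() renames them
# to 'fc6-conv' and 'fc7-conv', switches their type to 'Convolution', and
# returns param_mapping == {'fc6-conv': 'fc6', 'fc7-conv': 'fc7'}, which
# transfer_weights() uses to copy the learned parameters across.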
def print_net_weight_stats_file(deployfile_path, weights_path):
add_caffe_to_path()
import caffe
net = caffe.Net(deployfile_path, weights_path, caffe.TEST)
print_net_weight_stats(net)
def print_net_weight_stats(net):
print 'Net weights stats:'
for layer_name, weights in net.params.iteritems():
print layer_name
print '\tweights mean: %f +/- %f' % (np.mean(weights[0].data), np.std(weights[0].data))
print '\tbias mean: %f +/- %f' % (np.mean(weights[1].data), np.std(weights[1].data))
def transfer_weights(temp_dir, deployfile_source_path, weights_source_path,
deployfile_target_path, weights_target_path,
verbose=True):
deployfile_source = parse_model_definition_file(deployfile_source_path)
# Modify deploy file
deployfile_target, param_mapping = convolutionize_net(deployfile_source)
#save_protobuf_file(deployfile_target_path, deployfile_target)
add_caffe_to_path()
import caffe
net_source = caffe.Net(
deployfile_source_path,
weights_source_path,
caffe.TEST
)
net_target = caffe.Net(
deployfile_target_path,
weights_source_path,
caffe.TEST
)
for t, s in param_mapping.iteritems():
if t not in net_target.params:
print 'WARNING: Couldn\'t find "%s" layer in the target model definition file, skipping...' % t
continue
for blob_idx in (0, 1):
if verbose:
print '%s %s %s <-- %s %s %s' % (
t, blob_idx, net_target.params[t][blob_idx].data.shape,
s, blob_idx, net_source.params[s][blob_idx].data.shape,
)
net_target.params[t][blob_idx].data[...] = (
np.reshape(
net_source.params[s][blob_idx].data,
net_target.params[t][blob_idx].data.shape
)
)
if verbose:
print_net_weight_stats(net_target)
    # Save to a temp location first, so there is no collision if multiple
    # processes are trying to generate the same file
f_temp = NamedTemporaryFile(dir=temp_dir, delete=False)
f_temp.close()
# Use the temp file's name
net_target.save(f_temp.name)
# Move it to the final destination in an atomic operation
os.rename(f_temp.name, weights_target_path)
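# Hedged sketch of the same write-then-rename pattern for an arbitrary payload
# (illustration only; `content` is assumed to be a bytestring and `target_path`
# must live on the same filesystem as `temp_dir` for os.rename to be atomic).
def _atomic_write(temp_dir, target_path, content):
    f_temp = NamedTemporaryFile(dir=temp_dir, delete=False)
    f_temp.write(content)
    f_temp.close()
    os.rename(f_temp.name, target_path)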
def load_net_from_snapshot(snapshot_id):
deployfile_relpath, weights_relpath = download_snapshot(
snapshot_id=snapshot_id,
transfer=False,
)
add_caffe_to_path()
import caffe
deployfile_path = os.path.join(settings.CAFFE_ROOT, deployfile_relpath)
weights_path = os.path.join(settings.CAFFE_ROOT, weights_relpath)
return caffe.Net(deployfile_path, weights_path, caffe.TEST)
|
|
# ===========================================================================
import swap
import numpy as np
import pylab as plt
from subject import Ntrajectory
# ======================================================================
class Collection(object):
"""
NAME
Collection
PURPOSE
Model a collection of subjects.
COMMENTS
        All subjects in a Collection are Zooniverse subjects.
INITIALISATION
From scratch.
METHODS
Collection.member(Name) Returns the Subject called Name
Collection.size() Returns the size of the Collection
Collection.list() Returns the IDs of the members
BUGS
AUTHORS
This file is part of the Space Warps project, and is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
2013-04-17 Started: Marshall (Oxford)
"""
# ----------------------------------------------------------------------------
def __init__(self):
self.member = {}
self.probabilities = {'sim':np.array([]), 'dud':np.array([]), 'test':np.array([])}
self.exposure = {'sim':np.array([]), 'dud':np.array([]), 'test':np.array([])}
return None
# ----------------------------------------------------------------------------
def __str__(self):
return 'collection of %d subjects' % (self.size())
# ----------------------------------------------------------------------------
# Return the number of collection members:
def size(self):
return len(self.member)
# ----------------------------------------------------------------------------
# # Return an array giving each samples' exposure to the agents:
#
# def get_exposure(self):
#
# N = np.array([])
# for ID in self.list():
# subject = self.member[ID]
# N = np.append(N,subject.exposure)
#
# self.exposure = N
# return N
#
# ----------------------------------------------------------------------------
# Return a complete list of collection members:
def list(self):
return self.member.keys()
# ----------------------------------------------------------------------------
# Return a list of N collection members, selected at regular intervals. This
# *should* contain a significant number of training subjects, since on average
# 1 in 20 subjects are training...
def shortlist(self,N,kind='Any',status='Any'):
reallylonglist = self.list()
if kind == 'Any' and status == 'Any':
longlist = reallylonglist
else:
longlist = []
count = 0
for ID in reallylonglist:
subject = self.member[ID]
if (kind == 'Any' and subject.status == status) or \
(status == 'Any' and subject.kind == kind) or \
(kind == subject.kind and subject.status == status):
longlist.append(ID)
count += 1
if count < N: N = count
if N == 0:
shortlist = []
else:
shortlist = longlist[0::int(len(longlist)/N)][0:N]
return shortlist
# ----------------------------------------------------------------------------
# Get the probability thresholds for this sample:
def thresholds(self):
thresholds = {}
ID = self.shortlist(1)[0]
subject = self.member[ID]
thresholds['detection'] = subject.detection_threshold
thresholds['rejection'] = subject.rejection_threshold
return thresholds
# ----------------------------------------------------------------------------
# Extract all the lens probabilities of the members of a given kind:
def collect_probabilities(self,kind):
# p = np.array([])
# n = np.array([])
# for ID in self.list():
# subject = self.member[ID]
# if subject.kind == kind:
# p = np.append(p,subject.probability)
# n = np.append(n,subject.exposure)
#
# self.probabilities[kind] = p
# self.exposure[kind] = n
# print "Collecting probabilities in a faster way, size:",self.size()
# Appending wastes a lot of time
p = np.zeros(self.size())
n = np.zeros(self.size())
fill=0
for ID in self.list():
subject = self.member[ID]
if subject.kind == kind:
p[fill] = subject.mean_probability
n[fill] = subject.exposure
fill = fill + 1
self.probabilities[kind] = p[0:fill]
self.exposure[kind] = n[0:fill]
# print "Done collecting probabilities, hopefully faster now, size:",self.size()
return
# ----------------------------------------------------------------------
# Take stock: how many detections? how many rejections?
def take_stock(self):
self.N = 0
self.Ns = 0
self.Nt = 0
self.Ntl = 0
self.Ntd = 0
self.Ns_retired = 0
self.Ns_rejected = 0
self.Ns_detected = 0
self.Nt_rejected = 0
self.Nt_detected = 0
self.Ntl_rejected = 0
self.Ntl_detected = 0
self.Ntd_rejected = 0
self.Ntd_detected = 0
self.retirement_ages = np.array([])
for ID in self.list():
subject = self.member[ID]
self.N += 1
if subject.category == 'training':
self.Nt += 1
if subject.kind == 'sim':
self.Ntl += 1
elif subject.kind == 'dud':
self.Ntd += 1
else:
self.Ns += 1
# Detected or rejected?
if subject.status == 'detected':
if subject.category == 'training':
self.Nt_detected += 1
if subject.kind == 'sim':
self.Ntl_detected += 1
elif subject.kind == 'dud':
self.Ntd_detected += 1
else:
self.Ns_detected += 1
elif subject.status == 'rejected':
if subject.category == 'training':
self.Nt_rejected += 1
if subject.kind == 'sim':
self.Ntl_rejected += 1
elif subject.kind == 'dud':
self.Ntd_rejected += 1
else:
self.Ns_rejected += 1
if subject.state == 'inactive':
self.Ns_retired += 1
self.retirement_ages = np.append(self.retirement_ages,subject.retirement_age)
return
# ----------------------------------------------------------------------
# Make a list of subjects that have been retired during this run:
def retirementlist(self):
the_departed = ['none','yet']
return the_departed
# ----------------------------------------------------------------------
# Prepare to plot subjects' trajectories:
def start_trajectory_plot(self,final=False,title=None,histogram=True,logscale=True):
left, width = 0.15, 0.8
if histogram:
fig = plt.figure(figsize=(5,8), dpi=300)
upperarea = [left, 0.4, width, 0.5] # left, bottom, width, height
lowerarea = [left, 0.1, width, 0.3]
else:
fig = plt.figure(figsize=(5,5), dpi=300)
upperarea = [left, left, width, width] # left, bottom, width, height
lowerarea = []
# Upper panel: subjects drifting downwards:
# First plot an arrow to show the subjects entering the plot.
# This is non-trivial, you have to overlay in a different
# set of axes, with linear scales...
hax = fig.add_axes(upperarea)
hax.set_xlim(np.log10(swap.pmin),np.log10(swap.pmax))
hax.set_ylim(np.log10(swap.Ncmax),np.log10(swap.Ncmin))
for label in hax.get_xticklabels():
label.set_visible(False)
for label in hax.get_yticklabels():
label.set_visible(False)
for tick in hax.xaxis.get_ticklines():
tick.set_visible(False)
for tick in hax.yaxis.get_ticklines():
tick.set_visible(False)
plt.sca(hax)
if logscale:
plt.arrow(np.log10(2e-4), np.log10(0.3), 0.0, 0.1, fc="k", ec="k", linewidth=2, head_width=0.2, head_length=0.1)
else:
plt.arrow(np.log10(2e-4), -0.8, 0.0, 0.1, fc="k", ec="k", linewidth=2, head_width=0.2, head_length=0.1)
# hax.set_axis_off()
# Now overlay a transparent frame to plot the subjects in:
upper = fig.add_axes(upperarea, frameon=False)
plt.sca(upper)
upper.set_xlim(swap.pmin,swap.pmax)
upper.set_xscale('log')
upper.set_ylim(swap.Ncmax,swap.Ncmin)
if logscale:
upper.set_yscale('log')
# Vertical lines to mark prior and detect/rejection thresholds:
x = self.thresholds()
plt.axvline(x=swap.prior,color='gray',linestyle='dotted')
plt.axvline(x=x['detection'],color='blue',linestyle='dotted')
plt.axvline(x=x['rejection'],color='red',linestyle='dotted')
upper.set_ylabel('No. of classifications')
# Turn off upper panel x labels, if we are plotting a histogram:
if histogram:
for label in upper.get_xticklabels():
label.set_visible(False)
# Plot title:
if final:
upper.set_title('Candidate Trajectories')
else:
upper.set_title('Example Subject Trajectories')
# Manual over-ride:
if title is not None:
upper.set_title(title)
if histogram:
# Lower panel: histogram:
lower = fig.add_axes(lowerarea, sharex=upper)
plt.sca(lower)
lower.set_xlim(swap.pmin,swap.pmax)
lower.set_xscale('log')
lower.set_ylim(0.1,9999)
# lower.set_yscale('log')
plt.axvline(x=swap.prior,color='gray',linestyle='dotted')
plt.axvline(x=x['detection'],color='blue',linestyle='dotted')
plt.axvline(x=x['rejection'],color='red',linestyle='dotted')
lower.set_xlabel('Posterior Probability Pr(LENS|d)')
lower.set_ylabel('No. of subjects')
else:
lower = False
upper.set_xlabel('Posterior Probability Pr(LENS|d)')
return [upper,lower]
# ----------------------------------------------------------------------
# Prepare to plot subjects' trajectories:
def finish_trajectory_plot(self,axes,filename,t=None,final=None):
# If we are not plotting the histogram, the second axis is False...
if axes[1] is not False:
# Plot histograms! 0 is the upper panel, 1 the lower.
plt.sca(axes[1])
bins = np.linspace(np.log10(swap.pmin),np.log10(swap.pmax),32,endpoint=True)
bins = 10.0**bins
colors = ['dimgray','blue','red']
labels = ['Test: Survey','Training: Sims','Training: Duds']
thresholds = self.thresholds()
for j,kind in enumerate(['test','sim','dud']):
self.collect_probabilities(kind)
p = self.probabilities[kind]
# Sometimes all probabilities are lower than pmin!
# Snap to grid.
p[p<swap.pmin] = swap.pmin
# print "kind,bins,p = ",kind,bins,p
# Final plot - only show subjects above threshold:
if final:
p = p[p>thresholds['rejection']]
# Pylab histogram:
plt.hist(p, bins=bins, histtype='stepfilled', color=colors[j], alpha=1.0, label=labels[j])
plt.legend(prop={'size':10}, framealpha=1.0)
if t is not None:
# Add timestamp in top righthand corner:
plt.sca(axes[0])
plt.text(1.3*swap.prior,0.27,t,color='gray')
# Write out to file:
plt.savefig(filename,dpi=300)
return
# ======================================================================
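# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of SWAP itself): building a Collection and
# querying it. Constructing a Subject is project-specific, so a pre-built
# `subject` object is assumed here; the ID is a placeholder.
def _collection_example(subject, ID='ASW0000001'):
    sample = Collection()
    sample.member[ID] = subject
    return sample.size(), sample.list()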
|
|
"""
********************************************************************************
* Name: tethys_gizmos.py
* Author: Nathan Swain
* Created On: 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
import os
import json
import time
import inspect
from datetime import datetime
from django.conf import settings
from django import template
from django.template.loader import get_template
from django.template import TemplateSyntaxError
from django.templatetags.static import static
from django.core.serializers.json import DjangoJSONEncoder
import plotly # noqa: F401
from plotly.offline.offline import get_plotlyjs
from tethys_apps.harvester import SingletonHarvester
from ..gizmo_options.base import TethysGizmoOptions
import tethys_sdk.gizmos
GIZMO_NAME_PROPERTY = 'gizmo_name'
GIZMO_NAME_MAP = {}
EXTENSION_PATH_MAP = {}
# Add gizmos to GIZMO_NAME_MAP
for name, cls in tethys_sdk.gizmos.__dict__.items():
if inspect.isclass(cls) and issubclass(cls, TethysGizmoOptions) and hasattr(cls, GIZMO_NAME_PROPERTY):
GIZMO_NAME_MAP[cls.gizmo_name] = cls
# Add extension gizmos to the GIZMO_NAME_MAP
harvester = SingletonHarvester()
extension_modules = harvester.extension_modules
for module_name, extension_module in extension_modules.items():
try:
gizmo_module = __import__('{}.gizmos'.format(extension_module), fromlist=[''])
for name, cls in gizmo_module.__dict__.items():
if inspect.isclass(cls) and issubclass(cls, TethysGizmoOptions) and hasattr(cls, GIZMO_NAME_PROPERTY):
GIZMO_NAME_MAP[cls.gizmo_name] = cls
gizmo_module_path = gizmo_module.__path__[0]
EXTENSION_PATH_MAP[cls.gizmo_name] = os.path.abspath(os.path.dirname(gizmo_module_path))
except ImportError:
# TODO: Add Log?
continue
register = template.Library()
CSS_OUTPUT_TYPE = 'css'
CSS_GLOBAL_OUTPUT_TYPE = 'global_css'
JS_OUTPUT_TYPE = 'js'
JS_GLOBAL_OUTPUT_TYPE = 'global_js'
CSS_EXTENSION = 'css'
JS_EXTENSION = 'js'
EXTERNAL_INDICATOR = '://'
CSS_OUTPUT_TYPES = (CSS_OUTPUT_TYPE, CSS_GLOBAL_OUTPUT_TYPE)
JS_OUTPUT_TYPES = (JS_OUTPUT_TYPE, JS_GLOBAL_OUTPUT_TYPE)
GLOBAL_OUTPUT_TYPES = (CSS_GLOBAL_OUTPUT_TYPE, JS_GLOBAL_OUTPUT_TYPE)
VALID_OUTPUT_TYPES = CSS_OUTPUT_TYPES + JS_OUTPUT_TYPES
class HighchartsDateEncoder(DjangoJSONEncoder):
"""
Special Json Encoder for Tethys
"""
def default(self, obj):
# Highcharts date serializer
if isinstance(obj, datetime):
return time.mktime(obj.timetuple()) * 1000
return super().default(obj)
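# Hedged usage sketch (illustration only): serializing a datetime with the
# encoder above yields the Highcharts-style epoch timestamp in milliseconds.
def _highcharts_encoder_example():
    sample = {'timestamp': datetime(2014, 1, 1)}
    return json.dumps(sample, cls=HighchartsDateEncoder)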
class SetVarNode(template.Node):
def __init__(self, var_name, var_value):
self.var_names = var_name.split('.')
self.var_name = self.var_names.pop()
self.var_value = var_value
def render(self, context):
try:
value = template.Variable(self.var_value).resolve(context)
except template.VariableDoesNotExist:
value = ''
for name in self.var_names:
context = context[name]
context[self.var_name] = value
return ''
@register.tag(name='set')
def set_var(parser, token):
"""
{% set some_var = '123' %}
"""
parts = token.split_contents()
if len(parts) < 4:
raise template.TemplateSyntaxError("'set' tag must be of the form: {% set <var_name> = <var_value> %}")
return SetVarNode(parts[1], parts[3])
@register.filter(is_safe=True)
def isstring(value):
"""
    Filter that returns True if the value is a string
    """
    return isinstance(value, str)
@register.filter
def return_item(container, i):
try:
return container[i]
except Exception:
return None
def json_date_handler(obj):
if isinstance(obj, datetime):
return time.mktime(obj.timetuple()) * 1000
else:
return obj
@register.filter
def jsonify(data):
"""
Convert python data structures into a JSON string
"""
return json.dumps(data, default=json_date_handler)
@register.filter
def divide(value, divisor):
"""
Divide value by divisor
"""
v = float(value)
d = float(divisor)
return v / d
class TethysGizmoIncludeDependency(template.Node):
"""
Custom template include node that returns Tethys gizmos
"""
def __init__(self, gizmo_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self._load_gizmo_name(gizmo_name)
def _load_gizmo_name(self, gizmo_name):
"""
This loads the rendered gizmos into context
"""
self.gizmo_name = gizmo_name
if self.gizmo_name is not None:
# Handle case where gizmo_name is a string literal
if self.gizmo_name[0] in ('"', "'"):
self.gizmo_name = self.gizmo_name.replace("'", '')
self.gizmo_name = self.gizmo_name.replace('"', '')
def _load_gizmos_rendered(self, context):
"""
This loads the rendered gizmos into context
"""
        # Add gizmo name to 'gizmos_rendered' context variable (used to load static libraries)
if 'gizmos_rendered' not in context:
context.update({'gizmos_rendered': []})
# add the gizmo in the tag to gizmos_rendered list
if self.gizmo_name is not None:
if self.gizmo_name not in context['gizmos_rendered']:
if self.gizmo_name not in GIZMO_NAME_MAP:
raise TemplateSyntaxError('The gizmo name "{0}" is invalid.'.format(self.gizmo_name))
context['gizmos_rendered'].append(self.gizmo_name)
def render(self, context):
"""
Load in the gizmos to be rendered
"""
try:
self._load_gizmos_rendered(context)
except Exception as e:
if settings.TEMPLATE_DEBUG:
raise e
return ''
class TethysGizmoIncludeNode(TethysGizmoIncludeDependency):
"""
Custom template include node that returns Tethys gizmos
"""
def __init__(self, options, gizmo_name, *args, **kwargs):
self.options = options
super().__init__(gizmo_name, *args, **kwargs)
def render(self, context):
resolved_options = template.Variable(self.options).resolve(context)
try:
if self.gizmo_name is None or self.gizmo_name not in GIZMO_NAME_MAP:
if hasattr(resolved_options, GIZMO_NAME_PROPERTY):
self._load_gizmo_name(resolved_options.gizmo_name)
else:
raise TemplateSyntaxError('A valid gizmo name is required for this input format.')
self._load_gizmos_rendered(context)
# Derive path to gizmo template
if self.gizmo_name not in EXTENSION_PATH_MAP:
# Determine path to gizmo template
gizmo_templates_root = os.path.join('tethys_gizmos', 'gizmos')
else:
gizmo_templates_root = os.path.join(EXTENSION_PATH_MAP[self.gizmo_name], 'templates', 'gizmos')
gizmo_file_name = '{0}.html'.format(self.gizmo_name)
template_name = os.path.join(gizmo_templates_root, gizmo_file_name)
# reset gizmo_name in case Node is rendered with different options
self._load_gizmo_name(None)
# Retrieve the gizmo template and render
t = get_template(template_name)
return t.render(resolved_options)
except Exception:
if hasattr(settings, 'TEMPLATES'):
for template_settings in settings.TEMPLATES:
if 'OPTIONS' in template_settings \
and 'debug' in template_settings['OPTIONS'] \
and template_settings['OPTIONS']['debug']:
raise
return ''
@register.tag
def gizmo(parser, token):
"""
Similar to the include tag, gizmo loads special templates called gizmos that come with the django-tethys_gizmo
app. Gizmos provide tools for developing user interface elements with minimal code. Examples include date pickers,
maps, and interactive plots.
To insert a gizmo, use the "gizmo" tag and give it a Gizmo object of configuration parameters.
Example::
{% load tethys_gizmos %}
{% gizmo options %}
The old method of using the gizmo name is still supported.
Example::
{% load tethys_gizmos %}
{% gizmo gizmo_name options %}
.. note: The Gizmo "options" object must be a template context variable.
.. note: All supporting css and javascript libraries are loaded using the gizmo_dependency tag (see below).
"""
gizmo_arg_list = token.split_contents()[1:]
if len(gizmo_arg_list) == 1:
gizmo_options = gizmo_arg_list[0]
gizmo_name = None
elif len(gizmo_arg_list) == 2:
gizmo_name, gizmo_options = gizmo_arg_list
else:
raise TemplateSyntaxError('"gizmo" tag takes at least one argument: the gizmo options object.')
return TethysGizmoIncludeNode(gizmo_options, gizmo_name)
@register.tag
def import_gizmo_dependency(parser, token):
"""
The gizmo dependency tag will add the dependencies for the gizmo specified
    so that it will be loaded when using the *gizmo_dependencies* tag.
To manually import a gizmo's dependency, use the "import_gizmo_dependency"
tag and give it the name of a gizmo. It needs to be inside of the
"import_gizmos" block.
Example::
{% load tethys_gizmos %}
{% block import_gizmos %}
{% import_gizmo_dependency example_gizmo %}
{% import_gizmo_dependency "example_gizmo" %}
{% endblock %}
.. note: All supporting css and javascript libraries are loaded using the gizmo_dependencies tag (see below).
"""
try:
tag_name, gizmo_name = token.split_contents()
except ValueError:
raise TemplateSyntaxError('"%s" tag requires exactly one argument' % token.contents.split()[0])
return TethysGizmoIncludeDependency(gizmo_name)
class TethysGizmoDependenciesNode(template.Node):
"""
Loads gizmo dependencies and renders in "script" or "link" tag appropriately.
"""
def __init__(self, output_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_type = output_type
def _append_dependency(self, dependency, dependency_list):
"""
Add dependency to list if not already in list
"""
if EXTERNAL_INDICATOR in dependency:
static_url = dependency
else:
static_url = static(dependency)
if static_url not in dependency_list:
# Lookup the static url given the path
dependency_list.append(static_url)
def render(self, context):
"""
Load in JS/CSS dependencies to HTML
"""
# NOTE: Use render_context as it is recommended to do so here
# https://docs.djangoproject.com/en/1.10/howto/custom-template-tags/
# initialize lists to store global gizmo css/js dependencies
if 'global_gizmo_js_list' not in context.render_context:
context.render_context['global_gizmo_js_list'] = []
if 'global_gizmo_css_list' not in context.render_context:
context.render_context['global_gizmo_css_list'] = []
# initialize lists to store gizmo css/js dependencies
if 'gizmo_js_list' not in context.render_context:
context.render_context['gizmo_js_list'] = []
if 'gizmo_css_list' not in context.render_context:
context.render_context['gizmo_css_list'] = []
# load list of gizmo css/js dependencies
if 'gizmo_dependencies_loaded' not in context.render_context:
# add all gizmos in context to be loaded
for dict_element in context:
for key in dict_element:
resolved_options = template.Variable(key).resolve(context)
if hasattr(resolved_options, GIZMO_NAME_PROPERTY):
if resolved_options.gizmo_name not in context['gizmos_rendered']:
context['gizmos_rendered'].append(resolved_options.gizmo_name)
for rendered_gizmo in context['gizmos_rendered']:
# Retrieve the "gizmo_dependencies" module and find the appropriate function
dependencies_module = GIZMO_NAME_MAP[rendered_gizmo]
# Only append dependencies if they do not already exist
for dependency in dependencies_module.get_gizmo_css():
self._append_dependency(dependency, context.render_context['gizmo_css_list'])
for dependency in dependencies_module.get_gizmo_js():
self._append_dependency(dependency, context.render_context['gizmo_js_list'])
for dependency in dependencies_module.get_vendor_css():
self._append_dependency(dependency, context.render_context['global_gizmo_css_list'])
for dependency in dependencies_module.get_vendor_js():
self._append_dependency(dependency, context.render_context['global_gizmo_js_list'])
# Add the main gizmo dependencies last
for dependency in TethysGizmoOptions.get_tethys_gizmos_css():
self._append_dependency(dependency, context.render_context['gizmo_css_list'])
for dependency in TethysGizmoOptions.get_tethys_gizmos_js():
self._append_dependency(dependency, context.render_context['gizmo_js_list'])
context.render_context['gizmo_dependencies_loaded'] = True
# Create markup tags
script_tags = []
style_tags = []
if self.output_type == CSS_GLOBAL_OUTPUT_TYPE or self.output_type is None:
for dependency in context.render_context['global_gizmo_css_list']:
style_tags.append('<link href="{0}" rel="stylesheet" />'.format(dependency))
if self.output_type == CSS_OUTPUT_TYPE or self.output_type is None:
for dependency in context.render_context['gizmo_css_list']:
style_tags.append('<link href="{0}" rel="stylesheet" />'.format(dependency))
if self.output_type == JS_GLOBAL_OUTPUT_TYPE or self.output_type is None:
for dependency in context.render_context['global_gizmo_js_list']:
if dependency.endswith('plotly-load_from_python.js'):
script_tags.append(''.join(
[
'<script type="text/javascript">',
get_plotlyjs(),
'</script>',
])
)
else:
script_tags.append('<script src="{0}" type="text/javascript"></script>'.format(dependency))
if self.output_type == JS_OUTPUT_TYPE or self.output_type is None:
for dependency in context.render_context['gizmo_js_list']:
script_tags.append('<script src="{0}" type="text/javascript"></script>'.format(dependency))
# Combine all tags
tags = style_tags + script_tags
tags_string = '\n'.join(tags)
return tags_string
@register.tag
def gizmo_dependencies(parser, token):
"""
Write all gizmo dependencies (JavaScript and CSS) to HTML.
Example::
{% gizmo_dependencies css %}
{% gizmo_dependencies js %}
{% gizmo_dependencies global_css %}
{% gizmo_dependencies global_js %}
"""
output_type = None
bits = token.split_contents()
if len(bits) > 2:
raise TemplateSyntaxError('"{0}" takes at most one argument: the type of dependencies to output '
'(either "js" or "css")'.format(token.split_contents()[0]))
elif len(bits) == 2:
output_type = bits[1]
# Validate output_type
if output_type:
# Remove quotes
if output_type[0] in ('"', "'"):
output_type = output_type.replace("'", '')
output_type = output_type.replace('"', '')
# Lowercase
output_type = output_type.lower()
# Check for valid values
if output_type not in VALID_OUTPUT_TYPES:
raise TemplateSyntaxError('Invalid output type specified: only "js", "global_js", "css" and '
'"global_css" are allowed, "{0}" given.'.format(output_type))
return TethysGizmoDependenciesNode(output_type)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
World Wide DNS Driver
"""
__all__ = [
'WorldWideDNSDriver'
]
import re
from libcloud.common.types import LibcloudError
from libcloud.common.worldwidedns import WorldWideDNSConnection
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError
from libcloud.dns.types import RecordError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
MAX_RECORD_ENTRIES = 40 # Maximum record entries for zone
class WorldWideDNSError(LibcloudError):
def __repr__(self):
return ("<WorldWideDNSError in " +
repr(self.driver) +
" " +
repr(self.value) + ">")
class WorldWideDNSDriver(DNSDriver):
type = Provider.WORLDWIDEDNS
name = 'World Wide DNS'
website = 'https://www.worldwidedns.net/'
connectionCls = WorldWideDNSConnection
RECORD_TYPE_MAP = {
RecordType.MX: 'MX',
RecordType.CNAME: 'CNAME',
RecordType.A: 'A',
RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def __init__(self, key, secret=None, reseller_id=None, secure=True,
host=None, port=None, **kwargs):
"""
:param key: API key or username to used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
:param reseller_id: Reseller ID for reseller accounts
:type reseller_id: ``str``
        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:return: ``None``
"""
super(WorldWideDNSDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
self.reseller_id = reseller_id
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
For more info, please see:
https://www.worldwidedns.net/dns_api_protocol_list.asp
or
https://www.worldwidedns.net/dns_api_protocol_list_reseller.asp
"""
action = '/api_dns_list.asp'
if self.reseller_id is not None:
action = '/api_dns_list_reseller.asp'
zones = self.connection.request(action)
if len(zones.body) == 0:
return []
else:
return self._to_zones(zones.body)
def iterate_records(self, zone):
"""
Return a generator to iterate over records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:rtype: ``generator`` of :class:`Record`
"""
records = self._to_records(zone)
for record in records:
yield record
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
zones = self.list_zones()
zone = [zone for zone in zones if zone.id == zone_id]
if len(zone) == 0:
raise ZoneDoesNotExistError(driver=self,
value="The zone doesn't exists",
zone_id=zone_id)
return zone[0]
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID number of the required record.
:type record_id: ``str``
:rtype: :class:`Record`
"""
zone = self.get_zone(zone_id)
try:
if int(record_id) not in range(1, MAX_RECORD_ENTRIES + 1):
raise RecordDoesNotExistError(value="Record doesn't exists",
driver=zone.driver,
record_id=record_id)
except ValueError:
raise WorldWideDNSError(
value="Record id should be a string number", driver=self)
subdomain = zone.extra.get('S%s' % record_id)
type = zone.extra.get('T%s' % record_id)
data = zone.extra.get('D%s' % record_id)
record = self._to_record(record_id, subdomain, type, data, zone)
return record
def update_zone(self, zone, domain, type='master', ttl=None, extra=None,
ex_raw=False):
"""
Update an existing zone.
:param zone: Zone to update.
:type zone: :class:`Zone`
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
        :param extra: Extra attributes (driver specific) (optional). Values not
                      specified, such as *SECURE*, *IP*, *FOLDER*, *HOSTMASTER*,
                      *REFRESH*, *RETRY* and *EXPIRE*, will be kept as they
                      already are. The same applies to *S(1 to 40)*, *T(1 to
                      40)* and *D(1 to 40)* when not in raw mode, and to
                      *ZONENS* and *ZONEDATA* when in raw mode.
:type extra: ``dict``
        :param ex_raw: Whether to do the update in raw mode (using the zone
                       file) or not.
:type ex_raw: ``bool``
:rtype: :class:`Zone`
For more info, please see
https://www.worldwidedns.net/dns_api_protocol_list_domain.asp
or
https://www.worldwidedns.net/dns_api_protocol_list_domain_raw.asp
or
https://www.worldwidedns.net/dns_api_protocol_list_domain_reseller.asp
or
https://www.worldwidedns.net/dns_api_protocol_list_domain_raw_reseller.asp
"""
if extra is not None:
not_specified = [key for key in zone.extra.keys() if key not in
extra.keys()]
else:
not_specified = zone.extra.keys()
if ttl is None:
ttl = zone.ttl
params = {'DOMAIN': domain,
'TTL': ttl}
for key in not_specified:
params[key] = zone.extra[key]
if extra is not None:
params.update(extra)
if ex_raw:
action = '/api_dns_modify_raw.asp'
if self.reseller_id is not None:
action = '/api_dns_modify_raw_reseller.asp'
method = 'POST'
else:
action = '/api_dns_modify.asp'
if self.reseller_id is not None:
action = '/api_dns_modify_reseller.asp'
method = 'GET'
response = self.connection.request(action, params=params, # noqa
method=method)
zone = self.get_zone(zone.id)
return zone
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (MX, CNAME, A, NS, SRV, TXT).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Contains 'entry' Entry position (1 thru 40)
:type extra: ``dict``
:rtype: :class:`Record`
"""
if (extra is None) or ('entry' not in extra):
raise WorldWideDNSError(value="You must enter 'entry' parameter",
driver=self)
record_id = extra.get('entry')
if name == '':
name = '@'
if type not in self.RECORD_TYPE_MAP:
raise RecordError(value="Record type is not allowed",
driver=record.zone.driver,
record_id=name)
zone = record.zone
extra = {'S%s' % record_id: name,
'T%s' % record_id: type,
'D%s' % record_id: data}
zone = self.update_zone(zone, zone.domain, extra=extra)
record = self.get_record(zone.id, record_id)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
        :param extra: Extra attributes (driver specific) (optional). The
                      supported parameter here is *DYN*, whose value should be
                      1 for standard and 2 for dynamic. Default is 1.
:type extra: ``dict``
:rtype: :class:`Zone`
For more info, please see
https://www.worldwidedns.net/dns_api_protocol_new_domain.asp
or
https://www.worldwidedns.net/dns_api_protocol_new_domain_reseller.asp
"""
if type == 'master':
_type = 0
elif type == 'slave':
_type = 1
if extra:
dyn = extra.get('DYN') or 1
else:
dyn = 1
params = {'DOMAIN': domain,
'TYPE': _type}
action = '/api_dns_new_domain.asp'
if self.reseller_id is not None:
params['DYN'] = dyn
action = '/api_dns_new_domain_reseller.asp'
self.connection.request(action, params=params)
zone = self.get_zone(domain)
if ttl is not None:
zone = self.update_zone(zone, zone.domain, ttl=ttl)
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
        We can create up to 40 records per domain. If all slots are full, we can
        replace one of them by choosing a specific entry in the ``extra``
        argument.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (MX, CNAME, A, NS, SRV, TXT).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Contains 'entry' Entry position (1 thru 40)
:type extra: ``dict``
:rtype: :class:`Record`
"""
if (extra is None) or ('entry' not in extra):
# If no entry is specified, we look for an available one. If all
# are full, raise error.
record_id = self._get_available_record_entry(zone)
if not record_id:
raise WorldWideDNSError(value="All record entries are full",
driver=zone.driver)
else:
record_id = extra.get('entry')
if name == '':
name = '@'
if type not in self.RECORD_TYPE_MAP:
raise RecordError(value="Record type is not allowed",
driver=zone.driver,
record_id=record_id)
extra = {'S%s' % record_id: name,
'T%s' % record_id: type,
'D%s' % record_id: data}
zone = self.update_zone(zone, zone.domain, extra=extra)
record = self.get_record(zone.id, record_id)
return record
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
For more information, please see
https://www.worldwidedns.net/dns_api_protocol_delete_domain.asp
or
https://www.worldwidedns.net/dns_api_protocol_delete_domain_reseller.asp
"""
params = {'DOMAIN': zone.domain}
action = '/api_dns_delete_domain.asp'
if self.reseller_id is not None:
action = '/api_dns_delete_domain_reseller.asp'
response = self.connection.request(action, params=params)
return response.success()
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
zone = record.zone
for index in range(MAX_RECORD_ENTRIES):
if record.name == zone.extra['S%s' % (index + 1)]:
entry = index + 1
break
extra = {'S%s' % entry: '',
'T%s' % entry: 'NONE',
'D%s' % entry: ''}
self.update_zone(zone, zone.domain, extra=extra)
return True
def ex_view_zone(self, domain, name_server):
"""
View zone file from a name server
:param domain: Domain name.
:type domain: ``str``
:param name_server: Name server to check. (1, 2 or 3)
:type name_server: ``int``
:rtype: ``str``
For more info, please see:
https://www.worldwidedns.net/dns_api_protocol_viewzone.asp
or
https://www.worldwidedns.net/dns_api_protocol_viewzone_reseller.asp
"""
params = {'DOMAIN': domain,
'NS': name_server}
action = '/api_dns_viewzone.asp'
if self.reseller_id is not None:
action = '/api_dns_viewzone_reseller.asp'
response = self.connection.request(action, params=params)
return response.object
def ex_transfer_domain(self, domain, user_id):
"""
        This command allows you, if you are a reseller, to change the userid on
        a domain name to another userid in your account, ONLY if the new userid
        has already been created.
:param domain: Domain name.
:type domain: ``str``
:param user_id: The new userid to connect to the domain name.
:type user_id: ``str``
:rtype: ``bool``
For more info, please see:
https://www.worldwidedns.net/dns_api_protocol_transfer.asp
"""
if self.reseller_id is None:
raise WorldWideDNSError("This is not a reseller account",
driver=self)
params = {'DOMAIN': domain,
'NEW_ID': user_id}
response = self.connection.request('/api_dns_transfer.asp',
params=params)
return response.success()
def _get_available_record_entry(self, zone):
"""Return an available entry to store a record."""
entries = zone.extra
for entry in range(1, MAX_RECORD_ENTRIES + 1):
subdomain = entries.get('S%s' % entry)
_type = entries.get('T%s' % entry)
data = entries.get('D%s' % entry)
if not any([subdomain, _type, data]):
return entry
return None
def _to_zones(self, data):
domain_list = re.split('\r?\n', data)
zones = []
for line in domain_list:
zone = self._to_zone(line)
zones.append(zone)
return zones
def _to_zone(self, line):
data = line.split('\x1f')
name = data[0]
if data[1] == "P":
type = "master"
domain_data = self._get_domain_data(name)
resp_lines = re.split('\r?\n', domain_data.body)
soa_block = resp_lines[:6]
zone_data = resp_lines[6:]
extra = {'HOSTMASTER': soa_block[0], 'REFRESH': soa_block[1],
'RETRY': soa_block[2], 'EXPIRE': soa_block[3],
'SECURE': soa_block[5]}
ttl = soa_block[4]
for line in range(MAX_RECORD_ENTRIES):
line_data = zone_data[line].split('\x1f')
extra['S%s' % (line + 1)] = line_data[0]
_type = line_data[1]
extra['T%s' % (line + 1)] = _type if _type != 'NONE' else ''
try:
extra['D%s' % (line + 1)] = line_data[2]
except IndexError:
extra['D%s' % (line + 1)] = ''
elif data[1] == 'S':
type = 'slave'
extra = {}
ttl = 0
return Zone(id=name, domain=name, type=type,
ttl=ttl, driver=self, extra=extra)
def _get_domain_data(self, name):
params = {'DOMAIN': name}
data = self.connection.request('/api_dns_list_domain.asp',
params=params)
return data
def _to_records(self, zone):
records = []
for record_id in range(1, MAX_RECORD_ENTRIES + 1):
subdomain = zone.extra['S%s' % (record_id)]
type = zone.extra['T%s' % (record_id)]
data = zone.extra['D%s' % (record_id)]
if subdomain and type and data:
record = self._to_record(
record_id, subdomain, type, data, zone)
records.append(record)
return records
def _to_record(self, _id, subdomain, type, data, zone):
return Record(_id, subdomain, type, data, zone, zone.driver)
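# Hedged usage sketch (not part of the driver; credentials, domain and address
# are placeholders). Each zone exposes 40 fixed record slots through the
# zone.extra keys S<n>/T<n>/D<n> (subdomain, type, data); create_record fills
# the first free slot unless extra={'entry': <1-40>} picks one explicitly.
def _worldwidedns_example():
    driver = WorldWideDNSDriver(key='apiuser', secret='apipassword')
    zone = driver.create_zone(domain='example.com')
    return driver.create_record(name='www', zone=zone, type=RecordType.A,
                                data='192.0.2.10', extra={'entry': 1})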
|